diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index ee0f3701c114..5bc20d744c5e 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -59,8 +59,6 @@ The following command installs `buildkitd` and `buildctl` to `/usr/local/bin`:
 make && sudo make install
 ```
 
-You can also use `make binaries-all` to prepare `buildkitd.containerd_only` and `buildkitd.oci_only`.
-
 To build containerized `moby/buildkit:local` and `moby/buildkit:local-rootless` images:
 
 ```bash
 make images
@@ -152,7 +150,7 @@ otherwise cleanup our project.
 
       Register for the Docker Community Slack (dockercommunity.slack.com)
-      Click here for an invite to docker community slack.
+      Click here for an invite to docker community slack. You'll find us in the #buildkit channel, and in the #moby-project channel for general discussions.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000000..8d77e584d110
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    open-pull-requests-limit: 10
+    directory: "/"
+    schedule:
+      interval: "daily"
+    labels:
+      - "dependencies"
+      - "bot"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f8f3ef95de06..40d60dc762a4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,5 +1,9 @@
 name: build
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 on:
   schedule:
     - cron: '0 10 * * *' # everyday at 10am
@@ -7,40 +11,46 @@ on:
   push:
     branches:
       - 'master'
+      - 'v[0-9]+.[0-9]+'
     tags:
       - 'v*'
       - 'dockerfile/*'
   pull_request:
-    branches:
-      - 'master'
-      - 'v*'
+    paths-ignore:
+      - 'README.md'
+      - 'docs/**'
+      - 'frontend/dockerfile/docs/**'
 
 env:
-  REPO_SLUG_ORIGIN: "moby/buildkit:v0.10.0-rc1"
+  REPO_SLUG_ORIGIN: "moby/buildkit:v0.11.0-rc4"
   REPO_SLUG_TARGET: "moby/buildkit"
   DF_REPO_SLUG_TARGET: "docker/dockerfile-upstream"
   PLATFORMS: "linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64"
   CACHE_GHA_SCOPE_IT: "integration-tests"
   CACHE_GHA_SCOPE_BINARIES: "binaries"
   CACHE_GHA_SCOPE_CROSS: "cross"
+  TESTFLAGS: "-v --parallel=6 --timeout=30m"
+  BUILDX_VERSION: "v0.10.0-rc3" # leave empty to use the one available on GitHub virtual environment
+  GO_VERSION: "1.19"
 
 jobs:
   base:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v1
+        uses: crazy-max/ghaction-github-runtime@v2
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
           buildkitd-flags: --debug
       -
@@ -59,7 +69,7 @@ jobs:
         CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }}
 
   test:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [base]
     strategy:
       fail-fast: false
@@ -71,7 +81,6 @@ jobs:
           - containerd
           - containerd-rootless
           - containerd-1.5
-          - containerd-1.4
           - containerd-snapshotter-stargz
           - oci
           - oci-rootless
@@ -89,23 +98,23 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v1
+        uses: crazy-max/ghaction-github-runtime@v2
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
           buildkitd-flags: --debug
       -
         name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }}
         run: |
-          export TESTFLAGS="-v --parallel=6 --timeout=20m"
           if [ -n "${{ matrix.worker }}" ]; then
             export TESTFLAGS="${TESTFLAGS} --run=//worker=${{ matrix.worker }}$"
           fi
@@ -118,37 +127,126 @@ jobs:
         CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}
       -
         name: Upload coverage file
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: coverage
           path: ./coverage
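As an aside, the `--run=//worker=...$` filter appended to `TESTFLAGS` above is how a single worker is selected from the integration matrix. A minimal local sketch under the same conventions (package and worker chosen arbitrarily):

```bash
# Run only the ./client integration tests against the OCI worker,
# mirroring how the workflow composes TESTFLAGS.
export TESTFLAGS="-v --parallel=6 --timeout=30m --run=//worker=oci$"
TESTPKGS=./client ./hack/test integration
```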
+  test-nydus:
+    runs-on: ubuntu-20.04
+    needs: [base]
+    strategy:
+      fail-fast: false
+      matrix:
+        pkg:
+          - ./client
+        worker:
+          - containerd
+          - oci
+        typ:
+          - integration
+        exclude:
+          - pkg: ./client ./cmd/buildctl ./worker/containerd ./solver ./frontend
+            typ: dockerfile
+        include:
+          - pkg: ./...
+            skip-integration-tests: 1
+            typ: integration
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v2
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: ${{ env.BUILDX_VERSION }}
+          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
+      -
+        name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }}
+        run: |
+          if [ -n "${{ matrix.worker }}" ]; then
+            export TESTFLAGS="${TESTFLAGS} --tags=nydus --run=//worker=${{ matrix.worker }}$"
+          fi
+          ./hack/test ${{ matrix.typ }}
+        env:
+          BUILDKITD_TAGS: nydus
+          TESTPKGS: ${{ matrix.pkg }}
+          SKIP_INTEGRATION_TESTS: ${{ matrix.skip-integration-tests }}
+          CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}
+
+  test-s3:
+    runs-on: ubuntu-20.04
+    needs:
+      - base
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: ${{ env.BUILDX_VERSION }}
+          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
+      -
+        name: Test
+        run: |
+          hack/s3_test/run_test.sh
+
+  test-azblob:
+    runs-on: ubuntu-20.04
+    needs:
+      - base
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: ${{ env.BUILDX_VERSION }}
+          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
+      -
+        name: Test
+        run: |
+          hack/azblob_test/run_test.sh
+
   test-os:
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
         os:
-          # - ubuntu-latest
-          # - macOS-latest
-          - windows-latest
+          # - ubuntu-20.04
+          # - macOS-11
+          - windows-2022
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
-      -
-        name: Cache Go modules
-        uses: actions/cache@v2
+        uses: actions/setup-go@v3
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
+          go-version: ${{ env.GO_VERSION }}
+          cache: true
       -
         name: Go mod
         run: |
@@ -160,31 +258,31 @@ jobs:
         SKIP_INTEGRATION_TESTS: 1
         run: |
           mkdir -p ./coverage
-          go test -coverprofile=./coverage/coverage-${{ github.job }}-${{ matrix.os }}.txt -covermode=atomic ./...
+          go test -coverprofile=./coverage/coverage-${{ github.job }}-${{ matrix.os }}.txt -covermode=atomic ${TESTFLAGS} ./...
         shell: bash
       -
         name: Upload coverage file
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: coverage
           path: ./coverage
 
   upload-coverage:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [test, test-os]
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Download coverage files
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           name: coverage
           path: ./coverage
       -
         name: List coverage files
-        uses: actions/github-script@v3
+        uses: actions/github-script@v6
         id: files
         with:
           result-encoding: string
@@ -195,26 +293,27 @@ jobs:
             .join(',');
       -
         name: Send to Codecov
-        uses: codecov/codecov-action@v2
+        uses: codecov/codecov-action@v3
         with:
           files: ${{ steps.files.outputs.result }}
 
   cross:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v1
+        uses: crazy-max/ghaction-github-runtime@v2
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
           buildkitd-flags: --debug
       -
@@ -228,7 +327,7 @@ jobs:
         CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }}
 
   release-base:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     outputs:
       tag: ${{ steps.prep.outputs.tag }}
       push: ${{ steps.prep.outputs.push }}
@@ -246,13 +345,15 @@ jobs:
           PUSH=push
           elif [[ $GITHUB_REF == refs/heads/* ]]; then
             TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
-            PUSH=push
+            if [ $GITHUB_REF = "refs/heads/${{ github.event.repository.default_branch }}" ]; then
+              PUSH=push
+            fi
           fi
-          echo ::set-output name=tag::${TAG}
-          echo ::set-output name=push::${PUSH}
+          echo "tag=${TAG}" >>${GITHUB_OUTPUT}
+          echo "push=${PUSH}" >>${GITHUB_OUTPUT}
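The `GITHUB_OUTPUT` rewrite above tracks GitHub's deprecation of the `::set-output` workflow command in favor of environment files. The two syntaxes side by side, for reference:

```bash
# Deprecated workflow-command form (removed above):
echo "::set-output name=tag::${TAG}"
# Environment-file form that replaces it:
echo "tag=${TAG}" >>"$GITHUB_OUTPUT"
```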
 
   image:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [release-base, test, cross]
     strategy:
       fail-fast: false
@@ -263,23 +364,24 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v1
+        uses: crazy-max/ghaction-github-runtime@v2
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
           buildkitd-flags: --debug
       -
         name: Login to DockerHub
         if: needs.release-base.outputs.push == 'push'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -288,27 +390,29 @@ jobs:
         run: |
           ./hack/images "${{ needs.release-base.outputs.tag }}" "$REPO_SLUG_TARGET" "${{ needs.release-base.outputs.push }}"
         env:
+          RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
           TARGET: ${{ matrix.target-stage }}
           CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} type=gha,scope=image${{ matrix.target-stage }}
           CACHE_TO: type=gha,scope=image${{ matrix.target-stage }}
 
   binaries:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [release-base, test, cross]
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Expose GitHub Runtime
-        uses: crazy-max/ghaction-github-runtime@v1
+        uses: crazy-max/ghaction-github-runtime@v2
       -
         name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
           buildkitd-flags: --debug
       -
@@ -316,15 +420,12 @@ jobs:
         run: |
           ./hack/release-tar "${{ needs.release-base.outputs.tag }}" release-out
         env:
+          RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
           PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
           CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }}
-      -
-        name: Move artifacts
-        run: |
-          mv ./release-out/**/* ./release-out/
       -
         name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: buildkit
           path: ./release-out/*
@@ -332,7 +433,7 @@ jobs:
       -
         name: GitHub Release
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: softprops/action-gh-release@v1
+        uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
@@ -341,12 +442,12 @@ jobs:
           name: ${{ needs.release-base.outputs.tag }}
 
   frontend-base:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     if: github.event_name != 'schedule'
     outputs:
       typ: ${{ steps.prep.outputs.typ }}
-      tag: ${{ steps.prep.outputs.tag }}
       push: ${{ steps.prep.outputs.push }}
+      matrix: ${{ steps.prep.outputs.matrix }}
     steps:
       -
         name: Prepare
@@ -359,51 +460,63 @@ jobs:
           TYP=tag
           TAG=${GITHUB_REF#refs/tags/}
           PUSH=push
-        elif [[ $GITHUB_REF == refs/heads/* ]]; then
+        elif [ $GITHUB_REF = "refs/heads/${{ github.event.repository.default_branch }}" ]; then
           PUSH=push
         fi
-        echo ::set-output name=typ::${TYP}
-        echo ::set-output name=tag::${TAG}
-        echo ::set-output name=push::${PUSH}
+        echo "typ=${TYP}" >>${GITHUB_OUTPUT}
+        echo "push=${PUSH}" >>${GITHUB_OUTPUT}
+        if [ "${TYP}" = "master" ]; then
+          echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag, "labs"]')" >>${GITHUB_OUTPUT}
+        else
+          echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag]')" >>${GITHUB_OUTPUT}
+        fi
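The `jq -cn` calls above emit a compact JSON array that the `frontend-image` job below consumes via `fromJson` to build its matrix. A quick illustration with hypothetical tag values:

```bash
jq -cn --arg tag "master" '[$tag, "labs"]'   # prints ["master","labs"]
jq -cn --arg tag "1.5" '[$tag]'              # prints ["1.5"]
```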
needs.frontend-base.outputs.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}" - env: - CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} - CACHE_TO: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} - - - name: Build ${{ needs.frontend-base.outputs.typ }}/labs - if: needs.frontend-base.outputs.typ == 'master' + name: Build run: | - ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" labs "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}" + ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ matrix.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}" env: - CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }} + RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }} + PLATFORMS: ${{ env.PLATFORMS }},linux/386,linux/mips,linux/mipsle,linux/mips64,linux/mips64le + CACHE_FROM: type=gha,scope=${{ env.CACHE_SCOPE }} + CACHE_TO: type=gha,scope=${{ env.CACHE_SCOPE }} diff --git a/.github/workflows/buildx-image.yml b/.github/workflows/buildx-image.yml index e97ca41c579c..d9c655480f06 100644 --- a/.github/workflows/buildx-image.yml +++ b/.github/workflows/buildx-image.yml @@ -9,6 +9,10 @@ # moby/buildkit:v0.8.1-rootless > moby/buildkit:buildx-stable-1-rootless name: buildx-image +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + on: workflow_dispatch: inputs: @@ -27,10 +31,11 @@ on: env: REPO_SLUG_TARGET: "moby/buildkit" + BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment jobs: create: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: @@ -40,11 +45,14 @@ jobs: steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 + with: + version: ${{ env.BUILDX_VERSION }} + buildkitd-flags: --debug - name: Login to DockerHub if: github.event.inputs.dry-run != 'true' - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/dockerd.yml b/.github/workflows/dockerd.yml new file mode 100644 index 000000000000..436288dc0846 --- /dev/null +++ b/.github/workflows/dockerd.yml @@ -0,0 +1,139 @@ +name: dockerd + +on: + # TODO: add event to build on command in PR (e.g., /test-dockerd) + workflow_dispatch: + inputs: + version: + description: 'Docker version' + required: true + default: '20.10.19' + +env: + REPO_SLUG_ORIGIN: "moby/buildkit:latest" + CACHE_GHA_SCOPE_IT: "integration-tests" + CACHE_GHA_SCOPE_BINARIES: "binaries" + TESTFLAGS: "-v --parallel=1 --timeout=30m" + BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment + +jobs: + prepare: + runs-on: ubuntu-20.04 + steps: + - + name: Check version + run: | + version=${{ github.event.inputs.version }} + if [ -z "$version" ]; then + version=20.10.19 + fi + echo "DOCKER_VERSION=$version" >> $GITHUB_ENV + - + name: Check build + uses: actions/github-script@v6 + id: build + with: + result-encoding: string + script: | + try { + new URL("${{ env.DOCKER_VERSION }}"); + } catch (e) { + return false; + } + return true; + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: ${{ env.BUILDX_VERSION }} + driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} + buildkitd-flags: --debug + - + name: Build + if: 
diff --git a/.github/workflows/dockerd.yml b/.github/workflows/dockerd.yml
new file mode 100644
index 000000000000..436288dc0846
--- /dev/null
+++ b/.github/workflows/dockerd.yml
@@ -0,0 +1,139 @@
+name: dockerd
+
+on:
+  # TODO: add event to build on command in PR (e.g., /test-dockerd)
+  workflow_dispatch:
+    inputs:
+      version:
+        description: 'Docker version'
+        required: true
+        default: '20.10.19'
+
+env:
+  REPO_SLUG_ORIGIN: "moby/buildkit:latest"
+  CACHE_GHA_SCOPE_IT: "integration-tests"
+  CACHE_GHA_SCOPE_BINARIES: "binaries"
+  TESTFLAGS: "-v --parallel=1 --timeout=30m"
+  BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment
+
+jobs:
+  prepare:
+    runs-on: ubuntu-20.04
+    steps:
+      -
+        name: Check version
+        run: |
+          version=${{ github.event.inputs.version }}
+          if [ -z "$version" ]; then
+            version=20.10.19
+          fi
+          echo "DOCKER_VERSION=$version" >> $GITHUB_ENV
+      -
+        name: Check build
+        uses: actions/github-script@v6
+        id: build
+        with:
+          result-encoding: string
+          script: |
+            try {
+              new URL("${{ env.DOCKER_VERSION }}");
+            } catch (e) {
+              return false;
+            }
+            return true;
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: ${{ env.BUILDX_VERSION }}
+          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
+      -
+        name: Build
+        if: steps.build.outputs.result == 'true'
+        uses: docker/build-push-action@v3
+        with:
+          context: ${{ env.DOCKER_VERSION }}
+          target: binary
+          outputs: /tmp/moby
+      -
+        name: Rename binary
+        if: steps.build.outputs.result == 'true'
+        run: |
+          if [ -L "/tmp/moby/binary-daemon/dockerd" ]; then
+            mv -f $(readlink /tmp/moby/binary-daemon/dockerd) /tmp/moby/dockerd
+          fi
+      -
+        name: Download
+        if: steps.build.outputs.result != 'true'
+        run: |
+          mkdir -p /tmp/moby
+          cd /tmp/moby
+          wget -qO- "https://download.docker.com/linux/static/stable/x86_64/docker-${{ env.DOCKER_VERSION }}.tgz" | tar xvz --strip 1
+      -
+        name: Upload dockerd
+        uses: actions/upload-artifact@v3
+        with:
+          name: dockerd
+          path: /tmp/moby/dockerd
+          if-no-files-found: error
+
+  test:
+    runs-on: ubuntu-20.04
+    needs:
+      - prepare
+    strategy:
+      fail-fast: false
+      matrix:
+        worker:
+          - dockerd
+          - dockerd-containerd
+        pkg:
+          - ./client
+          - ./cmd/buildctl
+          - ./solver
+          - ./frontend
+          - ./frontend/dockerfile
+        typ:
+          - integration
+        include:
+          - pkg: ./...
+            skip-integration-tests: 1
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+      -
+        name: Expose GitHub Runtime
+        uses: crazy-max/ghaction-github-runtime@v2
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          version: ${{ env.BUILDX_VERSION }}
+          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
+      -
+        name: Download dockerd
+        uses: actions/download-artifact@v3
+        with:
+          name: dockerd
+          path: ./build/
+      -
+        name: Fix dockerd perms
+        run: |
+          chmod +x ./build/dockerd
+      -
+        name: Test
+        run: |
+          ./hack/test ${{ matrix.typ }}
+        env:
+          TEST_DOCKERD: "1"
+          TEST_DOCKERD_BINARY: "./build/dockerd"
+          TESTPKGS: "${{ matrix.pkg }}"
+          TESTFLAGS: "${{ env.TESTFLAGS }} --run=//worker=${{ matrix.worker }}$"
+          SKIP_INTEGRATION_TESTS: "${{ matrix.skip-integration-tests }}"
+          CACHE_FROM: "type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}"
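The `Check build` step in the `prepare` job above uses JavaScript's `new URL()` to decide whether the `version` input is a URL (build dockerd from that context) or a plain version number (download the static binaries). A rough bash paraphrase of that branching, for illustration only:

```bash
# Sketch of the decision, not the actual implementation:
if echo "$DOCKER_VERSION" | grep -qE '^[a-z]+://'; then
  echo "input parses as a URL: build dockerd from this build context"
else
  echo "input is a version: download docker-$DOCKER_VERSION.tgz"
fi
```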
diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
index ba5d757b7fb2..21bdc61939e0 100644
--- a/.github/workflows/validate.yml
+++ b/.github/workflows/validate.yml
@@ -1,24 +1,27 @@
 name: validate
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 on:
   workflow_dispatch:
   push:
     branches:
       - 'master'
+      - 'v[0-9]+.[0-9]+'
     tags:
       - 'v*'
       - 'dockerfile/*'
   pull_request:
-    branches:
-      - 'master'
-      - 'v*'
 
 env:
   REPO_SLUG_ORIGIN: "moby/buildkit:latest"
+  BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment
 
 jobs:
   validate:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     strategy:
       fail-fast: false
       matrix:
@@ -30,17 +33,15 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       -
         name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
+          version: ${{ env.BUILDX_VERSION }}
           driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+          buildkitd-flags: --debug
       -
         name: Run
         run: |
           ${{ matrix.script }}
-      -
-        name: Dump context
-        if: always()
-        uses: crazy-max/ghaction-dump-context@v1
diff --git a/.gitignore b/.gitignore
index 5b74bfefa9f7..75c0a9be9885 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# BuildKit project generated files to ignore
+# if you want to ignore files created by your editor/tools,
+# please consider a global .gitignore https://help.github.com/articles/ignoring-files
 bin
 coverage
 release-out
diff --git a/.golangci.yml b/.golangci.yml
index 28911415b7d0..2917d8c47aaf 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -7,19 +7,22 @@ run:
 
   build-tags:
     - dfrunsecurity
+    - dfaddgit
+    - dfaddchecksum
 
 linters:
   enable:
     - deadcode
+    - depguard
     - gofmt
     - goimports
+    - gosimple
     - revive
     - govet
     - importas
     - ineffassign
     - misspell
     - staticcheck
-    - structcheck
    - typecheck
    - unused
    - varcheck
@@ -27,9 +30,22 @@ linters:
     - errname
    - makezero
    - whitespace
+    - nolintlint
+    - gosec
+    - forbidigo
  disable-all: true
 
linters-settings:
+  depguard:
+    list-type: blacklist
+    include-go-root: true
+    packages:
+      # The io/ioutil package has been deprecated.
+      # https://go.dev/doc/go1.16#ioutil
+      - io/ioutil
+  forbidigo:
+    forbid:
+      - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
  importas:
    alias:
      - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
@@ -37,9 +53,20 @@ linters-settings:
       - pkg: "github.com/opencontainers/go-digest"
        alias: "digest"
    no-unaliased: true
+  gosec:
+    excludes:
+      - G101 # Potential hardcoded credentials (false positives)
+      - G402 # TLS MinVersion too low
+      - G601 # Implicit memory aliasing in for loop (false positives)
+      - G504 # Import blocklist: net/http/cgi
+    config:
+      G306: "0644"
 
issues:
  exclude-rules:
    - linters:
        - revive
      text: "stutters"
+    - linters:
+        - staticcheck
+      text: "SA1019: .*Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md"
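To reproduce these checks locally, the linter can be run with the same build tags the config enables; a sketch assuming `golangci-lint` is installed (the repo may also wrap this in a hack script):

```bash
# forbidigo now flags fmt.Errorf (use errors.Errorf instead) and depguard
# rejects imports of the deprecated io/ioutil package.
golangci-lint run --build-tags dfrunsecurity,dfaddgit,dfaddchecksum ./...
```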
diff --git a/Dockerfile b/Dockerfile
index 14e9bd0e6d8d..b64f57bd8358 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,29 +1,39 @@
 # syntax=docker/dockerfile-upstream:master
 
-ARG RUNC_VERSION=v1.0.2
-ARG CONTAINERD_VERSION=v1.6.1
+ARG RUNC_VERSION=v1.1.4
+ARG CONTAINERD_VERSION=v1.6.18
 # containerd v1.5 for integration tests
-ARG CONTAINERD_ALT_VERSION_15=v1.5.10
-# containerd v1.4 for integration tests
-ARG CONTAINERD_ALT_VERSION_14=v1.4.13
-# available targets: buildkitd, buildkitd.oci_only, buildkitd.containerd_only
-ARG BUILDKIT_TARGET=buildkitd
+ARG CONTAINERD_ALT_VERSION_15=v1.5.18
 ARG REGISTRY_VERSION=2.8.0
-ARG ROOTLESSKIT_VERSION=v0.14.6
-ARG CNI_VERSION=v1.1.0
-ARG STARGZ_SNAPSHOTTER_VERSION=v0.11.2
+ARG ROOTLESSKIT_VERSION=v1.0.1
+ARG CNI_VERSION=v1.1.1
+ARG STARGZ_SNAPSHOTTER_VERSION=v0.13.0
+ARG NERDCTL_VERSION=v1.0.0
+ARG DNSNAME_VERSION=v1.3.1
+ARG NYDUS_VERSION=v2.1.0
+
+ARG ALPINE_VERSION=3.17
+
+# alpine base for buildkit image
+# TODO: remove this when alpine image supports riscv64
+FROM alpine:${ALPINE_VERSION} AS alpine-amd64
+FROM alpine:${ALPINE_VERSION} AS alpine-arm
+FROM alpine:${ALPINE_VERSION} AS alpine-arm64
+FROM alpine:${ALPINE_VERSION} AS alpine-s390x
+FROM alpine:${ALPINE_VERSION} AS alpine-ppc64le
+FROM alpine:edge@sha256:c223f84e05c23c0571ce8decefef818864869187e1a3ea47719412e205c8c64e AS alpine-riscv64
+FROM alpine-$TARGETARCH AS alpinebase
 
-ARG ALPINE_VERSION=3.15
+# xx is a helper for cross-compilation
+FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.2.1 AS xx
+
+# go base image
+FROM --platform=$BUILDPLATFORM golang:1.19-alpine${ALPINE_VERSION} AS golatest
 
 # git stage is used for checking out remote repository sources
 FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS git
 RUN apk add --no-cache git
 
-# xx is a helper for cross-compilation
-FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:1e96844fadaa2f9aea021b2b05299bc02fe4c39a92d8e735b93e8e2b15610128 AS xx
-
-FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS golatest
-
 # gobuild is base stage for compiling go/cgo
 FROM golatest AS gobuild-base
 RUN apk add --no-cache file bash clang lld pkgconfig git make
@@ -48,6 +58,15 @@ RUN --mount=from=runc-src,src=/usr/src/runc,target=. --mount=target=/root/.cache
   CGO_ENABLED=1 xx-go build -mod=vendor -ldflags '-extldflags -static' -tags 'apparmor seccomp netgo cgo static_build osusergo' -o /usr/bin/runc ./ && \
   xx-verify --static /usr/bin/runc
 
+# dnsname CNI plugin for testing
+FROM gobuild-base AS dnsname
+ARG DNSNAME_VERSION
+WORKDIR /go/dnsname
+RUN git clone https://github.com/containers/dnsname.git . \
+  && git checkout -q "$DNSNAME_VERSION"
+RUN --mount=target=/root/.cache,type=cache \
+  set -e; make binaries; mv bin/dnsname /usr/bin/dnsname
+
 FROM gobuild-base AS buildkit-base
 WORKDIR /src
 ENV GOFLAGS=-mod=vendor
@@ -72,6 +91,7 @@ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
 
 # build buildkitd binary
 FROM buildkit-base AS buildkitd
+# BUILDKITD_TAGS defines additional Go build tags for compiling buildkitd
 ARG BUILDKITD_TAGS
 ARG TARGETPLATFORM
 RUN --mount=target=. --mount=target=/root/.cache,type=cache \
@@ -82,8 +102,9 @@ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
 
 FROM scratch AS binaries-linux-helper
 COPY --link --from=runc /usr/bin/runc /buildkit-runc
-# built from https://github.com/tonistiigi/binfmt/releases/tag/buildkit%2Fv6.2.0-24
-COPY --link --from=tonistiigi/binfmt:buildkit@sha256:ea7632b4e0b2406db438730c604339b38c23ac51a2f73c89ba50abe5e2146b4b / /
+# built from https://github.com/tonistiigi/binfmt/releases/tag/buildkit%2Fv7.1.0-30
+COPY --link --from=tonistiigi/binfmt:buildkit-v7.1.0-30@sha256:45dd57b4ba2f24e2354f71f1e4e51f073cb7a28fd848ce6f5f2a7701142a6bf0 / /
+
 FROM binaries-linux-helper AS binaries-linux
 COPY --link --from=buildctl /usr/bin/buildctl /
 COPY --link --from=buildkitd /usr/bin/buildkitd /
@@ -95,6 +116,8 @@ FROM scratch AS binaries-windows
 COPY --link --from=buildctl /usr/bin/buildctl /buildctl.exe
 
 FROM binaries-$TARGETOS AS binaries
+# enable scanning for this stage
+ARG BUILDKIT_SBOM_SCAN_STAGE=true
 
 FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
 RUN apk add --no-cache tar gzip
@@ -107,8 +130,7 @@ RUN --mount=from=binaries \
 FROM scratch AS release
 COPY --link --from=releaser /out/ /
 
-# tonistiigi/alpine supports riscv64
-FROM tonistiigi/alpine:${ALPINE_VERSION} AS buildkit-export
+FROM alpinebase AS buildkit-export
 RUN apk add --no-cache fuse3 git openssh pigz xz \
   && ln -s fusermount3 /usr/bin/fusermount
 COPY --link examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/
@@ -123,7 +145,7 @@ RUN git clone https://github.com/containerd/containerd.git containerd
 FROM gobuild-base AS containerd-base
 WORKDIR /go/src/github.com/containerd/containerd
 ARG TARGETPLATFORM
-ENV CGO_ENABLED=1 BUILDTAGS=no_btrfs
+ENV CGO_ENABLED=1 BUILDTAGS=no_btrfs GO111MODULE=off
 RUN xx-apk add musl-dev gcc && xx-go --wrap
 
 FROM containerd-base AS containerd
@@ -139,7 +161,6 @@ RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target
 # containerd v1.5 for integration tests
 FROM containerd-base as containerd-alt-15
 ARG CONTAINERD_ALT_VERSION_15
-ARG GO111MODULE=off
 RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \
   git fetch origin \
   && git checkout -q "$CONTAINERD_ALT_VERSION_15" \
@@ -147,17 +168,6 @@ RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target
   && make bin/containerd \
   && make bin/containerd-shim-runc-v2 \
   && mv bin /out
 
-# containerd v1.4 for integration tests
-FROM containerd-base as containerd-alt-14
-ARG CONTAINERD_ALT_VERSION_14
-ARG GO111MODULE=off
-RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \
-  git fetch origin \
-  && git checkout -q "$CONTAINERD_ALT_VERSION_14" \
-  && make bin/containerd \
-  && make bin/containerd-shim-runc-v2 \
-  && mv bin /out
-
 ARG REGISTRY_VERSION
 FROM registry:$REGISTRY_VERSION AS registry
 
@@ -183,39 +193,24 @@ RUN --mount=target=/root/.cache,type=cache \
   xx-verify --static /out/containerd-stargz-grpc && \
   xx-verify --static /out/ctr-remote
 
-# Copy together all binaries needed for oci worker mode
-FROM buildkit-export AS buildkit-buildkitd.oci_only
-COPY --link --from=buildkitd.oci_only /usr/bin/buildkitd.oci_only /usr/bin/
-COPY --link --from=buildctl /usr/bin/buildctl /usr/bin/
-ENTRYPOINT ["buildkitd.oci_only"]
-
-# Copy together all binaries for containerd worker mode
-FROM buildkit-export AS buildkit-buildkitd.containerd_only
-COPY --link --from=buildkitd.containerd_only /usr/bin/buildkitd.containerd_only /usr/bin/
-COPY --link --from=buildctl /usr/bin/buildctl /usr/bin/
-ENTRYPOINT ["buildkitd.containerd_only"]
+FROM gobuild-base AS nydus
+ARG NYDUS_VERSION
+ARG TARGETOS
+ARG TARGETARCH
+SHELL ["/bin/bash", "-c"]
+RUN wget https://github.com/dragonflyoss/image-service/releases/download/$NYDUS_VERSION/nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz
+RUN mkdir -p /out/nydus-static && tar xzvf nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz -C /out
 
-# Copy together all binaries for oci+containerd mode
-FROM buildkit-export AS buildkit-buildkitd-linux
+FROM buildkit-export AS buildkit-linux
 COPY --link --from=binaries / /usr/bin/
 ENTRYPOINT ["buildkitd"]
 
-FROM binaries AS buildkit-buildkitd-darwin
+FROM binaries AS buildkit-darwin
 
-FROM binaries AS buildkit-buildkitd-windows
+FROM binaries AS buildkit-windows
 # this is not in binaries-windows because it is not intended for release yet, just CI
 COPY --link --from=buildkitd /usr/bin/buildkitd /buildkitd.exe
 
-FROM buildkit-buildkitd-$TARGETOS AS buildkit-buildkitd
-
-FROM alpine:${ALPINE_VERSION} AS containerd-runtime
-COPY --link --from=runc /usr/bin/runc /usr/bin/
-COPY --link --from=containerd /out/containerd* /usr/bin/
-COPY --link --from=containerd /out/ctr /usr/bin/
-VOLUME /var/lib/containerd
-VOLUME /run/containerd
-ENTRYPOINT ["containerd"]
-
 FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS cni-plugins
 RUN apk add --no-cache curl
 ARG CNI_VERSION
@@ -223,31 +218,41 @@ ARG TARGETOS
 ARG TARGETARCH
 WORKDIR /opt/cni/bin
 RUN curl -Ls https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-$TARGETOS-$TARGETARCH-$CNI_VERSION.tgz | tar xzv
+COPY --link --from=dnsname /usr/bin/dnsname /opt/cni/bin/
 
 FROM buildkit-base AS integration-tests-base
 ENV BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR="1000:1000"
-RUN apk add --no-cache shadow shadow-uidmap sudo vim iptables fuse \
+ARG NERDCTL_VERSION
+RUN apk add --no-cache shadow shadow-uidmap sudo vim iptables ip6tables dnsmasq fuse curl git-daemon \
   && useradd --create-home --home-dir /home/user --uid 1000 -s /bin/sh user \
   && echo "XDG_RUNTIME_DIR=/run/user/1000; export XDG_RUNTIME_DIR" >> /home/user/.profile \
   && mkdir -m 0700 -p /run/user/1000 \
  && chown -R user /run/user/1000 /home/user \
  && ln -s /sbin/iptables-legacy /usr/bin/iptables \
-  && xx-go --wrap
+  && xx-go --wrap \
+  && curl -Ls https://raw.githubusercontent.com/containerd/nerdctl/$NERDCTL_VERSION/extras/rootless/containerd-rootless.sh > /usr/bin/containerd-rootless.sh \
+  && chmod 0755 /usr/bin/containerd-rootless.sh
+# The entrypoint script is needed for enabling nested cgroup v2 (https://github.com/moby/buildkit/issues/3265#issuecomment-1309631736)
+RUN curl -Ls https://raw.githubusercontent.com/moby/moby/v20.10.21/hack/dind > /docker-entrypoint.sh \
+  && chmod 0755 /docker-entrypoint.sh
+ENTRYPOINT ["/docker-entrypoint.sh"]
 # musl is needed to directly use the registry binary that is built on alpine
-ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.4=/opt/containerd-alt-14/bin,containerd-1.5=/opt/containerd-alt-15/bin"
+ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.5=/opt/containerd-alt-15/bin"
 ENV BUILDKIT_INTEGRATION_SNAPSHOTTER=stargz
 ENV CGO_ENABLED=0
+COPY --link --from=nydus /out/nydus-static/* /usr/bin/
 COPY --link --from=stargz-snapshotter /out/* /usr/bin/
 COPY --link --from=rootlesskit /rootlesskit /usr/bin/
-COPY --link --from=containerd-alt-14 /out/containerd* /opt/containerd-alt-14/bin/
 COPY --link --from=containerd-alt-15 /out/containerd* /opt/containerd-alt-15/bin/
 COPY --link --from=registry /bin/registry /usr/bin/
 COPY --link --from=runc /usr/bin/runc /usr/bin/
 COPY --link --from=containerd /out/containerd* /usr/bin/
-COPY --link --from=cni-plugins /opt/cni/bin/bridge /opt/cni/bin/host-local /opt/cni/bin/loopback /opt/cni/bin/
+COPY --link --from=cni-plugins /opt/cni/bin/bridge /opt/cni/bin/host-local /opt/cni/bin/loopback /opt/cni/bin/firewall /opt/cni/bin/dnsname /opt/cni/bin/
 COPY --link hack/fixtures/cni.json /etc/buildkit/cni.json
+COPY --link hack/fixtures/dns-cni.conflist /etc/buildkit/dns-cni.conflist
 COPY --link --from=binaries / /usr/bin/
 
+# integration-tests prepares an image suitable for running all tests
 FROM integration-tests-base AS integration-tests
 COPY . .
 ENV BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS=1 BUILDKIT_CNI_INIT_LOCK_PATH=/run/buildkit_cni_bridge.lock
@@ -256,7 +261,7 @@ FROM integration-tests AS dev-env
 VOLUME /var/lib/buildkit
 
 # Rootless mode.
-FROM tonistiigi/alpine:${ALPINE_VERSION} AS rootless
+FROM alpinebase AS rootless
 RUN apk add --no-cache fuse3 fuse-overlayfs git openssh pigz shadow-uidmap xz
 RUN adduser -D -u 1000 user \
   && mkdir -p /run/user/1000 /home/user/.local/tmp /home/user/.local/share/buildkit \
@@ -275,7 +280,5 @@ ENV BUILDKIT_HOST=unix:///run/user/1000/buildkit/buildkitd.sock
 VOLUME /home/user/.local/share/buildkit
 ENTRYPOINT ["rootlesskit", "buildkitd"]
 
-
-FROM buildkit-${BUILDKIT_TARGET}
-
-
+# buildkit builds the buildkit container image
+FROM buildkit-$TARGETOS AS buildkit
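With the final stage now selected by `$TARGETOS` instead of the removed `BUILDKIT_TARGET` build arg, a local image build reduces to targeting the `buildkit` stage. A hedged sketch (tag name hypothetical; `make images` remains the supported wrapper):

```bash
# Build the final stage for the current platform; BUILDKITD_TAGS is the
# ARG documented above for extra Go build tags (e.g. nydus support).
docker buildx build --target buildkit \
  --build-arg BUILDKITD_TAGS=nydus \
  -t moby/buildkit:local --load .
```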
Email = "ian.campbell@docker.com" GitHub = "ijc" + [people.jedevc] + Name = "Justin Chadwell" + Email = "me@jedevc.com" + GitHub = "jedevc" + [people.ktock] Name = "Kohei Tokunaga" Email = "ktokunaga.mail@gmail.com" diff --git a/README.md b/README.md index ebea076eb16b..c295a095819d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU) -# BuildKit +# BuildKit [![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb) [![Build Status](https://github.com/moby/buildkit/workflows/build/badge.svg)](https://github.com/moby/buildkit/actions?query=workflow%3Abuild) @@ -26,12 +26,18 @@ Read the proposal from https://github.com/moby/moby/issues/32925 Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317 -Join `#buildkit` channel on [Docker Community Slack](http://dockr.ly/slack) +Join `#buildkit` channel on [Docker Community Slack](https://dockr.ly/comm-slack) -:information_source: If you are visiting this repo for the usage of BuildKit-only Dockerfile features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/syntax.md`](frontend/dockerfile/docs/syntax.md). +> **Note** +> +> If you are visiting this repo for the usage of BuildKit-only Dockerfile features +> like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/reference.md`](frontend/dockerfile/docs/reference.md) -:information_source: [BuildKit has been integrated to `docker build` since Docker 18.06 .](https://docs.docker.com/develop/develop-images/build_enhancements/) -You don't need to read this document unless you want to use the full-featured standalone version of BuildKit. +> **Note** +> +> [BuildKit has been integrated to `docker build` since Docker 18.09](https://docs.docker.com/develop/develop-images/build_enhancements/). +> You don't need to read this document unless you want to use the full-featured +> standalone version of BuildKit. 
 
@@ -39,12 +45,11 @@ You don't need to read this document unless you want to use the full-featured st
 
 - [Used by](#used-by)
 - [Quick start](#quick-start)
-  - [Starting the `buildkitd` daemon:](#starting-the-buildkitd-daemon)
+  - [Starting the `buildkitd` daemon](#starting-the-buildkitd-daemon)
   - [Exploring LLB](#exploring-llb)
   - [Exploring Dockerfiles](#exploring-dockerfiles)
     - [Building a Dockerfile with `buildctl`](#building-a-dockerfile-with-buildctl)
-    - [Building a Dockerfile using external frontend:](#building-a-dockerfile-using-external-frontend)
-    - [Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`](#building-a-dockerfile-with-experimental-features-like-run---mounttypebindcachetmpfssecretssh)
+    - [Building a Dockerfile using external frontend](#building-a-dockerfile-using-external-frontend)
   - [Output](#output)
     - [Image/Registry](#imageregistry)
     - [Local directory](#local-directory)
@@ -58,6 +63,8 @@ You don't need to read this document unless you want to use the full-featured st
     - [Registry (push image and cache separately)](#registry-push-image-and-cache-separately)
     - [Local directory](#local-directory-1)
     - [GitHub Actions cache (experimental)](#github-actions-cache-experimental)
+    - [S3 cache (experimental)](#s3-cache-experimental)
+    - [Azure Blob Storage cache (experimental)](#azure-blob-storage-cache-experimental)
   - [Consistent hashing](#consistent-hashing)
   - [Metadata](#metadata)
   - [Systemd socket activation](#systemd-socket-activation)
@@ -70,6 +77,8 @@ You don't need to read this document unless you want to use the full-featured st
   - [Opentracing support](#opentracing-support)
   - [Running BuildKit without root privileges](#running-buildkit-without-root-privileges)
   - [Building multi-platform images](#building-multi-platform-images)
+  - [Configuring `buildctl`](#configuring-buildctl)
+    - [Color Output Controls](#color-output-controls)
 - [Contributing](#contributing)
 
@@ -93,6 +102,8 @@ BuildKit is used by the following projects:
 - [Earthly earthfiles](https://github.com/vladaionescu/earthly)
 - [Gitpod](https://github.com/gitpod-io/gitpod)
 - [Dagger](https://dagger.io)
+- [envd](https://github.com/tensorchord/envd/)
+- [Depot](https://depot.dev)
 
 ## Quick start
 
@@ -114,7 +125,9 @@ $ brew install buildkit
 
 To build BuildKit from source, see [`.github/CONTRIBUTING.md`](./.github/CONTRIBUTING.md).
 
-### Starting the `buildkitd` daemon:
+For a `buildctl` reference, see [this document](./docs/buildctl.md).
+
+### Starting the `buildkitd` daemon
 
 You need to run `buildkitd` as the root user on the host.
 
@@ -130,7 +143,7 @@ By default, the OCI (runc) worker is used. You can set `--oci-worker=false --con
 
 We are open to adding more backends.
 
-To start the buildkitd daemon using systemd socket activiation, you can install the buildkit systemd unit files. See [Systemd socket activation](#systemd-socket-activation)
+To start the buildkitd daemon using systemd socket activation, you can install the buildkit systemd unit files. See [Systemd socket activation](#systemd-socket-activation)
 
 The buildkitd daemon listens gRPC API on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets.
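A quick smoke test that the daemon described above is up and reachable on the default socket (output will vary):

```bash
sudo buildkitd &
sudo buildctl debug workers   # lists registered workers once the daemon is reachable
```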
 
@@ -157,7 +170,10 @@ Currently, the following high-level languages has been implemented for LLB:
 - [HLB](https://github.com/openllb/hlb)
 - [Earthfile (Earthly)](https://github.com/earthly/earthly)
 - [Cargo Wharf (Rust)](https://github.com/denzp/cargo-wharf)
-- [Nix](https://github.com/AkihiroSuda/buildkit-nix)
+- [Nix](https://github.com/reproducible-containers/buildkit-nix)
+- [mopy (Python)](https://github.com/cmdjulian/mopy)
+- [envd (starlark)](https://github.com/tensorchord/envd/)
+- [Blubber](https://gitlab.wikimedia.org/repos/releng/blubber)
 - (open a PR to add your own language)
 
 ### Exploring Dockerfiles
@@ -184,7 +200,9 @@ buildctl build \
 
 `--local` exposes local source files from client to the builder. `context` and `dockerfile` are the names Dockerfile frontend looks for build context and Dockerfile location.
 
-#### Building a Dockerfile using external frontend:
+If the Dockerfile has a different filename it can be specified with `--opt filename=./Dockerfile-alternative`.
+
+#### Building a Dockerfile using external frontend
 
 External versions of the Dockerfile frontend are pushed to https://hub.docker.com/r/docker/dockerfile-upstream and https://hub.docker.com/r/docker/dockerfile and can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic build from master branch of this repository `docker/dockerfile-upstream:master` or `docker/dockerfile-upstream:master-labs` image can be used.
 
@@ -201,10 +219,6 @@ buildctl build \
     --opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
 ```
 
-#### Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`
-
-See [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md).
-
 ### Output
 
 By default, the build result and intermediate cache will only remain internally in BuildKit. An output needs to be specified to retrieve the result.
@@ -215,7 +229,13 @@ By default, the build result and intermediate cache will only remain internally
 buildctl build ... --output type=image,name=docker.io/username/image,push=true
 ```
 
-To export the cache embed with the image and pushing them to registry together, type `registry` is required to import the cache, you should specify `--export-cache type=inline` and `--import-cache type=registry,ref=...`. To export the cache to a local directy, you should specify `--export-cache type=local`.
+To export the image to multiple registries:
+
+```bash
+buildctl build ... --output type=image,\"name=docker.io/username/image,docker.io/username2/image2\",push=true
+```
+
+To export the cache embedded with the image and push them to the registry together, type `registry` is required to import the cache; you should specify `--export-cache type=inline` and `--import-cache type=registry,ref=...`. To export the cache to a local directory, you should specify `--export-cache type=local`.
 Details in [Export cache](#export-cache).
 
 ```bash
@@ -226,23 +246,35 @@ buildctl build ...\
 ```
 
 Keys supported by image output:
-* `name=[value]`: image name
+* `name=<image>`: specify image name(s)
 * `push=true`: push after creating the image
 * `push-by-digest=true`: push unnamed image
 * `registry.insecure=true`: push to insecure HTTP registry
 * `oci-mediatypes=true`: use OCI mediatypes in configuration JSON instead of Docker's
 * `unpack=true`: unpack image after creation (for use with containerd)
-* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
+* `dangling-name-prefix=<value>`: name image with `prefix@<digest>`, used for anonymous images
 * `name-canonical=true`: add additional canonical name `name@<digest>`
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
-* `buildinfo=true`: inline build info in [image config](docs/build-repro.md#image-config) (default `true`).
-* `buildinfo-attrs=true`: inline build info attributes in [image config](docs/build-repro.md#image-config) (default `false`).
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
+* `compression-level=<value>`: compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers)
+* `buildinfo=true`: attach inline build info in [image config](docs/buildinfo.md#image-config) (default `true`)
+* `buildinfo-attrs=true`: attach inline build info attributes in [image config](docs/buildinfo.md#image-config) (default `false`)
+* `store=true`: store the result images to the worker's (e.g. containerd) image store as well as ensures that the image has all blobs in the content store (default `true`). Ignored if the worker doesn't have image store (e.g. OCI worker).
+* `annotation.<key>=<value>`: attach an annotation with the respective `key` and `value` to the built image
+  * Using the extended syntaxes, `annotation-<type>.<key>=<value>`, `annotation[<platform>].<key>=<value>` and both combined with `annotation-<type>[<platform>].<key>=<value>`, allows configuring exactly where to attach the annotation.
+  * `<type>` specifies what object to attach to, and can be any of `manifest` (the default), `manifest-descriptor`, `index` and `index-descriptor`
+  * `<platform>` specifies which objects to attach to (by default, all), and is the same key passed into the `platform` opt, see [`docs/multi-platform.md`](docs/multi-platform.md).
+  * See [`docs/annotations.md`](docs/annotations.md) for more details.
 
 If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`. `$DOCKER_CONFIG` defaults to `~/.docker`.
 
+> **Warning**
+>
+> Build information along `buildinfo` and `buildinfo-attrs` attributes are
+> deprecated and will be removed in the next release. See the [Deprecated features page](./docs/deprecated.md)
+> for status and alternative recommendation about this feature.
+
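Pulling several of the image-output keys listed above into a single invocation — a hedged sketch (image name and annotation values are hypothetical):

```bash
# Push an image with OCI mediatypes, zstd layers, and an OCI source
# annotation on both the manifest (default) and the index.
buildctl build ... \
  --output "type=image,name=docker.io/username/image,push=true,oci-mediatypes=true,compression=zstd,annotation.org.opencontainers.image.source=https://github.com/username/repo,annotation-index.org.opencontainers.image.source=https://github.com/username/repo"
```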
 #### Local directory
 
 The local client will copy the files directly to the client. This is useful if BuildKit is being used for building something else than container images.
 
@@ -285,6 +317,7 @@ buildctl build ... --output type=docker,name=myimage | docker load
 buildctl build ... --output type=oci,dest=path/to/output.tar
 buildctl build ... --output type=oci > output.tar
 ```
+
 #### containerd image store
 
 The containerd worker needs to be used
@@ -296,7 +329,6 @@ ctr --namespace=buildkit images ls
 
 To change the containerd namespace, you need to change `worker.containerd.namespace` in [`/etc/buildkit/buildkitd.toml`](./docs/buildkitd.toml.md).
 
-
 ## Cache
 
 To show local build cache (`/var/lib/buildkit`):
@@ -356,17 +388,19 @@ buildctl build ... \
 
 `--export-cache` options:
 * `type=registry`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `ref=docker.io/user/image:tag`: reference
-* `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests. Since BuildKit `v0.8` defaults to true.
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcibly apply `compression` option to all layers.
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+  * `min`: only export layers for the resulting image
+  * `max`: export all the layers of all intermediate steps
+* `ref=<ref>`: specify repository reference to store cache, e.g. `docker.io/user/image:tag`
+* `oci-mediatypes=<true|false>`: whether to use OCI mediatypes in exported manifests (default: `true`, since BuildKit `v0.8`)
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`
+* `compression-level=<value>`: choose compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcibly apply `compression` option to all layers
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
 
 `--import-cache` options:
 * `type=registry`
-* `ref=docker.io/user/image:tag`: reference
+* `ref=<ref>`: specify repository reference to retrieve cache from, e.g. `docker.io/user/image:tag`
 
 #### Local directory
 
@@ -379,19 +413,22 @@ The directory layout conforms to OCI Image Spec v1.0.
 
 `--export-cache` options:
 * `type=local`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `dest=path/to/output-dir`: destination directory for cache exporter
-* `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests. Since BuildKit `v0.8` defaults to true.
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcibly apply `compression` option to all layers.
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+  * `min`: only export layers for the resulting image
+  * `max`: export all the layers of all intermediate steps
+* `dest=<path>`: destination directory for cache exporter
+* `tag=<tag>`: specify custom tag of image to write to local index (default: `latest`)
+* `oci-mediatypes=<true|false>`: whether to use OCI mediatypes in exported manifests (default `true`, since BuildKit `v0.8`)
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
+* `compression-level=<value>`: compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcibly apply `compression` option to all layers
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
 
 `--import-cache` options:
 * `type=local`
-* `src=path/to/input-dir`: source directory for cache importer
-* `digest=sha256:deadbeef`: digest of the manifest list to import.
-* `tag=customtag`: custom tag of image. Defaults "latest" tag digest in `index.json` is for digest, not for tag
+* `src=<path>`: source directory for cache importer
+* `tag=<tag>`: specify custom tag of image to read from local index (default: `latest`)
+* `digest=sha256:<sha256digest>`: specify explicit digest of the manifest list to import
 
 #### GitHub Actions cache (experimental)
 
@@ -402,31 +439,129 @@ buildctl build ... \
     --import-cache type=gha
 ```
 
-Github Actions cache saves both cache metadata and layers to GitHub's Cache service. This cache currently has a [size limit of 10GB](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#usage-limits-and-eviction-policy) that is shared accross different caches in the repo. If you exceed this limit, GitHub will save your cache but will begin evicting caches until the total size is less than 10 GB. Recycling caches too often can result in slower runtimes overall.
+GitHub Actions cache saves both cache metadata and layers to GitHub's Cache service. This cache currently has a [size limit of 10GB](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#usage-limits-and-eviction-policy) that is shared across different caches in the repo. If you exceed this limit, GitHub will save your cache but will begin evicting caches until the total size is less than 10 GB. Recycling caches too often can result in slower runtimes overall.
 
 Similarly to using [actions/cache](https://github.com/actions/cache), caches are [scoped by branch](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache), with the default and target branches being available to every branch.
 
-Following attributes are required to authenticate against the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication):
+The following attributes are required to authenticate against the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication):
 * `url`: Cache server URL (default `$ACTIONS_CACHE_URL`)
 * `token`: Access token (default `$ACTIONS_RUNTIME_TOKEN`)
 
 :information_source: This type of cache can be used with [Docker Build Push Action](https://github.com/docker/build-push-action)
-where `url` and `token` will be automatically set. To use this backend in a inline `run` step, you have to include [crazy-max/ghaction-github-runtime](https://github.com/crazy-max/ghaction-github-runtime)
+where `url` and `token` will be automatically set. To use this backend in an inline `run` step, you have to include [crazy-max/ghaction-github-runtime](https://github.com/crazy-max/ghaction-github-runtime)
 in your workflow to expose the runtime.
 
 `--export-cache` options:
 * `type=gha`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `scope=buildkit`: which scope cache object belongs to (default `buildkit`)
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+  * `min`: only export layers for the resulting image
+  * `max`: export all the layers of all intermediate steps
+* `scope=<scope>`: which scope cache object belongs to (default `buildkit`)
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
 
 `--import-cache` options:
 * `type=gha`
-* `scope=buildkit`: which scope cache object belongs to (default `buildkit`)
+* `scope=<scope>`: which scope cache object belongs to (default `buildkit`)
+
+#### S3 cache (experimental)
+
+```bash
+buildctl build ... \
+  --output type=image,name=docker.io/username/image,push=true \
+  --export-cache type=s3,region=eu-west-1,bucket=my_bucket,name=my_image \
+  --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=my_image
+```
+
+The following attributes are required:
+* `bucket`: AWS S3 bucket (default: `$AWS_BUCKET`)
+* `region`: AWS region (default: `$AWS_REGION`)
+
+Storage locations:
+* blobs: `s3://<bucket>/<prefix><blobs_prefix>/<sha256>`, default: `s3://<bucket>/blobs/<sha256>`
+* manifests: `s3://<bucket>/<prefix><manifests_prefix>/<name>`, default: `s3://<bucket>/manifests/<name>`
+
+S3 configuration:
+* `blobs_prefix`: global prefix to store / read blobs on s3 (default: `blobs/`)
+* `manifests_prefix`: global prefix to store / read manifests on s3 (default: `manifests/`)
+* `endpoint_url`: specify a specific S3 endpoint (default: empty)
+* `use_path_style`: if set to `true`, put the bucket name in the URL instead of in the hostname (default: `false`)
+
+AWS Authentication:
+
+The simplest way is to use an IAM Instance profile.
+Other options are:
+
+* Any system using environment variables / config files supported by the [AWS Go SDK](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html). The configuration must be available for the buildkit daemon, not for the client.
+* Using the following attributes:
+  * `access_key_id`: Access Key ID
+  * `secret_access_key`: Secret Access Key
+  * `session_token`: Session Token
+
+`--export-cache` options:
+* `type=s3`
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+  * `min`: only export layers for the resulting image
+  * `max`: export all the layers of all intermediate steps
+* `prefix=<prefix>`: set global prefix to store / read files on s3 (default: empty)
+* `name=<manifest>`: specify name of the manifest to use (default `buildkit`)
+  * Multiple manifest names can be specified at the same time, separated by `;`. The standard use case is to use the git sha1 as name, and the branch name as duplicate, and load both with 2 `import-cache` commands.
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
+
+`--import-cache` options:
+* `type=s3`
+* `prefix=<prefix>`: set global prefix to store / read files on s3 (default: empty)
+* `blobs_prefix=<prefix>`: set global prefix to store / read blobs on s3 (default: `blobs/`)
+* `manifests_prefix=<prefix>`: set global prefix to store / read manifests on s3 (default: `manifests/`)
+* `name=<manifest>`: name of the manifest to use (default `buildkit`)
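The `;`-separated multi-name pattern described above, spelled out as a sketch (bucket, region, and branch name are hypothetical):

```bash
# Export under both the commit sha and the branch name, then import from
# either one with two --import-cache flags.
buildctl build ... \
  --export-cache "type=s3,region=eu-west-1,bucket=my_bucket,mode=max,name=$(git rev-parse HEAD);main" \
  --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=$(git rev-parse HEAD) \
  --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=main
```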
+
+#### Azure Blob Storage cache (experimental)
+
+```bash
+buildctl build ... \
+  --output type=image,name=docker.io/username/image,push=true \
+  --export-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,name=my_image \
+  --import-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,name=my_image
+```
+
+The following attributes are required:
+* `account_url`: The Azure Blob Storage account URL (default: `$BUILDKIT_AZURE_STORAGE_ACCOUNT_URL`)
+
+Storage locations:
+* blobs: `<account_url>/<container>/<prefix><blobs_prefix>/<sha256>`, default: `<account_url>/<container>/blobs/<sha256>`
+* manifests: `<account_url>/<container>/<prefix><manifests_prefix>/<name>`, default: `<account_url>/<container>/manifests/<name>`
+
+Azure Blob Storage configuration:
+* `container`: The Azure Blob Storage container name (default: `buildkit-cache` or `$BUILDKIT_AZURE_STORAGE_CONTAINER` if set)
+* `blobs_prefix`: Global prefix to store / read blobs on the Azure Blob Storage container (`<container>`) (default: `blobs/`)
+* `manifests_prefix`: Global prefix to store / read manifests on the Azure Blob Storage container (`<container>`) (default: `manifests/`)
+
+Azure Blob Storage authentication:
+
+There are 2 options supported for Azure Blob Storage authentication:
+
+* Any system using environment variables supported by the [Azure SDK for Go](https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication). The configuration must be available for the buildkit daemon, not for the client.
+* Secret Access Key, using the `secret_access_key` attribute to specify the primary or secondary [account key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage) for your Azure Blob Storage account.
+
+`--export-cache` options:
+* `type=azblob`
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+  * `min`: only export layers for the resulting image
+  * `max`: export all the layers of all intermediate steps
+* `prefix=<prefix>`: set global prefix to store / read files on the Azure Blob Storage container (`<container>`) (default: empty)
+* `name=<manifest>`: specify name of the manifest to use (default: `buildkit`)
+  * Multiple manifest names can be specified at the same time, separated by `;`. The standard use case is to use the git sha1 as the name and the branch name as a duplicate, and to load both with two `import-cache` commands.
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
+
+`--import-cache` options:
+* `type=azblob`
+* `prefix=<prefix>`: set global prefix to store / read files on the Azure Blob Storage container (`<container>`) (default: empty)
+* `blobs_prefix=<prefix>`: set global prefix to store / read blobs on the Azure Blob Storage container (`<container>`) (default: `blobs/`)
+* `manifests_prefix=<prefix>`: set global prefix to store / read manifests on the Azure Blob Storage container (`<container>`) (default: `manifests/`)
+* `name=<manifest>`: name of the manifest to use (default: `buildkit`)
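+
+A sketch of the `secret_access_key` option (the account URL and the `$AZBLOB_ACCOUNT_KEY` variable are placeholders; prefer the SDK environment variables where possible so the key never appears on the command line):
+
+```bash
+buildctl build ... \
+  --export-cache "type=azblob,account_url=https://myaccount.blob.core.windows.net,secret_access_key=${AZBLOB_ACCOUNT_KEY},name=my_image" \
+  --import-cache "type=azblob,account_url=https://myaccount.blob.core.windows.net,secret_access_key=${AZBLOB_ACCOUNT_KEY},name=my_image"
+```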
 
 ### Consistent hashing
 
-If you have multiple BuildKit daemon instances but you don't want to use registry for sharing cache across the cluster,
+If you have multiple BuildKit daemon instances, but you don't want to use a registry for sharing cache across the cluster,
 consider client-side load balancing using consistent hashing.
 
 See [`./examples/kubernetes/consistenthash`](./examples/kubernetes/consistenthash).
 
@@ -446,26 +581,6 @@ jq '.' metadata.json
 ```
 
 ```json
 {
-  "containerimage.buildinfo": {
-    "frontend": "dockerfile.v0",
-    "attrs": {
-      "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
-      "filename": "Dockerfile",
-      "source": "docker/dockerfile:master"
-    },
-    "sources": [
-      {
-        "type": "docker-image",
-        "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
-        "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
-      },
-      {
-        "type": "docker-image",
-        "ref": "docker.io/library/alpine:3.13",
-        "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
-      }
-    ]
-  },
   "containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
   "containerimage.descriptor": {
     "annotations": {
@@ -512,7 +627,7 @@ buildctl \
 
 `buildctl build` can be called against a randomly load-balanced `buildkitd` daemon.
 
-See also [Consistent hashing](#consistenthashing) for client-side load balancing.
+See also [Consistent hashing](#consistent-hashing) for client-side load balancing.
 
 ## Containerizing BuildKit
 
@@ -603,7 +718,16 @@ Please refer to [`docs/rootless.md`](docs/rootless.md).
 
 Please refer to [`docs/multi-platform.md`](docs/multi-platform.md).
 
+### Configuring `buildctl`
+
+#### Color Output Controls
+
+`buildctl` supports modifying the colors used for terminal output. Set the environment variable `BUILDKIT_COLORS` to something like `run=green:warning=yellow:error=red:cancel=255,165,0` to choose the colors you would like to use. Setting `NO_COLOR` to anything disables colorized output entirely, as recommended by [no-color.org](https://no-color.org/).
+
+Parsing errors will be reported but ignored, and default color values will be used where needed.
+
+- [The list of pre-defined colors](https://github.com/moby/buildkit/blob/master/util/progress/progressui/colors.go).
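+
+For example (a sketch; the palette shown is the one from the paragraph above, and the final RGB triple is orange):
+
+```bash
+# Override the default palette; "cancel" uses an RGB triple.
+BUILDKIT_COLORS="run=green:warning=yellow:error=red:cancel=255,165,0" buildctl build ...
+
+# Disable colorized output entirely.
+NO_COLOR=1 buildctl build ...
+```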
+
 ## Contributing
 
 Want to contribute to BuildKit? Awesome! You can find information about contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)
-
diff --git a/api/services/control/control.pb.go b/api/services/control/control.pb.go
index 939f2c2ca7d8..2567a0d9700a 100644
--- a/api/services/control/control.pb.go
+++ b/api/services/control/control.pb.go
@@ -6,12 +6,14 @@ package moby_buildkit_v1
 import (
 	context "context"
 	fmt "fmt"
+	rpc "github.com/gogo/googleapis/google/rpc"
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/gogo/protobuf/proto"
 	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
 	_ "github.com/golang/protobuf/ptypes/timestamp"
 	types "github.com/moby/buildkit/api/types"
 	pb "github.com/moby/buildkit/solver/pb"
+	pb1 "github.com/moby/buildkit/sourcepolicy/pb"
 	github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements"
 	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
 	grpc "google.golang.org/grpc"
@@ -35,6 +37,34 @@ var _ = time.Kitchen
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
+type BuildHistoryEventType int32
+
+const (
+	BuildHistoryEventType_STARTED  BuildHistoryEventType = 0
+	BuildHistoryEventType_COMPLETE BuildHistoryEventType = 1
+	BuildHistoryEventType_DELETED  BuildHistoryEventType = 2
+)
+
+var BuildHistoryEventType_name = map[int32]string{
+	0: "STARTED",
+	1: "COMPLETE",
+	2: "DELETED",
+}
+
+var BuildHistoryEventType_value = map[string]int32{
+	"STARTED":  0,
+	"COMPLETE": 1,
+	"DELETED":  2,
+}
+
+func (x BuildHistoryEventType) String() string {
+	return proto.EnumName(BuildHistoryEventType_name, int32(x))
+}
+
+func (BuildHistoryEventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_0c5120591600887d, []int{0}
+}
+
 type PruneRequest struct {
 	Filter       []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"`
 	All          bool     `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
@@ -347,6 +377,8 @@ type SolveRequest struct {
 	Cache          CacheOptions                                              `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
 	Entitlements   []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
 	FrontendInputs map[string]*pb.Definition                                 `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Internal       bool                                                      `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"`
+	SourcePolicy   *pb1.Policy                                               `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
@@ -448,6 +480,20 @@ func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition {
 	return nil
 }
 
+func (m *SolveRequest) GetInternal() bool {
+	if m != nil {
+		return m.Internal
+	}
+	return false
+}
+
+func (m *SolveRequest) GetSourcePolicy() *pb1.Policy {
+	if m != nil {
+		return m.SourcePolicy
+	}
+	return nil
+}
+
 type CacheOptions struct {
 	// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
// When ExportRefDeprecated is set, the solver appends @@ -1240,978 +1286,1334 @@ func (m *ListWorkersResponse) GetRecord() []*types.WorkerRecord { return nil } -func init() { - proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") - proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") - proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") - proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") - proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry") - proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry") - proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry") - proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveResponse.ExporterResponseEntry") - proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") - proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") - proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") - proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") - proto.RegisterType((*VertexWarning)(nil), "moby.buildkit.v1.VertexWarning") - proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") - proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") - proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") +type InfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } - -var fileDescriptor_0c5120591600887d = []byte{ - // 1543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xef, 0xda, 0xf1, 0xd7, 0x8b, 0x13, 0xa5, 0xd3, 0x52, 0xad, 0x16, 0x91, 0xa4, 0xdb, 0x22, - 0x45, 0x55, 0xbb, 0x4e, 0x03, 0x85, 0x12, 0x3e, 0xd4, 0x3a, 0x2e, 0x34, 0x55, 0x23, 0xca, 0xa4, - 0xa5, 0x52, 0x0f, 0x48, 0x6b, 0x7b, 0xbc, 0x59, 0x65, 0xbd, 0xb3, 0xcc, 0xcc, 0xa6, 0x35, 0x7f, - 0x00, 0x67, 0x6e, 0xfc, 0x01, 0x1c, 0x38, 0x71, 0xe6, 0x2f, 0x40, 0xea, 0x91, 0x73, 0x0f, 0x01, - 0xf5, 0x0e, 0xe2, 0xc8, 0x11, 0xcd, 0xc7, 0x3a, 0xeb, 0xd8, 0xce, 0x57, 0x39, 0x79, 0xde, 0xcc, - 0x7b, 0xbf, 0x7d, 0x9f, 0x33, 0xef, 0x19, 0xe6, 0x3a, 0x34, 0x16, 0x8c, 0x46, 0x5e, 0xc2, 0xa8, - 0xa0, 0x68, 0xa1, 0x4f, 0xdb, 0x03, 0xaf, 0x9d, 0x86, 0x51, 0x77, 0x37, 0x14, 0xde, 0xde, 0x4d, - 0xe7, 0x46, 0x10, 0x8a, 0x9d, 0xb4, 0xed, 0x75, 0x68, 0xbf, 0x11, 0xd0, 0x80, 0x36, 0x14, 0x63, - 0x3b, 0xed, 0x29, 0x4a, 0x11, 0x6a, 0xa5, 0x01, 0x9c, 0xa5, 0x80, 0xd2, 0x20, 0x22, 0x07, 0x5c, - 0x22, 0xec, 0x13, 0x2e, 0xfc, 0x7e, 0x62, 0x18, 0xae, 0xe7, 0xf0, 0xe4, 0xc7, 0x1a, 0xd9, 0xc7, - 0x1a, 
0x9c, 0x46, 0x7b, 0x84, 0x35, 0x92, 0x76, 0x83, 0x26, 0xdc, 0x70, 0x37, 0xa6, 0x72, 0xfb, - 0x49, 0xd8, 0x10, 0x83, 0x84, 0xf0, 0xc6, 0x73, 0xca, 0x76, 0x09, 0xd3, 0x02, 0xee, 0xf7, 0x16, - 0xd4, 0x1f, 0xb1, 0x34, 0x26, 0x98, 0x7c, 0x9b, 0x12, 0x2e, 0xd0, 0x25, 0x28, 0xf7, 0xc2, 0x48, - 0x10, 0x66, 0x5b, 0xcb, 0xc5, 0x95, 0x1a, 0x36, 0x14, 0x5a, 0x80, 0xa2, 0x1f, 0x45, 0x76, 0x61, - 0xd9, 0x5a, 0xa9, 0x62, 0xb9, 0x44, 0x2b, 0x50, 0xdf, 0x25, 0x24, 0x69, 0xa5, 0xcc, 0x17, 0x21, - 0x8d, 0xed, 0xe2, 0xb2, 0xb5, 0x52, 0x6c, 0xce, 0xbc, 0xdc, 0x5f, 0xb2, 0xf0, 0xc8, 0x09, 0x72, - 0xa1, 0x26, 0xe9, 0xe6, 0x40, 0x10, 0x6e, 0xcf, 0xe4, 0xd8, 0x0e, 0xb6, 0xdd, 0x6b, 0xb0, 0xd0, - 0x0a, 0xf9, 0xee, 0x13, 0xee, 0x07, 0xc7, 0xe9, 0xe2, 0x3e, 0x80, 0xf3, 0x39, 0x5e, 0x9e, 0xd0, - 0x98, 0x13, 0x74, 0x0b, 0xca, 0x8c, 0x74, 0x28, 0xeb, 0x2a, 0xe6, 0xd9, 0xb5, 0x77, 0xbc, 0xc3, - 0xb1, 0xf1, 0x8c, 0x80, 0x64, 0xc2, 0x86, 0xd9, 0xfd, 0xb1, 0x08, 0xb3, 0xb9, 0x7d, 0x34, 0x0f, - 0x85, 0xcd, 0x96, 0x6d, 0x2d, 0x5b, 0x2b, 0x35, 0x5c, 0xd8, 0x6c, 0x21, 0x1b, 0x2a, 0x5b, 0xa9, - 0xf0, 0xdb, 0x11, 0x31, 0xb6, 0x67, 0x24, 0xba, 0x08, 0xa5, 0xcd, 0xf8, 0x09, 0x27, 0xca, 0xf0, - 0x2a, 0xd6, 0x04, 0x42, 0x30, 0xb3, 0x1d, 0x7e, 0x47, 0xb4, 0x99, 0x58, 0xad, 0x91, 0x03, 0xe5, - 0x47, 0x3e, 0x23, 0xb1, 0xb0, 0x4b, 0x12, 0xb7, 0x59, 0xb0, 0x2d, 0x6c, 0x76, 0x50, 0x13, 0x6a, - 0x1b, 0x8c, 0xf8, 0x82, 0x74, 0xef, 0x0a, 0xbb, 0xbc, 0x6c, 0xad, 0xcc, 0xae, 0x39, 0x9e, 0x4e, - 0x0a, 0x2f, 0x4b, 0x0a, 0xef, 0x71, 0x96, 0x14, 0xcd, 0xea, 0xcb, 0xfd, 0xa5, 0x73, 0x3f, 0xfc, - 0x21, 0x7d, 0x37, 0x14, 0x43, 0x77, 0x00, 0x1e, 0xfa, 0x5c, 0x3c, 0xe1, 0x0a, 0xa4, 0x72, 0x2c, - 0xc8, 0x8c, 0x02, 0xc8, 0xc9, 0xa0, 0x45, 0x00, 0xe5, 0x84, 0x0d, 0x9a, 0xc6, 0xc2, 0xae, 0x2a, - 0xdd, 0x73, 0x3b, 0x68, 0x19, 0x66, 0x5b, 0x84, 0x77, 0x58, 0x98, 0xa8, 0x50, 0xd7, 0x94, 0x7b, - 0xf2, 0x5b, 0x12, 0x41, 0x7b, 0xf0, 0xf1, 0x20, 0x21, 0x36, 0x28, 0x86, 0xdc, 0x8e, 0x8c, 0xe5, - 0xf6, 0x8e, 0xcf, 0x48, 0xd7, 0x9e, 0x55, 0xee, 0x32, 0x94, 0xf4, 0xaf, 0xf6, 0x04, 0xb7, 0xeb, - 0x2a, 0xc8, 0x19, 0xe9, 0xfe, 0x54, 0x86, 0xfa, 0xb6, 0xcc, 0xf1, 0x2c, 0x1d, 0x16, 0xa0, 0x88, - 0x49, 0xcf, 0xc4, 0x46, 0x2e, 0x91, 0x07, 0xd0, 0x22, 0xbd, 0x30, 0x0e, 0x95, 0x56, 0x05, 0x65, - 0xf8, 0xbc, 0x97, 0xb4, 0xbd, 0x83, 0x5d, 0x9c, 0xe3, 0x40, 0x0e, 0x54, 0xef, 0xbd, 0x48, 0x28, - 0x93, 0x29, 0x55, 0x54, 0x30, 0x43, 0x1a, 0x3d, 0x85, 0xb9, 0x6c, 0x7d, 0x57, 0x08, 0x26, 0x13, - 0x55, 0xa6, 0xd1, 0xcd, 0xf1, 0x34, 0xca, 0x2b, 0xe5, 0x8d, 0xc8, 0xdc, 0x8b, 0x05, 0x1b, 0xe0, - 0x51, 0x1c, 0x69, 0xe1, 0x36, 0xe1, 0x5c, 0x6a, 0xa8, 0xc2, 0x8f, 0x33, 0x52, 0xaa, 0xf3, 0x39, - 0xa3, 0xb1, 0x20, 0x71, 0x57, 0x85, 0xbe, 0x86, 0x87, 0xb4, 0x54, 0x27, 0x5b, 0x6b, 0x75, 0x2a, - 0x27, 0x52, 0x67, 0x44, 0xc6, 0xa8, 0x33, 0xb2, 0x87, 0xd6, 0xa1, 0xb4, 0xe1, 0x77, 0x76, 0x88, - 0x8a, 0xf2, 0xec, 0xda, 0xe2, 0x38, 0xa0, 0x3a, 0xfe, 0x52, 0x85, 0x95, 0xab, 0x42, 0x3d, 0x87, - 0xb5, 0x08, 0xfa, 0x06, 0xea, 0xf7, 0x62, 0x11, 0x8a, 0x88, 0xf4, 0x55, 0xc4, 0x6a, 0x32, 0x62, - 0xcd, 0xf5, 0x57, 0xfb, 0x4b, 0x1f, 0x4c, 0xbd, 0x78, 0x52, 0x11, 0x46, 0x0d, 0x92, 0x93, 0xf2, - 0x72, 0x10, 0x78, 0x04, 0x0f, 0x3d, 0x83, 0xf9, 0x4c, 0xd9, 0xcd, 0x38, 0x49, 0x05, 0xb7, 0x41, - 0x59, 0xbd, 0x76, 0x42, 0xab, 0xb5, 0x90, 0x36, 0xfb, 0x10, 0x92, 0x73, 0x07, 0xd0, 0x78, 0xac, - 0x64, 0x4e, 0xed, 0x92, 0x41, 0x96, 0x53, 0xbb, 0x64, 0x20, 0xcb, 0x7a, 0xcf, 0x8f, 0x52, 0x5d, - 0xee, 0x35, 0xac, 0x89, 0xf5, 0xc2, 0x6d, 0x4b, 0x22, 0x8c, 0xbb, 0xf7, 0x54, 0x08, 0x5f, 0xc1, - 0x85, 0x09, 0xaa, 0x4e, 0x80, 
0xb8, 0x9a, 0x87, 0x18, 0xcf, 0xe9, 0x03, 0x48, 0xf7, 0x97, 0x22, - 0xd4, 0xf3, 0x01, 0x43, 0xab, 0x70, 0x41, 0xdb, 0x89, 0x49, 0xaf, 0x45, 0x12, 0x46, 0x3a, 0xf2, - 0x96, 0x30, 0xe0, 0x93, 0x8e, 0xd0, 0x1a, 0x5c, 0xdc, 0xec, 0x9b, 0x6d, 0x9e, 0x13, 0x29, 0xa8, - 0x7a, 0x9c, 0x78, 0x86, 0x28, 0xbc, 0xa5, 0xa1, 0x94, 0x27, 0x72, 0x42, 0x45, 0x15, 0xb0, 0x8f, - 0x8e, 0xce, 0x2a, 0x6f, 0xa2, 0xac, 0x8e, 0xdb, 0x64, 0x5c, 0xf4, 0x29, 0x54, 0xf4, 0x41, 0x56, - 0x98, 0x57, 0x8e, 0xfe, 0x84, 0x06, 0xcb, 0x64, 0xa4, 0xb8, 0xb6, 0x83, 0xdb, 0xa5, 0x53, 0x88, - 0x1b, 0x19, 0xe7, 0x3e, 0x38, 0xd3, 0x55, 0x3e, 0x4d, 0x0a, 0xb8, 0x3f, 0x5b, 0x70, 0x7e, 0xec, - 0x43, 0xf2, 0xd5, 0x50, 0xf7, 0xa6, 0x86, 0x50, 0x6b, 0xd4, 0x82, 0x92, 0xae, 0xfc, 0x82, 0x52, - 0xd8, 0x3b, 0x81, 0xc2, 0x5e, 0xae, 0xec, 0xb5, 0xb0, 0x73, 0x1b, 0xe0, 0x6c, 0xc9, 0xea, 0xfe, - 0x6a, 0xc1, 0x9c, 0xa9, 0x32, 0xf3, 0xc4, 0xfa, 0xb0, 0x90, 0x95, 0x50, 0xb6, 0x67, 0x1e, 0xdb, - 0x5b, 0x53, 0x0b, 0x54, 0xb3, 0x79, 0x87, 0xe5, 0xb4, 0x8e, 0x63, 0x70, 0xce, 0x46, 0x96, 0x57, - 0x87, 0x58, 0x4f, 0xa5, 0xf9, 0x65, 0x98, 0xdb, 0x16, 0xbe, 0x48, 0xf9, 0xd4, 0x97, 0xc3, 0xfd, - 0xc7, 0x82, 0xf9, 0x8c, 0xc7, 0x58, 0xf7, 0x3e, 0x54, 0xf7, 0x08, 0x13, 0xe4, 0x05, 0xe1, 0xc6, - 0x2a, 0x7b, 0xdc, 0xaa, 0xaf, 0x15, 0x07, 0x1e, 0x72, 0xa2, 0x75, 0xa8, 0x72, 0x85, 0x43, 0xb2, - 0x40, 0x2d, 0x4e, 0x93, 0x32, 0xdf, 0x1b, 0xf2, 0xa3, 0x06, 0xcc, 0x44, 0x34, 0xe0, 0xa6, 0x66, - 0xde, 0x9e, 0x26, 0xf7, 0x90, 0x06, 0x58, 0x31, 0xa2, 0x8f, 0xa1, 0xfa, 0xdc, 0x67, 0x71, 0x18, - 0x07, 0x59, 0x15, 0x2c, 0x4d, 0x13, 0x7a, 0xaa, 0xf9, 0xf0, 0x50, 0x40, 0x76, 0x3a, 0x65, 0x7d, - 0x86, 0x1e, 0x40, 0xb9, 0x1b, 0x06, 0x84, 0x0b, 0xed, 0x92, 0xe6, 0x9a, 0xbc, 0xe4, 0x5f, 0xed, - 0x2f, 0x5d, 0xcb, 0xdd, 0xe2, 0x34, 0x21, 0xb1, 0x6c, 0x76, 0xfd, 0x30, 0x26, 0x8c, 0x37, 0x02, - 0x7a, 0x43, 0x8b, 0x78, 0x2d, 0xf5, 0x83, 0x0d, 0x82, 0xc4, 0x0a, 0xf5, 0x5d, 0xad, 0xee, 0x8b, - 0xb3, 0x61, 0x69, 0x04, 0x59, 0x06, 0xb1, 0xdf, 0x27, 0xe6, 0x6d, 0x56, 0x6b, 0xd9, 0x38, 0x74, - 0x64, 0x9e, 0x77, 0x55, 0x4b, 0x55, 0xc5, 0x86, 0x42, 0xeb, 0x50, 0xe1, 0xc2, 0x67, 0xf2, 0xce, - 0x29, 0x9d, 0xb0, 0xe3, 0xc9, 0x04, 0xd0, 0x67, 0x50, 0xeb, 0xd0, 0x7e, 0x12, 0x11, 0x29, 0x5d, - 0x3e, 0xa1, 0xf4, 0x81, 0x88, 0x4c, 0x3d, 0xc2, 0x18, 0x65, 0xaa, 0xd7, 0xaa, 0x61, 0x4d, 0xa0, - 0x0f, 0x61, 0x2e, 0x61, 0x34, 0x60, 0x84, 0xf3, 0x2f, 0x18, 0x4d, 0x13, 0xf3, 0xc2, 0x9e, 0x97, - 0x97, 0xf7, 0xa3, 0xfc, 0x01, 0x1e, 0xe5, 0x73, 0xff, 0x2e, 0x40, 0x3d, 0x9f, 0x22, 0x63, 0x4d, - 0xe8, 0x03, 0x28, 0xeb, 0x84, 0xd3, 0xb9, 0x7e, 0x36, 0x1f, 0x6b, 0x84, 0x89, 0x3e, 0xb6, 0xa1, - 0xd2, 0x49, 0x99, 0xea, 0x50, 0x75, 0xdf, 0x9a, 0x91, 0xd2, 0x52, 0x41, 0x85, 0x1f, 0x29, 0x1f, - 0x17, 0xb1, 0x26, 0x64, 0xd3, 0x3a, 0x9c, 0x53, 0x4e, 0xd7, 0xb4, 0x0e, 0xc5, 0xf2, 0xf1, 0xab, - 0xbc, 0x51, 0xfc, 0xaa, 0xa7, 0x8e, 0x9f, 0xfb, 0x9b, 0x05, 0xb5, 0x61, 0x6d, 0xe5, 0xbc, 0x6b, - 0xbd, 0xb1, 0x77, 0x47, 0x3c, 0x53, 0x38, 0x9b, 0x67, 0x2e, 0x41, 0x99, 0x0b, 0x46, 0xfc, 0xbe, - 0x1e, 0xa9, 0xb0, 0xa1, 0xe4, 0x2d, 0xd6, 0xe7, 0x81, 0x8a, 0x50, 0x1d, 0xcb, 0xa5, 0xfb, 0xaf, - 0x05, 0x73, 0x23, 0xe5, 0xfe, 0xbf, 0xda, 0x72, 0x11, 0x4a, 0x11, 0xd9, 0x23, 0x7a, 0xe8, 0x2b, - 0x62, 0x4d, 0xc8, 0x5d, 0xbe, 0x43, 0x99, 0x50, 0xca, 0xd5, 0xb1, 0x26, 0xa4, 0xce, 0x5d, 0x22, - 0xfc, 0x30, 0x52, 0xf7, 0x52, 0x1d, 0x1b, 0x4a, 0xea, 0x9c, 0xb2, 0xc8, 0x34, 0xbe, 0x72, 0x89, - 0x5c, 0x98, 0x09, 0xe3, 0x1e, 0x35, 0x69, 0xa3, 0x3a, 0x9b, 0x6d, 0x9a, 0xb2, 0x0e, 0xd9, 0x8c, - 0x7b, 0x14, 0xab, 0x33, 0x74, 0x19, 0xca, 0xcc, 0x8f, 
0x03, 0x92, 0x75, 0xbd, 0x35, 0xc9, 0x85, - 0xe5, 0x0e, 0x36, 0x07, 0xae, 0x0b, 0x75, 0x35, 0x38, 0x6e, 0x11, 0x2e, 0xc7, 0x14, 0x99, 0xd6, - 0x5d, 0x5f, 0xf8, 0xca, 0xec, 0x3a, 0x56, 0x6b, 0xf7, 0x3a, 0xa0, 0x87, 0x21, 0x17, 0x4f, 0xd5, - 0xc0, 0xcb, 0x8f, 0x9b, 0x2a, 0xb7, 0xe1, 0xc2, 0x08, 0xb7, 0x79, 0x16, 0x3e, 0x39, 0x34, 0x57, - 0x5e, 0x1d, 0xbf, 0x71, 0xd5, 0x5c, 0xed, 0x69, 0xc1, 0xd1, 0xf1, 0x72, 0xed, 0xaf, 0x22, 0x54, - 0x36, 0xf4, 0x5f, 0x06, 0xe8, 0x31, 0xd4, 0x86, 0x63, 0x2b, 0x72, 0xc7, 0x61, 0x0e, 0xcf, 0xbf, - 0xce, 0x95, 0x23, 0x79, 0x8c, 0x7e, 0xf7, 0xa1, 0xa4, 0x06, 0x78, 0x34, 0xe1, 0xdd, 0xc9, 0x4f, - 0xf6, 0xce, 0xd1, 0x03, 0xf1, 0xaa, 0x25, 0x91, 0xd4, 0xa3, 0x3d, 0x09, 0x29, 0xdf, 0x6e, 0x3b, - 0x4b, 0xc7, 0xbc, 0xf6, 0x68, 0x0b, 0xca, 0xe6, 0x26, 0x9b, 0xc4, 0x9a, 0x7f, 0x9a, 0x9d, 0xe5, - 0xe9, 0x0c, 0x1a, 0x6c, 0xd5, 0x42, 0x5b, 0xc3, 0x09, 0x6a, 0x92, 0x6a, 0xf9, 0x34, 0x70, 0x8e, - 0x39, 0x5f, 0xb1, 0x56, 0x2d, 0xf4, 0x0c, 0x66, 0x73, 0x81, 0x46, 0x13, 0x02, 0x3a, 0x9e, 0x35, - 0xce, 0xbb, 0xc7, 0x70, 0x69, 0x65, 0x9b, 0xf5, 0x97, 0xaf, 0x17, 0xad, 0xdf, 0x5f, 0x2f, 0x5a, - 0x7f, 0xbe, 0x5e, 0xb4, 0xda, 0x65, 0x55, 0xf2, 0xef, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x54, - 0x8e, 0x72, 0x11, 0x36, 0x12, 0x00, 0x00, +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{17} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo -// ControlClient is the client API for Control service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ControlClient interface { - DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) - Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) - Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) - Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) - ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) +type InfoResponse struct { + BuildkitVersion *types.BuildkitVersion `protobuf:"bytes,1,opt,name=buildkitVersion,proto3" json:"buildkitVersion,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type controlClient struct { - cc *grpc.ClientConn +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{18} } - -func NewControlClient(cc *grpc.ClientConn) ControlClient { - return &controlClient{cc} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { - out := new(DiskUsageResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...) - if err != nil { - return nil, err +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil } - -func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...) - if err != nil { - return nil, err - } - x := &controlPruneClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) } - -type Control_PruneClient interface { - Recv() (*UsageRecord, error) - grpc.ClientStream +func (m *InfoResponse) XXX_Size() int { + return m.Size() } - -type controlPruneClient struct { - grpc.ClientStream +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) } -func (x *controlPruneClient) Recv() (*UsageRecord, error) { - m := new(UsageRecord) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo -func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { - out := new(SolveResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...) 
- if err != nil { - return nil, err +func (m *InfoResponse) GetBuildkitVersion() *types.BuildkitVersion { + if m != nil { + return m.BuildkitVersion } - return out, nil + return nil } -func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...) - if err != nil { - return nil, err - } - x := &controlStatusClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil +type BuildHistoryRequest struct { + ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` + Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` + EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type Control_StatusClient interface { - Recv() (*StatusResponse, error) - grpc.ClientStream +func (m *BuildHistoryRequest) Reset() { *m = BuildHistoryRequest{} } +func (m *BuildHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryRequest) ProtoMessage() {} +func (*BuildHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{19} } - -type controlStatusClient struct { - grpc.ClientStream +func (m *BuildHistoryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (x *controlStatusClient) Recv() (*StatusResponse, error) { - m := new(StatusResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (m *BuildHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return m, nil } - -func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...) 
- if err != nil { - return nil, err - } - x := &controlSessionClient{stream} - return x, nil +func (m *BuildHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryRequest.Merge(m, src) } - -type Control_SessionClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream +func (m *BuildHistoryRequest) XXX_Size() int { + return m.Size() } - -type controlSessionClient struct { - grpc.ClientStream +func (m *BuildHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryRequest.DiscardUnknown(m) } -func (x *controlSessionClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} +var xxx_messageInfo_BuildHistoryRequest proto.InternalMessageInfo -func (x *controlSessionClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (m *BuildHistoryRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly } - return m, nil + return false } -func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { - out := new(ListWorkersResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...) - if err != nil { - return nil, err +func (m *BuildHistoryRequest) GetRef() string { + if m != nil { + return m.Ref } - return out, nil + return "" } -// ControlServer is the server API for Control service. -type ControlServer interface { - DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) - Prune(*PruneRequest, Control_PruneServer) error - Solve(context.Context, *SolveRequest) (*SolveResponse, error) - Status(*StatusRequest, Control_StatusServer) error - Session(Control_SessionServer) error - ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) +func (m *BuildHistoryRequest) GetEarlyExit() bool { + if m != nil { + return m.EarlyExit + } + return false } -// UnimplementedControlServer can be embedded to have forward compatible implementations. 
-type UnimplementedControlServer struct { +type BuildHistoryEvent struct { + Type BuildHistoryEventType `protobuf:"varint,1,opt,name=type,proto3,enum=moby.buildkit.v1.BuildHistoryEventType" json:"type,omitempty"` + Record *BuildHistoryRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented") +func (m *BuildHistoryEvent) Reset() { *m = BuildHistoryEvent{} } +func (m *BuildHistoryEvent) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryEvent) ProtoMessage() {} +func (*BuildHistoryEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{20} } -func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error { - return status.Errorf(codes.Unimplemented, "method Prune not implemented") +func (m *BuildHistoryEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +func (m *BuildHistoryEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error { - return status.Errorf(codes.Unimplemented, "method Status not implemented") +func (m *BuildHistoryEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryEvent.Merge(m, src) } -func (*UnimplementedControlServer) Session(srv Control_SessionServer) error { - return status.Errorf(codes.Unimplemented, "method Session not implemented") +func (m *BuildHistoryEvent) XXX_Size() int { + return m.Size() } -func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") +func (m *BuildHistoryEvent) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryEvent.DiscardUnknown(m) } -func RegisterControlServer(s *grpc.Server, srv ControlServer) { - s.RegisterService(&_Control_serviceDesc, srv) +var xxx_messageInfo_BuildHistoryEvent proto.InternalMessageInfo + +func (m *BuildHistoryEvent) GetType() BuildHistoryEventType { + if m != nil { + return m.Type + } + return BuildHistoryEventType_STARTED } -func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DiskUsageRequest) - if err := dec(in); err != nil { - return nil, err +func (m *BuildHistoryEvent) GetRecord() *BuildHistoryRecord { + if m != nil { + return m.Record } - if interceptor == nil { - return srv.(ControlServer).DiskUsage(ctx, in) + return nil +} + +type BuildHistoryRecord struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendAttrs map[string]string 
`protobuf:"bytes,3,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Exporters []*Exporter `protobuf:"bytes,4,rep,name=Exporters,proto3" json:"Exporters,omitempty"` + Error *rpc.Status `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + CreatedAt *time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt,omitempty"` + CompletedAt *time.Time `protobuf:"bytes,7,opt,name=CompletedAt,proto3,stdtime" json:"CompletedAt,omitempty"` + Logs *Descriptor `protobuf:"bytes,8,opt,name=logs,proto3" json:"logs,omitempty"` + ExporterResponse map[string]string `protobuf:"bytes,9,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Result *BuildResultInfo `protobuf:"bytes,10,opt,name=Result,proto3" json:"Result,omitempty"` + Results map[string]*BuildResultInfo `protobuf:"bytes,11,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Generation int32 `protobuf:"varint,12,opt,name=Generation,proto3" json:"Generation,omitempty"` + Trace *Descriptor `protobuf:"bytes,13,opt,name=trace,proto3" json:"trace,omitempty"` + Pinned bool `protobuf:"varint,14,opt,name=pinned,proto3" json:"pinned,omitempty"` + NumCachedSteps int32 `protobuf:"varint,15,opt,name=numCachedSteps,proto3" json:"numCachedSteps,omitempty"` + NumTotalSteps int32 `protobuf:"varint,16,opt,name=numTotalSteps,proto3" json:"numTotalSteps,omitempty"` + NumCompletedSteps int32 `protobuf:"varint,17,opt,name=numCompletedSteps,proto3" json:"numCompletedSteps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildHistoryRecord) Reset() { *m = BuildHistoryRecord{} } +func (m *BuildHistoryRecord) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryRecord) ProtoMessage() {} +func (*BuildHistoryRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{21} +} +func (m *BuildHistoryRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildHistoryRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/DiskUsage", +} +func (m *BuildHistoryRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryRecord.Merge(m, src) +} +func (m *BuildHistoryRecord) XXX_Size() int { + return m.Size() +} +func (m *BuildHistoryRecord) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildHistoryRecord proto.InternalMessageInfo + +func (m *BuildHistoryRecord) GetRef() string { + if m != nil { + return m.Ref } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) + return "" +} + +func (m *BuildHistoryRecord) GetFrontend() string { + if m != nil { + return m.Frontend } - return interceptor(ctx, in, info, handler) + return "" } -func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { - m := 
new(PruneRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (m *BuildHistoryRecord) GetFrontendAttrs() map[string]string { + if m != nil { + return m.FrontendAttrs } - return srv.(ControlServer).Prune(m, &controlPruneServer{stream}) + return nil } -type Control_PruneServer interface { - Send(*UsageRecord) error - grpc.ServerStream +func (m *BuildHistoryRecord) GetExporters() []*Exporter { + if m != nil { + return m.Exporters + } + return nil } -type controlPruneServer struct { - grpc.ServerStream +func (m *BuildHistoryRecord) GetError() *rpc.Status { + if m != nil { + return m.Error + } + return nil } -func (x *controlPruneServer) Send(m *UsageRecord) error { - return x.ServerStream.SendMsg(m) +func (m *BuildHistoryRecord) GetCreatedAt() *time.Time { + if m != nil { + return m.CreatedAt + } + return nil } -func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SolveRequest) - if err := dec(in); err != nil { - return nil, err +func (m *BuildHistoryRecord) GetCompletedAt() *time.Time { + if m != nil { + return m.CompletedAt } - if interceptor == nil { - return srv.(ControlServer).Solve(ctx, in) + return nil +} + +func (m *BuildHistoryRecord) GetLogs() *Descriptor { + if m != nil { + return m.Logs } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/Solve", + return nil +} + +func (m *BuildHistoryRecord) GetExporterResponse() map[string]string { + if m != nil { + return m.ExporterResponse } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) + return nil +} + +func (m *BuildHistoryRecord) GetResult() *BuildResultInfo { + if m != nil { + return m.Result } - return interceptor(ctx, in, info, handler) + return nil } -func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StatusRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (m *BuildHistoryRecord) GetResults() map[string]*BuildResultInfo { + if m != nil { + return m.Results } - return srv.(ControlServer).Status(m, &controlStatusServer{stream}) + return nil } -type Control_StatusServer interface { - Send(*StatusResponse) error - grpc.ServerStream +func (m *BuildHistoryRecord) GetGeneration() int32 { + if m != nil { + return m.Generation + } + return 0 } -type controlStatusServer struct { - grpc.ServerStream +func (m *BuildHistoryRecord) GetTrace() *Descriptor { + if m != nil { + return m.Trace + } + return nil } -func (x *controlStatusServer) Send(m *StatusResponse) error { - return x.ServerStream.SendMsg(m) +func (m *BuildHistoryRecord) GetPinned() bool { + if m != nil { + return m.Pinned + } + return false } -func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ControlServer).Session(&controlSessionServer{stream}) +func (m *BuildHistoryRecord) GetNumCachedSteps() int32 { + if m != nil { + return m.NumCachedSteps + } + return 0 } -type Control_SessionServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream +func (m *BuildHistoryRecord) GetNumTotalSteps() int32 { + if m != nil { + return m.NumTotalSteps + } + return 0 } -type controlSessionServer struct { - grpc.ServerStream +func (m *BuildHistoryRecord) GetNumCompletedSteps() int32 { + if m != nil { + return m.NumCompletedSteps + } + return 0 } -func (x *controlSessionServer) Send(m 
*BytesMessage) error { - return x.ServerStream.SendMsg(m) +type UpdateBuildHistoryRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"` + Delete bool `protobuf:"varint,3,opt,name=Delete,proto3" json:"Delete,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (x *controlSessionServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err +func (m *UpdateBuildHistoryRequest) Reset() { *m = UpdateBuildHistoryRequest{} } +func (m *UpdateBuildHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateBuildHistoryRequest) ProtoMessage() {} +func (*UpdateBuildHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{22} +} +func (m *UpdateBuildHistoryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateBuildHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateBuildHistoryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return m, nil +} +func (m *UpdateBuildHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateBuildHistoryRequest.Merge(m, src) +} +func (m *UpdateBuildHistoryRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateBuildHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateBuildHistoryRequest.DiscardUnknown(m) } -func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListWorkersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServer).ListWorkers(ctx, in) +var xxx_messageInfo_UpdateBuildHistoryRequest proto.InternalMessageInfo + +func (m *UpdateBuildHistoryRequest) GetRef() string { + if m != nil { + return m.Ref } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/ListWorkers", + return "" +} + +func (m *UpdateBuildHistoryRequest) GetPinned() bool { + if m != nil { + return m.Pinned } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + return false +} + +func (m *UpdateBuildHistoryRequest) GetDelete() bool { + if m != nil { + return m.Delete } - return interceptor(ctx, in, info, handler) + return false } -var _Control_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.v1.Control", - HandlerType: (*ControlServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "DiskUsage", - Handler: _Control_DiskUsage_Handler, - }, - { - MethodName: "Solve", - Handler: _Control_Solve_Handler, - }, - { - MethodName: "ListWorkers", - Handler: _Control_ListWorkers_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Prune", - Handler: _Control_Prune_Handler, - ServerStreams: true, - }, - { - StreamName: "Status", - Handler: _Control_Status_Handler, - ServerStreams: true, - }, - { - StreamName: "Session", - Handler: _Control_Session_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "control.proto", +type UpdateBuildHistoryResponse struct { + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PruneRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *UpdateBuildHistoryResponse) Reset() { *m = UpdateBuildHistoryResponse{} } +func (m *UpdateBuildHistoryResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateBuildHistoryResponse) ProtoMessage() {} +func (*UpdateBuildHistoryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{23} +} +func (m *UpdateBuildHistoryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateBuildHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateBuildHistoryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *UpdateBuildHistoryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateBuildHistoryResponse.Merge(m, src) +} +func (m *UpdateBuildHistoryResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateBuildHistoryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateBuildHistoryResponse.DiscardUnknown(m) } -func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +var xxx_messageInfo_UpdateBuildHistoryResponse proto.InternalMessageInfo + +type Descriptor struct { + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Descriptor) Reset() { *m = Descriptor{} } +func (m *Descriptor) String() string { return proto.CompactTextString(m) } +func (*Descriptor) ProtoMessage() {} +func (*Descriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{24} +} +func (m *Descriptor) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - if m.KeepBytes != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes)) - i-- - dAtA[i] = 0x20 +} +func (m *Descriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_Descriptor.Merge(m, src) +} +func (m *Descriptor) XXX_Size() int { + return m.Size() +} +func (m *Descriptor) XXX_DiscardUnknown() { + 
xxx_messageInfo_Descriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_Descriptor proto.InternalMessageInfo + +func (m *Descriptor) GetMediaType() string { + if m != nil { + return m.MediaType } - if m.KeepDuration != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration)) - i-- - dAtA[i] = 0x18 + return "" +} + +func (m *Descriptor) GetSize_() int64 { + if m != nil { + return m.Size_ } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return 0 +} + +func (m *Descriptor) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) - i-- - dAtA[i] = 0xa + return nil +} + +type BuildResultInfo struct { + Result *Descriptor `protobuf:"bytes,1,opt,name=Result,proto3" json:"Result,omitempty"` + Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildResultInfo) Reset() { *m = BuildResultInfo{} } +func (m *BuildResultInfo) String() string { return proto.CompactTextString(m) } +func (*BuildResultInfo) ProtoMessage() {} +func (*BuildResultInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{25} +} +func (m *BuildResultInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildResultInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildResultInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *BuildResultInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildResultInfo.Merge(m, src) +} +func (m *BuildResultInfo) XXX_Size() int { + return m.Size() +} +func (m *BuildResultInfo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildResultInfo.DiscardUnknown(m) } -func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo + +func (m *BuildResultInfo) GetResult() *Descriptor { + if m != nil { + return m.Result } - return dAtA[:n], nil + return nil } -func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *BuildResultInfo) GetAttestations() []*Descriptor { + if m != nil { + return m.Attestations + } + return nil } -func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) - i-- - dAtA[i] = 0xa +type Exporter struct { + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exporter) Reset() { *m = Exporter{} } +func (m *Exporter) String() string { return proto.CompactTextString(m) } +func (*Exporter) ProtoMessage() {} +func (*Exporter) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{26} +} +func (m *Exporter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exporter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exporter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *Exporter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exporter.Merge(m, src) +} +func (m *Exporter) XXX_Size() int { + return m.Size() +} +func (m *Exporter) XXX_DiscardUnknown() { + xxx_messageInfo_Exporter.DiscardUnknown(m) } -func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_Exporter proto.InternalMessageInfo + +func (m *Exporter) GetType() string { + if m != nil { + return m.Type } - return dAtA[:n], nil + return "" } -func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Exporter) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil } -func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Record) > 0 { - for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +func init() { + proto.RegisterEnum("moby.buildkit.v1.BuildHistoryEventType", BuildHistoryEventType_name, BuildHistoryEventType_value) + proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") + proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") + proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") + proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry") + proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry") + proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") + proto.RegisterMapType((map[string]string)(nil), 
"moby.buildkit.v1.SolveResponse.ExporterResponseEntry") + proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") + proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") + proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") + proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") + proto.RegisterType((*VertexWarning)(nil), "moby.buildkit.v1.VertexWarning") + proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") + proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") + proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") + proto.RegisterType((*InfoRequest)(nil), "moby.buildkit.v1.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "moby.buildkit.v1.InfoResponse") + proto.RegisterType((*BuildHistoryRequest)(nil), "moby.buildkit.v1.BuildHistoryRequest") + proto.RegisterType((*BuildHistoryEvent)(nil), "moby.buildkit.v1.BuildHistoryEvent") + proto.RegisterType((*BuildHistoryRecord)(nil), "moby.buildkit.v1.BuildHistoryRecord") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.ExporterResponseEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.FrontendAttrsEntry") + proto.RegisterMapType((map[string]*BuildResultInfo)(nil), "moby.buildkit.v1.BuildHistoryRecord.ResultsEntry") + proto.RegisterType((*UpdateBuildHistoryRequest)(nil), "moby.buildkit.v1.UpdateBuildHistoryRequest") + proto.RegisterType((*UpdateBuildHistoryResponse)(nil), "moby.buildkit.v1.UpdateBuildHistoryResponse") + proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry") + proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo") + proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry") } -func (m *UsageRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } + +var fileDescriptor_0c5120591600887d = []byte{ + // 2261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6e, 0x1b, 0xc9, + 0x11, 0xde, 0x21, 0x25, 0xfe, 0x14, 0x29, 0x59, 0x6a, 0x7b, 0x8d, 0xc9, 0xc4, 0x2b, 0xc9, 0xb3, + 0x76, 0x22, 0x38, 0xf6, 0x50, 0xcb, 0xac, 0x63, 0xaf, 0x9c, 0x38, 0x16, 0x45, 0x66, 0x2d, 0xc7, + 0x82, 0xb5, 0x2d, 0x79, 0x0d, 0x2c, 0xe0, 0x04, 0x23, 0xb2, 0x45, 0x0f, 0x34, 0x9c, 0x99, 0x74, + 0x37, 0xb5, 0xe6, 0x3e, 0x40, 0x80, 0xcd, 0x21, 0xc8, 0x25, 0xc8, 0x25, 0xf7, 0x9c, 0x72, 0xce, + 0x13, 0x04, 0xf0, 0x31, 0xe7, 0x3d, 0x38, 0x81, 0x1f, 0x20, 0xc8, 0x31, 0xb9, 0x05, 0xfd, 0x33, + 0xe4, 0x90, 0x33, 0x94, 0x28, 0xdb, 0x27, 0x76, 0x75, 0xd7, 0x57, 0x53, 0x55, 0x5d, 0x5d, 0x5d, + 0xd5, 0x84, 0x85, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0xa5, 0x5e, 0x78, + 0x38, 0x70, 0x0e, 0xfb, 0x9e, 0xdf, 0x39, 0xf6, 0xb8, 0x73, 0xf2, 0x89, 0x75, 0xab, 0xeb, 0xf1, + 0x17, 0xfd, 0x43, 0xa7, 0x1d, 0xf6, 0x6a, 0xdd, 0xb0, 0x1b, 0xd6, 0x24, 0xe3, 0x61, 0xff, 0x48, + 0x52, 0x92, 0x90, 0x23, 0x25, 0xc0, 0x5a, 0xed, 0x86, 0x61, 0xd7, 0x27, 0x23, 0x2e, 0xee, 0xf5, + 0x08, 0xe3, 
0x6e, 0x2f, 0xd2, 0x0c, 0x37, 0x13, 0xf2, 0xc4, 0xc7, 0x6a, 0xf1, 0xc7, 0x6a, 0x2c, + 0xf4, 0x4f, 0x08, 0xad, 0x45, 0x87, 0xb5, 0x30, 0x62, 0x9a, 0xbb, 0x36, 0x95, 0xdb, 0x8d, 0xbc, + 0x1a, 0x1f, 0x44, 0x84, 0xd5, 0xbe, 0x0e, 0xe9, 0x31, 0xa1, 0x1a, 0x50, 0x9f, 0x54, 0x57, 0xe9, + 0xe3, 0x46, 0x1e, 0xd3, 0xc3, 0x1a, 0x8d, 0xda, 0x35, 0xc6, 0x5d, 0xde, 0x8f, 0x3f, 0x72, 0xfb, + 0x14, 0x95, 0xfa, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x03, 0xa1, 0x98, 0x1a, 0x29, 0x98, 0xfd, + 0x5b, 0x03, 0xaa, 0x7b, 0xb4, 0x1f, 0x10, 0x4c, 0x7e, 0xd3, 0x27, 0x8c, 0xa3, 0xcb, 0x50, 0x38, + 0xf2, 0x7c, 0x4e, 0xa8, 0x69, 0xac, 0xe5, 0xd7, 0xcb, 0x58, 0x53, 0x68, 0x09, 0xf2, 0xae, 0xef, + 0x9b, 0xb9, 0x35, 0x63, 0xbd, 0x84, 0xc5, 0x10, 0xad, 0x43, 0xf5, 0x98, 0x90, 0xa8, 0xd9, 0xa7, + 0x2e, 0xf7, 0xc2, 0xc0, 0xcc, 0xaf, 0x19, 0xeb, 0xf9, 0xc6, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e, + 0x5b, 0x41, 0x36, 0x94, 0x05, 0xdd, 0x18, 0x70, 0xc2, 0xcc, 0xb9, 0x04, 0xdb, 0x68, 0xda, 0xbe, + 0x01, 0x4b, 0x4d, 0x8f, 0x1d, 0x3f, 0x65, 0x6e, 0xf7, 0x2c, 0x5d, 0xec, 0x47, 0xb0, 0x9c, 0xe0, + 0x65, 0x51, 0x18, 0x30, 0x82, 0x6e, 0x43, 0x81, 0x92, 0x76, 0x48, 0x3b, 0x92, 0xb9, 0x52, 0xff, + 0xc8, 0x99, 0x0c, 0x03, 0x47, 0x03, 0x04, 0x13, 0xd6, 0xcc, 0xf6, 0x9f, 0xf2, 0x50, 0x49, 0xcc, + 0xa3, 0x45, 0xc8, 0xed, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe3, 0xdc, 0x4e, 0x13, 0x99, 0x50, + 0xdc, 0xed, 0x73, 0xf7, 0xd0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x97, 0x60, 0x7e, 0x27, 0x78, 0xca, + 0x88, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0xcc, 0xed, 0x7b, 0xdf, 0x10, 0x65, 0x26, 0x96, 0x63, + 0x64, 0x41, 0x61, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0xbc, 0x90, 0xdb, 0xc8, 0x99, 0x06, 0xd6, 0x33, + 0xa8, 0x01, 0xe5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0x2c, 0xac, 0x19, 0xeb, 0x95, 0xba, + 0xe5, 0xa8, 0x4d, 0x76, 0xe2, 0xf8, 0x73, 0x0e, 0xe2, 0xf8, 0x6b, 0x94, 0x5e, 0xbd, 0x5e, 0xfd, + 0xe0, 0x0f, 0xff, 0x14, 0xbe, 0x1b, 0xc2, 0xd0, 0x03, 0x80, 0xc7, 0x2e, 0xe3, 0x4f, 0x99, 0x14, + 0x52, 0x3c, 0x53, 0xc8, 0x9c, 0x14, 0x90, 0xc0, 0xa0, 0x15, 0x00, 0xe9, 0x84, 0xed, 0xb0, 0x1f, + 0x70, 0xb3, 0x24, 0x75, 0x4f, 0xcc, 0xa0, 0x35, 0xa8, 0x34, 0x09, 0x6b, 0x53, 0x2f, 0x92, 0x5b, + 0x5d, 0x96, 0xee, 0x49, 0x4e, 0x09, 0x09, 0xca, 0x83, 0x07, 0x83, 0x88, 0x98, 0x20, 0x19, 0x12, + 0x33, 0x62, 0x2f, 0xf7, 0x5f, 0xb8, 0x94, 0x74, 0xcc, 0x8a, 0x74, 0x97, 0xa6, 0x84, 0x7f, 0x95, + 0x27, 0x98, 0x59, 0x95, 0x9b, 0x1c, 0x93, 0xf6, 0xef, 0x8a, 0x50, 0xdd, 0x17, 0xc7, 0x29, 0x0e, + 0x87, 0x25, 0xc8, 0x63, 0x72, 0xa4, 0xf7, 0x46, 0x0c, 0x91, 0x03, 0xd0, 0x24, 0x47, 0x5e, 0xe0, + 0x49, 0xad, 0x72, 0xd2, 0xf0, 0x45, 0x27, 0x3a, 0x74, 0x46, 0xb3, 0x38, 0xc1, 0x81, 0x2c, 0x28, + 0xb5, 0x5e, 0x46, 0x21, 0x15, 0x21, 0x95, 0x97, 0x62, 0x86, 0x34, 0x7a, 0x06, 0x0b, 0xf1, 0x78, + 0x8b, 0x73, 0x2a, 0x02, 0x55, 0x84, 0xd1, 0x27, 0xe9, 0x30, 0x4a, 0x2a, 0xe5, 0x8c, 0x61, 0x5a, + 0x01, 0xa7, 0x03, 0x3c, 0x2e, 0x47, 0x58, 0xb8, 0x4f, 0x18, 0x13, 0x1a, 0xca, 0xed, 0xc7, 0x31, + 0x29, 0xd4, 0xf9, 0x05, 0x0d, 0x03, 0x4e, 0x82, 0x8e, 0xdc, 0xfa, 0x32, 0x1e, 0xd2, 0x42, 0x9d, + 0x78, 0xac, 0xd4, 0x29, 0xce, 0xa4, 0xce, 0x18, 0x46, 0xab, 0x33, 0x36, 0x87, 0x36, 0x61, 0x7e, + 0xdb, 0x6d, 0xbf, 0x20, 0x72, 0x97, 0x2b, 0xf5, 0x95, 0xb4, 0x40, 0xb9, 0xfc, 0x44, 0x6e, 0x2b, + 0x93, 0x07, 0xf5, 0x03, 0xac, 0x20, 0xe8, 0x57, 0x50, 0x6d, 0x05, 0xdc, 0xe3, 0x3e, 0xe9, 0xc9, + 0x1d, 0x2b, 0x8b, 0x1d, 0x6b, 0x6c, 0x7e, 0xf7, 0x7a, 0xf5, 0x27, 0x53, 0xd3, 0x4f, 0x9f, 0x7b, + 0x7e, 0x8d, 0x24, 0x50, 0x4e, 0x42, 0x04, 0x1e, 0x93, 0x87, 0xbe, 0x82, 0xc5, 0x58, 0xd9, 0x9d, + 0x20, 0xea, 0x73, 0x66, 0x82, 0xb4, 
0xba, 0x3e, 0xa3, 0xd5, 0x0a, 0xa4, 0xcc, 0x9e, 0x90, 0x24, + 0x9c, 0xbd, 0x13, 0x70, 0x42, 0x03, 0xd7, 0xd7, 0x21, 0x38, 0xa4, 0xd1, 0x8e, 0x88, 0x34, 0x91, + 0x25, 0xf7, 0x64, 0x6e, 0x34, 0xab, 0xd2, 0x35, 0xd7, 0xd3, 0x5f, 0x4d, 0xe6, 0x52, 0x47, 0x31, + 0xe3, 0x31, 0xa8, 0xf5, 0x00, 0x50, 0x3a, 0x24, 0x44, 0xe8, 0x1e, 0x93, 0x41, 0x1c, 0xba, 0xc7, + 0x64, 0x20, 0xb2, 0xc7, 0x89, 0xeb, 0xf7, 0x55, 0x56, 0x29, 0x63, 0x45, 0x6c, 0xe6, 0xee, 0x1a, + 0x42, 0x42, 0x7a, 0x17, 0xcf, 0x25, 0xe1, 0x0b, 0xb8, 0x98, 0xe1, 0x91, 0x0c, 0x11, 0xd7, 0x92, + 0x22, 0xd2, 0x47, 0x67, 0x24, 0xd2, 0xfe, 0x6b, 0x1e, 0xaa, 0xc9, 0xb8, 0x40, 0x1b, 0x70, 0x51, + 0xd9, 0x89, 0xc9, 0x51, 0x93, 0x44, 0x94, 0xb4, 0x45, 0x32, 0xd2, 0xc2, 0xb3, 0x96, 0x50, 0x1d, + 0x2e, 0xed, 0xf4, 0xf4, 0x34, 0x4b, 0x40, 0x72, 0xf2, 0xd8, 0x67, 0xae, 0xa1, 0x10, 0x3e, 0x54, + 0xa2, 0xa4, 0x27, 0x12, 0xa0, 0xbc, 0x8c, 0x8b, 0xcf, 0x4e, 0x0f, 0x5e, 0x27, 0x13, 0xab, 0xc2, + 0x23, 0x5b, 0x2e, 0xfa, 0x19, 0x14, 0xd5, 0x42, 0x7c, 0xfe, 0x3f, 0x3e, 0xfd, 0x13, 0x4a, 0x58, + 0x8c, 0x11, 0x70, 0x65, 0x07, 0x33, 0xe7, 0xcf, 0x01, 0xd7, 0x18, 0xeb, 0x21, 0x58, 0xd3, 0x55, + 0x3e, 0x4f, 0x08, 0xd8, 0x7f, 0x31, 0x60, 0x39, 0xf5, 0x21, 0x71, 0x39, 0xc9, 0xf4, 0xac, 0x44, + 0xc8, 0x31, 0x6a, 0xc2, 0xbc, 0x4a, 0x30, 0x39, 0xa9, 0xb0, 0x33, 0x83, 0xc2, 0x4e, 0x22, 0xbb, + 0x28, 0xb0, 0x75, 0x17, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0xfa, 0x30, 0xeb, 0x9b, + 0xdc, 0x85, 0xa5, 0xf8, 0x08, 0xc5, 0x73, 0xfa, 0x4e, 0xbf, 0x3d, 0x35, 0x0f, 0x28, 0x36, 0x67, + 0x12, 0xa7, 0x74, 0x4c, 0x89, 0xb3, 0xb6, 0xe3, 0xb8, 0x9a, 0x60, 0x3d, 0x97, 0xe6, 0x57, 0x61, + 0x61, 0x5f, 0x96, 0x60, 0x53, 0x2f, 0x28, 0xfb, 0x3f, 0x06, 0x2c, 0xc6, 0x3c, 0xda, 0xba, 0x4f, + 0xa1, 0x74, 0x42, 0x28, 0x27, 0x2f, 0x09, 0xd3, 0x56, 0x99, 0x69, 0xab, 0xbe, 0x94, 0x1c, 0x78, + 0xc8, 0x89, 0x36, 0xa1, 0xa4, 0xca, 0x3d, 0x12, 0x6f, 0xd4, 0xca, 0x34, 0x94, 0xfe, 0xde, 0x90, + 0x1f, 0xd5, 0x60, 0xce, 0x0f, 0xbb, 0x4c, 0x9f, 0x99, 0xef, 0x4f, 0xc3, 0x3d, 0x0e, 0xbb, 0x58, + 0x32, 0xa2, 0x7b, 0x50, 0xfa, 0xda, 0xa5, 0x81, 0x17, 0x74, 0xe3, 0x53, 0xb0, 0x3a, 0x0d, 0xf4, + 0x4c, 0xf1, 0xe1, 0x21, 0x40, 0x14, 0x54, 0x05, 0xb5, 0x86, 0x1e, 0x41, 0xa1, 0xe3, 0x75, 0x09, + 0xe3, 0xca, 0x25, 0x8d, 0xba, 0xb8, 0x4b, 0xbe, 0x7b, 0xbd, 0x7a, 0x23, 0x71, 0x59, 0x84, 0x11, + 0x09, 0x44, 0xf9, 0xee, 0x7a, 0x01, 0xa1, 0xa2, 0xbc, 0xbd, 0xa5, 0x20, 0x4e, 0x53, 0xfe, 0x60, + 0x2d, 0x41, 0xc8, 0xf2, 0xd4, 0x95, 0x20, 0xf3, 0xc5, 0xdb, 0xc9, 0x52, 0x12, 0xc4, 0x31, 0x08, + 0xdc, 0x1e, 0xd1, 0x25, 0x80, 0x1c, 0x8b, 0xfa, 0xa4, 0x2d, 0xe2, 0xbc, 0x23, 0x2b, 0xb7, 0x12, + 0xd6, 0x14, 0xda, 0x84, 0x22, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x7e, 0xc6, 0xc2, 0x2a, 0x06, 0xa0, + 0xfb, 0x50, 0x6e, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x98, 0x11, 0x3d, 0x82, 0x88, 0xd0, 0x23, + 0x94, 0x86, 0x54, 0x96, 0x74, 0x65, 0xac, 0x08, 0x74, 0x07, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61, + 0xec, 0x73, 0x1a, 0xf6, 0x23, 0x7d, 0x91, 0x2f, 0x8b, 0xe4, 0xbd, 0x97, 0x5c, 0xc0, 0xe3, 0x7c, + 0xf6, 0xbf, 0x73, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xd6, 0x7d, 0x04, 0x05, 0x15, 0x70, 0x2a, 0xd6, + 0xdf, 0xce, 0xc7, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x28, 0xb6, 0xfb, 0x54, 0x16, 0xc2, 0xaa, 0x3c, + 0x8e, 0x49, 0x61, 0x29, 0x0f, 0xb9, 0xeb, 0x4b, 0x1f, 0xe7, 0xb1, 0x22, 0x44, 0x6d, 0x3c, 0xec, + 0xbc, 0xce, 0x57, 0x1b, 0x0f, 0x61, 0xc9, 0xfd, 0x2b, 0xbe, 0xd3, 0xfe, 0x95, 0xce, 0xbd, 0x7f, + 0xf6, 0xdf, 0x0d, 0x28, 0x0f, 0xcf, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xcc, 0x33, 0xb9, + 0xb7, 0xf3, 0xcc, 0x65, 0x28, 0x30, 0x4e, 0x89, 0xdb, 0x53, 
0x9d, 0x1b, 0xd6, 0x94, 0xc8, 0x62, + 0x3d, 0xd6, 0x95, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xd7, 0x80, 0x85, 0xb1, 0xe3, 0xfe, 0x5e, + 0x6d, 0xb9, 0x04, 0xf3, 0x3e, 0x39, 0x21, 0xaa, 0xb7, 0xcc, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22, + 0xa4, 0x5c, 0x2a, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a, + 0xd6, 0x94, 0xd0, 0xb9, 0x4f, 0x7d, 0x5d, 0x5f, 0x8b, 0x21, 0xb2, 0x61, 0xce, 0x0b, 0x8e, 0x42, + 0x1d, 0x36, 0xb2, 0xb2, 0x51, 0x75, 0xda, 0x4e, 0x70, 0x14, 0x62, 0xb9, 0x86, 0xae, 0x42, 0x81, + 0xba, 0x41, 0x97, 0xc4, 0xc5, 0x75, 0x59, 0x70, 0x61, 0x31, 0x83, 0xf5, 0x82, 0x6d, 0x43, 0x55, + 0xf6, 0xa7, 0xbb, 0x84, 0x89, 0x6e, 0x48, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5, + 0xd8, 0xbe, 0x09, 0xe8, 0xb1, 0xc7, 0xf8, 0x33, 0xd9, 0xc2, 0xb3, 0xb3, 0x9a, 0xd7, 0x7d, 0xb8, + 0x38, 0xc6, 0xad, 0xaf, 0x85, 0x9f, 0x4e, 0xb4, 0xaf, 0xd7, 0xd2, 0x19, 0x57, 0xbe, 0x14, 0x38, + 0x0a, 0x38, 0xd1, 0xc5, 0x2e, 0x40, 0x45, 0xda, 0xa5, 0xbe, 0x6d, 0xbb, 0x50, 0x55, 0xa4, 0x16, + 0xfe, 0x05, 0x5c, 0x88, 0x05, 0x7d, 0x49, 0xa8, 0x6c, 0x45, 0x0c, 0xe9, 0x97, 0x1f, 0x4e, 0xfb, + 0x4a, 0x63, 0x9c, 0x1d, 0x4f, 0xe2, 0x6d, 0x02, 0x17, 0x25, 0xcf, 0x43, 0x8f, 0xf1, 0x90, 0x0e, + 0x62, 0xab, 0x57, 0x00, 0xb6, 0xda, 0xdc, 0x3b, 0x21, 0x4f, 0x02, 0x5f, 0x5d, 0xa3, 0x25, 0x9c, + 0x98, 0x89, 0xaf, 0xc8, 0xdc, 0xa8, 0x87, 0xbb, 0x02, 0xe5, 0x96, 0x4b, 0xfd, 0x41, 0xeb, 0xa5, + 0xc7, 0x75, 0x2b, 0x3d, 0x9a, 0xb0, 0x7f, 0x6f, 0xc0, 0x72, 0xf2, 0x3b, 0xad, 0x13, 0x91, 0x2e, + 0xee, 0xc1, 0x1c, 0x8f, 0xeb, 0x98, 0xc5, 0x2c, 0x23, 0x52, 0x10, 0x51, 0xea, 0x60, 0x09, 0x4a, + 0x78, 0x5a, 0x1d, 0x9c, 0x6b, 0xa7, 0xc3, 0x27, 0x3c, 0xfd, 0xbf, 0x12, 0xa0, 0xf4, 0x72, 0x46, + 0x6f, 0x9a, 0x6c, 0xee, 0x72, 0x13, 0xcd, 0xdd, 0xf3, 0xc9, 0xe6, 0x4e, 0x5d, 0xcd, 0x77, 0x66, + 0xd1, 0x64, 0x86, 0x16, 0xef, 0x2e, 0x94, 0xe3, 0xea, 0x26, 0xbe, 0xc0, 0xad, 0xb4, 0xe8, 0x61, + 0x01, 0x34, 0x62, 0x46, 0xeb, 0xf1, 0x8d, 0xa3, 0xee, 0x3a, 0x14, 0xe7, 0x14, 0x1a, 0xb5, 0x1d, + 0x5d, 0x57, 0xe8, 0x5b, 0xe8, 0xfe, 0xf9, 0xde, 0x2d, 0xe6, 0x26, 0xdf, 0x2c, 0x1a, 0x50, 0xd9, + 0x8e, 0x13, 0xe5, 0x39, 0x1e, 0x2d, 0x92, 0x20, 0xb4, 0xa1, 0x0b, 0x1b, 0x95, 0x9a, 0xaf, 0xa4, + 0x4d, 0x8c, 0x1f, 0x28, 0x42, 0xaa, 0x2b, 0x9b, 0xa3, 0x8c, 0xd2, 0xb2, 0x2c, 0x1d, 0xb4, 0x39, + 0x93, 0xef, 0x67, 0xac, 0x2f, 0xd1, 0x67, 0x50, 0xc0, 0x84, 0xf5, 0x7d, 0x2e, 0x5f, 0x42, 0x2a, + 0xf5, 0xab, 0x53, 0xa4, 0x2b, 0x26, 0x79, 0x56, 0x35, 0x00, 0xfd, 0x12, 0x8a, 0x6a, 0xc4, 0xcc, + 0xca, 0xb4, 0x96, 0x3f, 0x43, 0x33, 0x8d, 0xd1, 0x0d, 0x85, 0xa6, 0xc4, 0x71, 0xfc, 0x9c, 0x04, + 0x44, 0xbf, 0xd0, 0x89, 0xb6, 0x76, 0x1e, 0x27, 0x66, 0x50, 0x1d, 0xe6, 0x39, 0x75, 0xdb, 0xc4, + 0x5c, 0x98, 0xc1, 0x85, 0x8a, 0x55, 0x24, 0xb6, 0xc8, 0x0b, 0x02, 0xd2, 0x31, 0x17, 0x55, 0xa5, + 0xa4, 0x28, 0xf4, 0x03, 0x58, 0x0c, 0xfa, 0x3d, 0xd9, 0x2c, 0x74, 0xf6, 0x39, 0x89, 0x98, 0x79, + 0x41, 0x7e, 0x6f, 0x62, 0x16, 0x5d, 0x83, 0x85, 0xa0, 0xdf, 0x3b, 0x10, 0x37, 0xbc, 0x62, 0x5b, + 0x92, 0x6c, 0xe3, 0x93, 0xe8, 0x26, 0x2c, 0x0b, 0x5c, 0xbc, 0xdb, 0x8a, 0x73, 0x59, 0x72, 0xa6, + 0x17, 0xde, 0x43, 0xcf, 0xfc, 0x3e, 0x3a, 0x02, 0xeb, 0x39, 0x54, 0x93, 0xfb, 0x90, 0x81, 0xbd, + 0x33, 0xde, 0x71, 0xcf, 0x10, 0x17, 0x89, 0x86, 0xe3, 0x39, 0x7c, 0xef, 0x69, 0xd4, 0x71, 0x39, + 0xc9, 0xca, 0xbc, 0xe9, 0x0c, 0x74, 0x19, 0x0a, 0x7b, 0x6a, 0xa3, 0xd4, 0xcb, 0xa5, 0xa6, 0xc4, + 0x7c, 0x93, 0x08, 0xe7, 0xe9, 0x74, 0xab, 0x29, 0xfb, 0x0a, 0x58, 0x59, 0xe2, 0x95, 0x33, 0xec, + 0x3f, 0xe7, 0x00, 0x46, 0xc1, 0x80, 0x3e, 0x02, 0xe8, 0x91, 0x8e, 0xe7, 0xfe, 0x9a, 
0x8f, 0x1a, + 0xca, 0xb2, 0x9c, 0x91, 0x5d, 0xe5, 0xa8, 0xf4, 0xcf, 0xbd, 0x73, 0xe9, 0x8f, 0x60, 0x8e, 0x79, + 0xdf, 0x10, 0x5d, 0xa6, 0xc8, 0x31, 0x7a, 0x02, 0x15, 0x37, 0x08, 0x42, 0x2e, 0xc3, 0x38, 0x6e, + 0xb6, 0x6f, 0x9d, 0x16, 0xbe, 0xce, 0xd6, 0x88, 0x5f, 0x9d, 0x92, 0xa4, 0x04, 0xeb, 0x3e, 0x2c, + 0x4d, 0x32, 0x9c, 0xab, 0x19, 0xfc, 0xd6, 0x80, 0x0b, 0x13, 0x5b, 0x87, 0x3e, 0x1d, 0x66, 0x01, + 0x63, 0x86, 0xe3, 0x15, 0x27, 0x80, 0x07, 0x50, 0xdd, 0xe2, 0x5c, 0x64, 0x3d, 0x65, 0x9b, 0x6a, + 0xf7, 0x4e, 0xc7, 0x8e, 0x21, 0xec, 0x3f, 0x1a, 0xa3, 0x77, 0xce, 0xcc, 0x9e, 0xff, 0xde, 0x78, + 0xcf, 0x7f, 0x7d, 0xfa, 0xe5, 0xf0, 0x3e, 0x5b, 0xfd, 0x1b, 0x3f, 0x87, 0x0f, 0x33, 0x2f, 0x66, + 0x54, 0x81, 0xe2, 0xfe, 0xc1, 0x16, 0x3e, 0x68, 0x35, 0x97, 0x3e, 0x40, 0x55, 0x28, 0x6d, 0x3f, + 0xd9, 0xdd, 0x7b, 0xdc, 0x3a, 0x68, 0x2d, 0x19, 0x62, 0xa9, 0xd9, 0x12, 0xe3, 0xe6, 0x52, 0xae, + 0xfe, 0x6d, 0x01, 0x8a, 0xdb, 0xea, 0xbf, 0x1e, 0x74, 0x00, 0xe5, 0xe1, 0x9f, 0x00, 0xc8, 0xce, + 0xf0, 0xce, 0xc4, 0xbf, 0x09, 0xd6, 0xc7, 0xa7, 0xf2, 0xe8, 0xc4, 0xfd, 0x10, 0xe6, 0xe5, 0xdf, + 0x21, 0x28, 0xa3, 0xbd, 0x4e, 0xfe, 0x4f, 0x62, 0x9d, 0xfe, 0xf7, 0xc2, 0x86, 0x21, 0x24, 0xc9, + 0xb7, 0x89, 0x2c, 0x49, 0xc9, 0xc7, 0x4b, 0x6b, 0xf5, 0x8c, 0x47, 0x0d, 0xb4, 0x0b, 0x05, 0xdd, + 0xb0, 0x65, 0xb1, 0x26, 0x5f, 0x20, 0xac, 0xb5, 0xe9, 0x0c, 0x4a, 0xd8, 0x86, 0x81, 0x76, 0x87, + 0xef, 0xd1, 0x59, 0xaa, 0x25, 0xab, 0x5d, 0xeb, 0x8c, 0xf5, 0x75, 0x63, 0xc3, 0x40, 0x5f, 0x41, + 0x25, 0x51, 0xcf, 0xa2, 0x8c, 0x6a, 0x2a, 0x5d, 0x1c, 0x5b, 0xd7, 0xcf, 0xe0, 0xd2, 0x96, 0xb7, + 0x60, 0x4e, 0x1e, 0xa4, 0x0c, 0x67, 0x27, 0xca, 0xdd, 0x2c, 0x35, 0xc7, 0xca, 0xdf, 0x43, 0x55, + 0xa0, 0x93, 0x20, 0x19, 0x7d, 0xe8, 0xfa, 0x59, 0xf7, 0xea, 0xd4, 0xb0, 0x49, 0x05, 0xf1, 0x86, + 0x81, 0x42, 0x40, 0xe9, 0xe4, 0x89, 0x7e, 0x94, 0x11, 0x25, 0xd3, 0x32, 0xb8, 0x75, 0x73, 0x36, + 0x66, 0x65, 0x54, 0xa3, 0xfa, 0xea, 0xcd, 0x8a, 0xf1, 0x8f, 0x37, 0x2b, 0xc6, 0xbf, 0xde, 0xac, + 0x18, 0x87, 0x05, 0x59, 0x31, 0xfd, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xb8, 0xc3, + 0x68, 0x0b, 0x1d, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ControlClient is the client API for Control service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ControlClient interface { + DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) + Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) + Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error) + UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { + out := new(DiskUsageResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return out, nil } -func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...) 
+ if err != nil { + return nil, err } - if len(m.Parents) > 0 { - for iNdEx := len(m.Parents) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Parents[iNdEx]) - copy(dAtA[i:], m.Parents[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Parents[iNdEx]))) - i-- - dAtA[i] = 0x62 - } + x := &controlPruneClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err } - if m.Shared { - i-- - if m.Shared { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err } - if len(m.RecordType) > 0 { - i -= len(m.RecordType) - copy(dAtA[i:], m.RecordType) - i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType))) - i-- - dAtA[i] = 0x52 + return x, nil +} + +type Control_PruneClient interface { + Recv() (*UsageRecord, error) + grpc.ClientStream +} + +type controlPruneClient struct { + grpc.ClientStream +} + +func (x *controlPruneClient) Recv() (*UsageRecord, error) { + m := new(UsageRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x4a + return m, nil +} + +func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...) + if err != nil { + return nil, err } - if m.UsageCount != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) - i-- - dAtA[i] = 0x40 + return out, nil +} + +func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...) + if err != nil { + return nil, err } - if m.LastUsedAt != nil { - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintControl(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x3a + x := &controlStatusClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err } - i -= n2 - i = encodeVarintControl(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x32 - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x2a + return x, nil +} + +type Control_StatusClient interface { + Recv() (*StatusResponse, error) + grpc.ClientStream +} + +type controlStatusClient struct { + grpc.ClientStream +} + +func (x *controlStatusClient) Recv() (*StatusResponse, error) { + m := new(StatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - if m.Size_ != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x20 + return m, nil +} + +func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...) 
+ if err != nil { + return nil, err } - if m.InUse { - i-- - if m.InUse { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + x := &controlSessionClient{stream} + return x, nil +} + +type Control_SessionClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type controlSessionClient struct { + grpc.ClientStream +} + +func (x *controlSessionClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *controlSessionClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - if m.Mutable { - i-- - if m.Mutable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return m, nil +} + +func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...) + if err != nil { + return nil, err } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa + return out, nil +} + +func (c *controlClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Info", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *controlClient) ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[3], "/moby.buildkit.v1.Control/ListenBuildHistory", opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil + x := &controlListenBuildHistoryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Control_ListenBuildHistoryClient interface { + Recv() (*BuildHistoryEvent, error) + grpc.ClientStream } -func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +type controlListenBuildHistoryClient struct { + grpc.ClientStream +} + +func (x *controlListenBuildHistoryClient) Recv() (*BuildHistoryEvent, error) { + m := new(BuildHistoryEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err } - if len(m.FrontendInputs) > 0 { - for k := range m.FrontendInputs { - v := m.FrontendInputs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } + return m, nil +} + +func (c *controlClient) UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error) { + out := new(UpdateBuildHistoryResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/UpdateBuildHistory", in, out, opts...) + if err != nil { + return nil, err } - if len(m.Entitlements) > 0 { - for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Entitlements[iNdEx]) - copy(dAtA[i:], m.Entitlements[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx]))) - i-- - dAtA[i] = 0x4a - } + return out, nil +} + +// ControlServer is the server API for Control service. +type ControlServer interface { + DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) + Prune(*PruneRequest, Control_PruneServer) error + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + Status(*StatusRequest, Control_StatusServer) error + Session(Control_SessionServer) error + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) + Info(context.Context, *InfoRequest) (*InfoResponse, error) + ListenBuildHistory(*BuildHistoryRequest, Control_ListenBuildHistoryServer) error + UpdateBuildHistory(context.Context, *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error) +} + +// UnimplementedControlServer can be embedded to have forward compatible implementations. 
+type UnimplementedControlServer struct { +} + +func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented") +} +func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error { + return status.Errorf(codes.Unimplemented, "method Prune not implemented") +} +func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +} +func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error { + return status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (*UnimplementedControlServer) Session(srv Control_SessionServer) error { + return status.Errorf(codes.Unimplemented, "method Session not implemented") +} +func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") +} +func (*UnimplementedControlServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedControlServer) ListenBuildHistory(req *BuildHistoryRequest, srv Control_ListenBuildHistoryServer) error { + return status.Errorf(codes.Unimplemented, "method ListenBuildHistory not implemented") +} +func (*UnimplementedControlServer) UpdateBuildHistory(ctx context.Context, req *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBuildHistory not implemented") +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiskUsageRequest) + if err := dec(in); err != nil { + return nil, err } - { - size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) + if interceptor == nil { + return srv.(ControlServer).DiskUsage(ctx, in) } - i-- - dAtA[i] = 0x42 - if len(m.FrontendAttrs) > 0 { - for k := range m.FrontendAttrs { - v := m.FrontendAttrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/DiskUsage", } - if len(m.Frontend) > 0 { - i -= len(m.Frontend) - copy(dAtA[i:], m.Frontend) - i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) - i-- - dAtA[i] = 0x32 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) } - if len(m.Session) > 0 { - i -= len(m.Session) - copy(dAtA[i:], m.Session) - i = encodeVarintControl(dAtA, i, uint64(len(m.Session))) - i-- - dAtA[i] = 0x2a + return interceptor(ctx, in, info, handler) +} + +func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { + m 
:= new(PruneRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - if len(m.ExporterAttrs) > 0 { - for k := range m.ExporterAttrs { - v := m.ExporterAttrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } + return srv.(ControlServer).Prune(m, &controlPruneServer{stream}) +} + +type Control_PruneServer interface { + Send(*UsageRecord) error + grpc.ServerStream +} + +type controlPruneServer struct { + grpc.ServerStream +} + +func (x *controlPruneServer) Send(m *UsageRecord) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.Exporter) > 0 { - i -= len(m.Exporter) - copy(dAtA[i:], m.Exporter) - i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) - i-- - dAtA[i] = 0x1a + if interceptor == nil { + return srv.(ControlServer).Solve(ctx, in) } - if m.Definition != nil { - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Solve", } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *CacheOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatusRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return dAtA[:n], nil + return srv.(ControlServer).Status(m, &controlStatusServer{stream}) } -func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Control_StatusServer interface { + Send(*StatusResponse) error + grpc.ServerStream } -func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +type controlStatusServer struct { + grpc.ServerStream +} + +func (x *controlStatusServer) Send(m *StatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ControlServer).Session(&controlSessionServer{stream}) +} + +type Control_SessionServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type controlSessionServer struct { + grpc.ServerStream +} + +func (x *controlSessionServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *controlSessionServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := 
x.ServerStream.RecvMsg(m); err != nil { + return nil, err } - if len(m.Imports) > 0 { - for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } + return m, nil +} + +func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.Exports) > 0 { - for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + if interceptor == nil { + return srv.(ControlServer).ListWorkers(ctx, in) } - if len(m.ExportAttrsDeprecated) > 0 { - for k := range m.ExportAttrsDeprecated { - v := m.ExportAttrsDeprecated[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/ListWorkers", } - if len(m.ImportRefsDeprecated) > 0 { - for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) } - if len(m.ExportRefDeprecated) > 0 { - i -= len(m.ExportRefDeprecated) - copy(dAtA[i:], m.ExportRefDeprecated) - i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated))) - i-- - dAtA[i] = 0xa + return interceptor(ctx, in, info, handler) +} + +func _Control_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err } - return len(dAtA) - i, nil + if interceptor == nil { + return srv.(ControlServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { +func _Control_ListenBuildHistory_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BuildHistoryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).ListenBuildHistory(m, &controlListenBuildHistoryServer{stream}) +} + +type Control_ListenBuildHistoryServer interface { + Send(*BuildHistoryEvent) error + grpc.ServerStream +} + +type controlListenBuildHistoryServer struct { + grpc.ServerStream +} + +func (x *controlListenBuildHistoryServer) Send(m *BuildHistoryEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_UpdateBuildHistory_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBuildHistoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateBuildHistory(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/UpdateBuildHistory", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateBuildHistory(ctx, req.(*UpdateBuildHistoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DiskUsage", + Handler: _Control_DiskUsage_Handler, + }, + { + MethodName: "Solve", + Handler: _Control_Solve_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _Control_ListWorkers_Handler, + }, + { + MethodName: "Info", + Handler: _Control_Info_Handler, + }, + { + MethodName: "UpdateBuildHistory", + Handler: _Control_UpdateBuildHistory_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Prune", + Handler: _Control_Prune_Handler, + ServerStreams: true, + }, + { + StreamName: "Status", + Handler: _Control_Status_Handler, + ServerStreams: true, + }, + { + StreamName: "Session", + Handler: _Control_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "ListenBuildHistory", + Handler: _Control_ListenBuildHistory_Handler, + ServerStreams: true, + }, + }, + Metadata: "control.proto", +} + +func (m *PruneRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2221,12 +2623,12 @@ func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { +func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2235,36 +2637,39 @@ func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Attrs) > 0 { - for k := range m.Attrs { - v := m.Attrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) + if m.KeepBytes != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes)) + i-- + dAtA[i] = 0x20 + } + if m.KeepDuration != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration)) + i-- + dAtA[i] = 0x18 + } + if m.All { + i-- + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) i-- dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 } } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m 
*SolveResponse) Marshal() (dAtA []byte, err error) { +func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2274,12 +2679,12 @@ func (m *SolveResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2288,21 +2693,11 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.ExporterResponse) > 0 { - for k := range m.ExporterResponse { - v := m.ExporterResponse[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) i-- dAtA[i] = 0xa } @@ -2310,7 +2705,7 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { +func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2320,12 +2715,12 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2334,17 +2729,24 @@ func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa + if len(m.Record) > 0 { + for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { +func (m *UsageRecord) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2354,12 +2756,12 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := 
len(dAtA) _ = i var l int @@ -2368,239 +2770,93 @@ func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } + if len(m.Parents) > 0 { + for iNdEx := len(m.Parents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parents[iNdEx]) + copy(dAtA[i:], m.Parents[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Parents[iNdEx]))) i-- - dAtA[i] = 0x22 - } - } - if len(m.Logs) > 0 { - for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Vertexes) > 0 { - for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Vertex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ProgressGroup != nil { - { - size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x3a - } - if m.Completed != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintControl(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x32 - } - if m.Started != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) - if err8 != nil { - return 0, err8 + dAtA[i] = 0x62 } - i -= n8 - i = encodeVarintControl(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x2a } - if m.Cached { + if m.Shared { i-- - if m.Cached { + if m.Shared { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x58 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + if len(m.RecordType) > 0 { + i -= len(m.RecordType) + copy(dAtA[i:], m.RecordType) + i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType))) i-- - dAtA[i] = 0x1a - } - if 
len(m.Inputs) > 0 { - for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Inputs[iNdEx]) - copy(dAtA[i:], m.Inputs[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0x52 } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VertexStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x4a } - if m.Completed != nil { - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintControl(dAtA, i, uint64(n9)) + if m.UsageCount != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) i-- - dAtA[i] = 0x42 + dAtA[i] = 0x40 } - if m.Started != nil { - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) - if err10 != nil { - return 0, err10 + if m.LastUsedAt != nil { + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):]) + if err1 != nil { + return 0, err1 } - i -= n10 - i = encodeVarintControl(dAtA, i, uint64(n10)) + i -= n1 + i = encodeVarintControl(dAtA, i, uint64(n1)) i-- dAtA[i] = 0x3a } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) + if err2 != nil { + return 0, err2 } - i -= n11 - i = encodeVarintControl(dAtA, i, uint64(n11)) + i -= n2 + i = encodeVarintControl(dAtA, i, uint64(n2)) i-- dAtA[i] = 0x32 - if m.Total != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Total)) + if len(m.Parent) > 0 { + i -= len(m.Parent) + copy(dAtA[i:], m.Parent) + i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x2a } - if m.Current != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Current)) + if m.Size_ != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) i-- dAtA[i] = 0x20 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + if m.InUse { i-- - dAtA[i] = 0x1a + if m.InUse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - if len(m.Vertex) > 0 { - i -= len(m.Vertex) - copy(dAtA[i:], m.Vertex) - i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + if m.Mutable { i-- - dAtA[i] = 0x12 + if m.Mutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } if len(m.ID) > 0 { i 
-= len(m.ID) @@ -2612,7 +2868,7 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *VertexLog) Marshal() (dAtA []byte, err error) { +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2622,12 +2878,12 @@ func (m *VertexLog) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2636,37 +2892,155 @@ func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Msg) > 0 { - i -= len(m.Msg) - copy(dAtA[i:], m.Msg) - i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) + if m.SourcePolicy != nil { + { + size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x62 } - if m.Stream != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Stream)) + if m.Internal { i-- - dAtA[i] = 0x18 - } - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err12 != nil { - return 0, err12 - } - i -= n12 - i = encodeVarintControl(dAtA, i, uint64(n12)) + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.FrontendInputs) > 0 { + for k := range m.FrontendInputs { + v := m.FrontendInputs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Entitlements) > 0 { + for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Entitlements[iNdEx]) + copy(dAtA[i:], m.Entitlements[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 - if len(m.Vertex) > 0 { - i -= len(m.Vertex) - copy(dAtA[i:], m.Vertex) - i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + dAtA[i] = 0x42 + if len(m.FrontendAttrs) > 0 { + for k := range m.FrontendAttrs { + v := m.FrontendAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x32 + } + if len(m.Session) > 0 { + i -= len(m.Session) + copy(dAtA[i:], m.Session) + i = encodeVarintControl(dAtA, i, 
uint64(len(m.Session))) + i-- + dAtA[i] = 0x2a + } + if len(m.ExporterAttrs) > 0 { + for k := range m.ExporterAttrs { + v := m.ExporterAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exporter) > 0 { + i -= len(m.Exporter) + copy(dAtA[i:], m.Exporter) + i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) + i-- + dAtA[i] = 0x1a + } + if m.Definition != nil { + { + size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VertexWarning) Marshal() (dAtA []byte, err error) { +func (m *CacheOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2676,12 +3050,12 @@ func (m *VertexWarning) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VertexWarning) MarshalTo(dAtA []byte) (int, error) { +func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2690,10 +3064,10 @@ func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Imports) > 0 { + for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2701,60 +3075,62 @@ func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintControl(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0x2a } } - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Exports) > 0 { + for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.Url) > 0 { - i -= len(m.Url) - copy(dAtA[i:], m.Url) - i = encodeVarintControl(dAtA, i, uint64(len(m.Url))) - i-- - dAtA[i] = 0x2a - } - if len(m.Detail) > 0 { - for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Detail[iNdEx]) - copy(dAtA[i:], m.Detail[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Detail[iNdEx]))) i-- dAtA[i] = 0x22 } } - if len(m.Short) > 0 { - i -= len(m.Short) - copy(dAtA[i:], m.Short) - i = encodeVarintControl(dAtA, i, uint64(len(m.Short))) - i-- - dAtA[i] = 0x1a + if len(m.ExportAttrsDeprecated) > 0 { + for k := range m.ExportAttrsDeprecated { + v := m.ExportAttrsDeprecated[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, 
uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } } - if m.Level != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Level)) - i-- - dAtA[i] = 0x10 + if len(m.ImportRefsDeprecated) > 0 { + for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ImportRefsDeprecated[iNdEx]) + copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if len(m.Vertex) > 0 { - i -= len(m.Vertex) - copy(dAtA[i:], m.Vertex) - i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + if len(m.ExportRefDeprecated) > 0 { + i -= len(m.ExportRefDeprecated) + copy(dAtA[i:], m.ExportRefDeprecated) + i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { +func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2764,12 +3140,12 @@ func (m *BytesMessage) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { +func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2778,17 +3154,36 @@ func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2798,12 +3193,12 @@ func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2812,11 +3207,21 @@ func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = 
encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) + if len(m.ExporterResponse) > 0 { + for k := range m.ExporterResponse { + v := m.ExporterResponse[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -2824,7 +3229,7 @@ func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2834,12 +3239,12 @@ func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2848,10 +3253,86 @@ func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Record) > 0 { - for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Logs) > 0 { + for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Statuses) > 0 { + for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Vertexes) > 0 { + for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2865,554 +3346,3836 @@ func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func 
encodeVarintControl(dAtA []byte, offset int, v uint64) int { - offset -= sovControl(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *Vertex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *PruneRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ProgressGroup != nil { + { + size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if m.All { - n += 2 + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x3a } - if m.KeepDuration != 0 { - n += 1 + sovControl(uint64(m.KeepDuration)) + if m.Completed != nil { + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintControl(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0x32 } - if m.KeepBytes != 0 { - n += 1 + sovControl(uint64(m.KeepBytes)) + if m.Started != nil { + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintControl(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x2a } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Cached { + i-- + if m.Cached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - return n -} - -func (m *DiskUsageRequest) Size() (n int) { - if m == nil { - return 0 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a } - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) + if len(m.Inputs) > 0 { + for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Inputs[iNdEx]) + copy(dAtA[i:], m.Inputs[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *DiskUsageResponse) Size() (n int) { - if m == nil { - return 0 +func (m *VertexStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Record) > 0 { - for _, e := range m.Record { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UsageRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Mutable { - n += 2 - } - if m.InUse { - n += 2 - } - if m.Size_ != 0 { - n += 1 + sovControl(uint64(m.Size_)) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.Completed != nil { + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintControl(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x42 } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovControl(uint64(l)) - if m.LastUsedAt != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt) - n += 1 + l + sovControl(uint64(l)) + if m.Started != nil { + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintControl(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x3a } - if m.UsageCount != 0 { - n += 1 + sovControl(uint64(m.UsageCount)) + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err12 != nil { + return 0, err12 } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + i -= n12 + i = encodeVarintControl(dAtA, i, uint64(n12)) + i-- + dAtA[i] = 0x32 + if m.Total != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x28 } - l = len(m.RecordType) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.Current != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x20 } - if m.Shared { - n += 2 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a } - if len(m.Parents) > 0 { - for _, s := range m.Parents { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *SolveRequest) Size() (n int) { - if m == nil { - return 0 +func (m *VertexLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Definition != nil { - l = m.Definition.Size() - n += 
1 + l + sovControl(uint64(l)) - } - l = len(m.Exporter) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.ExporterAttrs) > 0 { - for k, v := range m.ExporterAttrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - l = len(m.Session) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Frontend) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.FrontendAttrs) > 0 { - for k, v := range m.FrontendAttrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x22 } - l = m.Cache.Size() - n += 1 + l + sovControl(uint64(l)) - if len(m.Entitlements) > 0 { - for _, s := range m.Entitlements { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } + if m.Stream != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Stream)) + i-- + dAtA[i] = 0x18 } - if len(m.FrontendInputs) > 0 { - for k, v := range m.FrontendInputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovControl(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } + n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err13 != nil { + return 0, err13 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= n13 + i = encodeVarintControl(dAtA, i, uint64(n13)) + i-- + dAtA[i] = 0x12 + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *CacheOptions) Size() (n int) { - if m == nil { - return 0 +func (m *VertexWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *VertexWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.ExportRefDeprecated) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.ImportRefsDeprecated) > 0 { - for _, s := range m.ImportRefsDeprecated { - l = len(s) - n += 1 + l + sovControl(uint64(l)) + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } } - if len(m.ExportAttrsDeprecated) > 0 { - for k, v := range m.ExportAttrsDeprecated { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + if m.Info != nil { + { + size, err := 
m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - if len(m.Exports) > 0 { - for _, e := range m.Exports { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintControl(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x2a } - if len(m.Imports) > 0 { - for _, e := range m.Imports { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) + if len(m.Detail) > 0 { + for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Detail[iNdEx]) + copy(dAtA[i:], m.Detail[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Detail[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.Short) > 0 { + i -= len(m.Short) + copy(dAtA[i:], m.Short) + i = encodeVarintControl(dAtA, i, uint64(len(m.Short))) + i-- + dAtA[i] = 0x1a } - return n + if m.Level != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *CacheOptionsEntry) Size() (n int) { - if m == nil { - return 0 +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SolveResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.ExporterResponse) > 0 { - for k, v := range m.ExporterResponse { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i 
= encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 +func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Record) > 0 { + for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Vertexes) > 0 { - for _, e := range m.Vertexes { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Logs) > 0 { - for _, e := range m.Logs { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Warnings) > 0 { - for _, e := range m.Warnings { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + return len(dAtA) - i, nil } -func (m *Vertex) Size() (n int) { - if m == nil { - return 0 +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Inputs) > 0 { - for _, s := range m.Inputs { - l = len(s) - n += 1 + l + sovControl(uint64(l)) + if m.BuildkitVersion != nil { + { + size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Cached { - n += 2 - } - if m.Started != nil { - l = 
github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) - n += 1 + l + sovControl(uint64(l)) - } - if m.Completed != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.ProgressGroup != nil { - l = m.ProgressGroup.Size() - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return len(dAtA) - i, nil } -func (m *VertexStatus) Size() (n int) { - if m == nil { - return 0 +func (m *BuildHistoryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Vertex) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Current != 0 { - n += 1 + sovControl(uint64(m.Current)) - } - if m.Total != 0 { - n += 1 + sovControl(uint64(m.Total)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovControl(uint64(l)) - if m.Started != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) - n += 1 + l + sovControl(uint64(l)) + if m.EarlyExit { + i-- + if m.EarlyExit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - if m.Completed != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) - n += 1 + l + sovControl(uint64(l)) + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *VertexLog) Size() (n int) { - if m == nil { - return 0 +func (m *BuildHistoryEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BuildHistoryEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Vertex) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovControl(uint64(l)) - if m.Stream != 0 { - n += 1 + sovControl(uint64(m.Stream)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = len(m.Msg) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.Record != nil { + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) + if m.Type != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *VertexWarning) Size() (n int) { - if m == nil { - return 0 +func (m *BuildHistoryRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BuildHistoryRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Vertex) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Level != 0 { - n += 1 + sovControl(uint64(m.Level)) - } - l = len(m.Short) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Detail) > 0 { - for _, b := range m.Detail { - l = len(b) - n += 1 + l + sovControl(uint64(l)) - } + if m.NumCompletedSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumCompletedSteps)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - l = len(m.Url) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if m.NumTotalSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumTotalSteps)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovControl(uint64(l)) + if m.NumCachedSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumCachedSteps)) + i-- + dAtA[i] = 0x78 } - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) + if m.Pinned { + i-- + if m.Pinned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x70 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Trace != nil { + { + size, err := m.Trace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - return n -} - -func (m *BytesMessage) Size() (n int) { - if m == nil { - return 0 + if m.Generation != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x60 } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) + if len(m.Results) > 0 { + for k := range m.Results { + v := m.Results[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - return n -} - -func (m *ListWorkersRequest) Size() (n int) { - if m == nil { - return 0 + if len(m.ExporterResponse) > 0 { + for k := range m.ExporterResponse { + v := m.ExporterResponse[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if m.Logs != nil { + { + size, err := m.Logs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.CompletedAt != nil { + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CompletedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt):]) + if err21 != nil { + return 0, err21 + } + i -= n21 + i = encodeVarintControl(dAtA, i, uint64(n21)) + i-- + dAtA[i] = 0x3a + } + if m.CreatedAt != nil { + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt):]) + if err22 != nil { + return 0, err22 + } + i -= n22 + i = encodeVarintControl(dAtA, i, uint64(n22)) + i-- + dAtA[i] = 0x32 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Exporters) > 0 { + for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exporters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.FrontendAttrs) > 0 { + for k := range m.FrontendAttrs { + v := m.FrontendAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateBuildHistoryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *UpdateBuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateBuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Delete { + i-- + if m.Delete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Pinned { + i-- + if m.Pinned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateBuildHistoryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *UpdateBuildHistoryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateBuildHistoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Descriptor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Size_ != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x18 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0x12 + } + if len(m.MediaType) > 0 { + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintControl(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildResultInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildResultInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attestations) > 0 { + for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attestations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exporter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exporter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exporter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if 
len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + offset -= sovControl(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PruneRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.All { + n += 2 + } + if m.KeepDuration != 0 { + n += 1 + sovControl(uint64(m.KeepDuration)) + } + if m.KeepBytes != 0 { + n += 1 + sovControl(uint64(m.KeepBytes)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DiskUsageRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DiskUsageResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UsageRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Mutable { + n += 2 + } + if m.InUse { + n += 2 + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + l = len(m.Parent) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + if m.LastUsedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.UsageCount != 0 { + n += 1 + sovControl(uint64(m.UsageCount)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.RecordType) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Shared { + n += 2 + } + if len(m.Parents) > 0 { + for _, s := range m.Parents { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Exporter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterAttrs) > 0 { + for k, v := range m.ExporterAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = len(m.Session) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + 
} + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = m.Cache.Size() + n += 1 + l + sovControl(uint64(l)) + if len(m.Entitlements) > 0 { + for _, s := range m.Entitlements { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.FrontendInputs) > 0 { + for k, v := range m.FrontendInputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovControl(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.Internal { + n += 2 + } + if m.SourcePolicy != nil { + l = m.SourcePolicy.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ExportRefDeprecated) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ImportRefsDeprecated) > 0 { + for _, s := range m.ImportRefsDeprecated { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.ExportAttrsDeprecated) > 0 { + for k, v := range m.ExportAttrsDeprecated { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Exports) > 0 { + for _, e := range m.Exports { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Imports) > 0 { + for _, e := range m.Imports { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptionsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, e := range m.Vertexes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + 
if len(m.Warnings) > 0 { + for _, e := range m.Warnings { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Vertex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Cached { + n += 2 + } + if m.Started != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ProgressGroup != nil { + l = m.ProgressGroup.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VertexStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Current != 0 { + n += 1 + sovControl(uint64(m.Current)) + } + if m.Total != 0 { + n += 1 + sovControl(uint64(m.Total)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Started != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VertexLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Stream != 0 { + n += 1 + sovControl(uint64(m.Stream)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VertexWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Level != 0 { + n += 1 + sovControl(uint64(m.Level)) + } + l = len(m.Short) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Detail) > 0 { + for _, b := range m.Detail { + l = len(b) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListWorkersRequest) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListWorkersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BuildkitVersion != nil { + l = m.BuildkitVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActiveOnly { + n += 2 + } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.EarlyExit { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovControl(uint64(m.Type)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Exporters) > 0 { + for _, e := range m.Exporters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.CreatedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.CompletedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.Logs != nil { + l = m.Logs.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Results) > 0 { + for k, v := range m.Results { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovControl(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.Generation != 0 { + n += 1 + sovControl(uint64(m.Generation)) + } + if m.Trace != nil { + l = m.Trace.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Pinned { + n += 2 + } + if m.NumCachedSteps != 0 { + n += 1 + 
sovControl(uint64(m.NumCachedSteps)) + } + if m.NumTotalSteps != 0 { + n += 2 + sovControl(uint64(m.NumTotalSteps)) + } + if m.NumCompletedSteps != 0 { + n += 2 + sovControl(uint64(m.NumCompletedSteps)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateBuildHistoryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Pinned { + n += 2 + } + if m.Delete { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateBuildHistoryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Descriptor) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MediaType) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildResultInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attestations) > 0 { + for _, e := range m.Attestations { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exporter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovControl(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PruneRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) + } + m.KeepDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepDuration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) + } + m.KeepBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &UsageRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UsageRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mutable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InUse = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastUsedAt == nil { + m.LastUsedAt = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) + } + m.UsageCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UsageCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RecordType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Shared = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parents = append(m.Parents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exporter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterAttrs == nil { + m.ExporterAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Session = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex 
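// Map fields such as FrontendAttrs above are encoded as repeated nested
// messages: each entry is a length-delimited record whose field 1 is the key
// and field 2 is the value. The generated loop therefore decodes entries until
// iNdEx reaches postIndex, skipping unknown fields inside an entry with
// skipControl, bounded by postIndex so a malformed entry cannot read past the
// enclosing record.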
+ case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendInputs == nil { + m.FrontendInputs = make(map[string]*pb.Definition) + } + var mapkey string + var mapvalue *pb.Definition + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthControl + } + postmsgIndex := iNdEx + mapmsglen 
+ if postmsgIndex < 0 { + return ErrInvalidLengthControl + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendInputs[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourcePolicy == nil { + m.SourcePolicy = &pb1.Policy{} + } + if err := m.SourcePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportAttrsDeprecated == nil { + m.ExportAttrsDeprecated = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExportAttrsDeprecated[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exports = append(m.Exports, &CacheOptionsEntry{}) + if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Imports = append(m.Imports, &CacheOptionsEntry{}) + if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
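// Unknown fields are not dropped: skipControl reports how many bytes the
// unrecognized field occupies, the surrounding checks guard against negative
// or overflowing lengths in malformed input, and the raw bytes are appended to
// XXX_unrecognized so they survive a reserialization round trip.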
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + 
return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} -func (m *ListWorkersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Record) > 0 { - for _, e := range m.Record { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} - -func sovControl(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozControl(x uint64) (n int) { - return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *PruneRequest) Unmarshal(dAtA []byte) error { +func (m *SolveResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3435,17 +7198,17 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -3455,82 +7218,119 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = 
bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) - } - m.KeepDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepDuration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) } - m.KeepBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - m.KeepBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -3553,7 +7353,7 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { +func (m *StatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3576,15 +7376,15 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") + return fmt.Errorf("proto: 
StatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3612,7 +7412,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3636,7 +7436,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { +func (m *StatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3659,15 +7459,15 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3694,8 +7494,110 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Record = append(m.Record, &UsageRecord{}) - if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Vertexes = append(m.Vertexes, &Vertex{}) + if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, &VertexStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &VertexLog{}) + if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, &VertexWarning{}) + if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3721,7 +7623,7 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *UsageRecord) Unmarshal(dAtA []byte) error { +func (m *Vertex) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3744,15 +7646,15 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") + return fmt.Errorf("proto: Vertex: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3780,13 +7682,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -3796,54 +7698,27 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Mutable = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl } - m.InUse = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if postIndex > l { + return io.ErrUnexpectedEOF } - case 5: + m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3871,13 +7746,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parent = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -3887,28 +7762,15 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + m.Cached = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3935,37 +7797,18 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastUsedAt == nil { - m.LastUsedAt = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) - } - m.UsageCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsageCount |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if m.Started == nil { + m.Started = new(time.Time) } - case 9: + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -3975,27 +7818,31 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Description = string(dAtA[iNdEx:postIndex]) + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4023,33 +7870,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RecordType = string(dAtA[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shared = bool(v != 0) - case 12: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parents", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProgressGroup", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4059,23 +7886,27 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Parents = append(m.Parents, string(dAtA[iNdEx:postIndex])) + if m.ProgressGroup == nil { + m.ProgressGroup = &pb.ProgressGroup{} + } + if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4099,7 +7930,7 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } return nil } -func (m *SolveRequest) Unmarshal(dAtA []byte) error { +func (m *VertexStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4122,15 +7953,15 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4158,47 +7989,11 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Definition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definition == nil { - m.Definition = &pb.Definition{} - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4216,148 +8011,21 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } } intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exporter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExporterAttrs == nil { - m.ExporterAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return 
ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if intStringLen < 0 { + return ErrInvalidLengthControl } - m.ExporterAttrs[mapkey] = mapvalue + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4385,13 +8053,51 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Session = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4401,27 +8107,28 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Frontend = string(dAtA[iNdEx:postIndex]) + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4448,109 +8155,105 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FrontendAttrs == nil { - m.FrontendAttrs = make(map[string]string) + if m.Started == nil { + m.Started = new(time.Time) } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.FrontendAttrs[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
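(Editorial aside, not part of the generated patch: every `Unmarshal` hunk in this file repeats the same hand-inlined wire-format decoding — a base-128 varint whose low three bits are the wire type and whose remaining bits are the field number. A minimal, self-contained sketch of that pattern follows; the helper name `decodeVarint` and the sample bytes are illustrative assumptions, not identifiers from this diff.)

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inline "for shift" loops in the generated code:
// seven payload bits per byte, high bit set on every byte except the last.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of the varint
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes the value 150; the tag byte 0x1a encodes
	// field number 3 with wire type 2 (length-delimited).
	v, _, _ := decodeVarint([]byte{0x96, 0x01})
	tag, _, _ := decodeVarint([]byte{0x1a})
	fmt.Println(v, tag>>3, tag&0x7) // 150 3 2
}
```

The generated code inlines this loop at every field rather than calling a shared helper; avoiding per-field call overhead is a stated design goal of the gogo/protobuf generator.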
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4560,30 +8263,29 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4593,29 +8295,49 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex])) + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4625,120 +8347,25 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FrontendInputs == nil { - m.FrontendInputs = make(map[string]*pb.Definition) - } - var mapkey string - var mapvalue *pb.Definition - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthControl - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthControl - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &pb.Definition{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Msg == nil { + m.Msg = []byte{} } - m.FrontendInputs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4762,7 +8389,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CacheOptions) Unmarshal(dAtA []byte) error { +func (m *VertexWarning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4785,17 +8412,102 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") + return fmt.Errorf("proto: VertexWarning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VertexWarning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Short == nil { + m.Short = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -4805,27 +8517,27 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex]) + m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx)) + copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4853,11 +8565,11 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex])) + m.Url = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4884,107 +8596,16 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExportAttrsDeprecated == nil { - m.ExportAttrsDeprecated = make(map[string]string) + if m.Info == nil { + m.Info = &pb.SourceInfo{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - 
postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ExportAttrsDeprecated[mapkey] = mapvalue iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5011,16 +8632,67 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Exports = append(m.Exports, &CacheOptionsEntry{}) - if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, &pb.Range{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -5030,24 +8702,24 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Imports = append(m.Imports, &CacheOptionsEntry{}) - if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex default: @@ -5072,7 +8744,7 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { +func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5095,15 +8767,15 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") + return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5131,11 +8803,62 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5162,103 +8885,10 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attrs == nil { - m.Attrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Record = append(m.Record, &types.WorkerRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Attrs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -5282,7 +8912,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { } return nil } -func (m *SolveResponse) Unmarshal(dAtA []byte) error { +func (m *InfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5305,15 +8935,66 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
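(Editorial aside, not part of the generated patch: the long `mapkey`/`mapvalue` loops above — for `FrontendAttrs`, `ExporterResponse`, and the other `map<string,string>` fields — all decode the same shape. Protobuf encodes a map field as a repeated entry message with field 1 holding the key and field 2 the value. A self-contained sketch under that assumption; `readVarint` and `decodeMapEntry` are illustrative names, not identifiers from this diff.)

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint reads one base-128 varint starting at index i.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// decodeMapEntry decodes one serialized map entry message into (key, value),
// the per-entry logic that the generated code inlines inside its map loops.
func decodeMapEntry(entry []byte) (key, value string, err error) {
	for i := 0; i < len(entry); {
		tag, n, err := readVarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = n
		if tag&0x7 != 2 { // both key and value are length-delimited strings
			return "", "", errors.New("unexpected wire type")
		}
		length, n, err := readVarint(entry, i)
		if err != nil {
			return "", "", err
		}
		i = n
		if i+int(length) > len(entry) { // bounds check, as postIndex > l above
			return "", "", errors.New("unexpected EOF")
		}
		s := string(entry[i : i+int(length)])
		i += int(length)
		switch tag >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		}
	}
	return key, value, nil
}

func main() {
	// Hand-encoded entry: 0x0a = field 1/wire 2, 0x12 = field 2/wire 2.
	entry := []byte{0x0a, 0x01, 'k', 0x12, 0x01, 'v'}
	k, v, _ := decodeMapEntry(entry)
	fmt.Println(k, v) // k v
}
```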
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5340,103 +9021,12 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExporterResponse == nil { - m.ExporterResponse = make(map[string]string) + if m.BuildkitVersion == nil { + m.BuildkitVersion = &types.BuildkitVersion{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ExporterResponse[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = 
preIndex @@ -5460,7 +9050,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatusRequest) Unmarshal(dAtA []byte) error { +func (m *BuildHistoryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5483,13 +9073,33 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: BuildHistoryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } @@ -5521,6 +9131,26 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { } m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EarlyExit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EarlyExit = bool(v != 0) default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -5543,7 +9173,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatusResponse) Unmarshal(dAtA []byte) error { +func (m *BuildHistoryEvent) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5566,17 +9196,17 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: BuildHistoryEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BuildHistoryEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -5586,63 +9216,14 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Type |= BuildHistoryEventType(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertexes = append(m.Vertexes, &Vertex{}) - if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, &VertexStatus{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5669,42 +9250,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Logs = append(m.Logs, &VertexLog{}) - if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Record == nil { + m.Record = &BuildHistoryRecord{} } - m.Warnings = append(m.Warnings, &VertexWarning{}) - if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5730,7 +9279,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *Vertex) Unmarshal(dAtA []byte) error { +func (m *BuildHistoryRecord) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5753,15 +9302,15 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Vertex: wiretype end group for non-group") + return fmt.Errorf("proto: BuildHistoryRecord: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BuildHistoryRecord: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5789,11 +9338,11 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field Frontend", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5821,13 +9370,13 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + m.Frontend = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -5837,47 +9386,122 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.Cached = bool(v != 0) - case 5: + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exporters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5904,16 +9528,14 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Started == nil { - m.Started = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + m.Exporters = append(m.Exporters, &Exporter{}) + if err := m.Exporters[len(m.Exporters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5940,18 +9562,18 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Completed == nil { - m.Completed = new(time.Time) + if m.Error == nil { + m.Error = &rpc.Status{} } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -5961,27 +9583,31 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + if m.CreatedAt == nil { + m.CreatedAt = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 8: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6008,69 +9634,18 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProgressGroup == nil { - m.ProgressGroup = &pb.ProgressGroup{} + if m.CompletedAt == nil { + m.CompletedAt = new(time.Time) } - if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CompletedAt, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 
{ - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6080,61 +9655,33 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl + if m.Logs == nil { + m.Logs = &Descriptor{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Logs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6144,65 +9691,122 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - case 6: + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6229,13 +9833,16 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + if m.Result == nil { + m.Result = &BuildResultInfo{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 
11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6262,16 +9869,128 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Started == nil { - m.Started = new(time.Time) + if m.Results == nil { + m.Results = make(map[string]*BuildResultInfo) } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *BuildResultInfo + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthControl + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthControl + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildResultInfo{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Results[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6298,69 +10017,18 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Completed == nil { - m.Completed = new(time.Time) + if m.Trace == nil { + m.Trace = &Descriptor{} } - if err := 
github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + if err := m.Trace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexLog) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6370,29 +10038,17 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + m.Pinned = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumCachedSteps", wireType) } - var msglen int + m.NumCachedSteps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6402,30 +10058,16 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.NumCachedSteps |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumTotalSteps", wireType) } - m.Stream = 0 + m.NumTotalSteps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6435,16 +10077,16 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Stream |= int64(b&0x7F) << shift + 
m.NumTotalSteps |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) - } - var byteLen int + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumCompletedSteps", wireType) + } + m.NumCompletedSteps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6454,26 +10096,11 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.NumCompletedSteps |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) - if m.Msg == nil { - m.Msg = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -6496,7 +10123,7 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } return nil } -func (m *VertexWarning) Unmarshal(dAtA []byte) error { +func (m *UpdateBuildHistoryRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6519,15 +10146,15 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VertexWarning: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateBuildHistoryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VertexWarning: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateBuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6555,13 +10182,13 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) } - m.Level = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6571,16 +10198,17 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Level |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Pinned = bool(v != 0) case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) } - var byteLen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6590,31 +10218,119 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + m.Delete = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthControl } - postIndex := iNdEx + byteLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateBuildHistoryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateBuildHistoryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateBuildHistoryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthControl } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...) - if m.Short == nil { - m.Short = []byte{} + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Descriptor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl } - iNdEx = postIndex - case 4: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6624,27 +10340,27 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx)) - copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex]) + m.MediaType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6672,13 +10388,13 @@ func (m *VertexWarning) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Url = string(dAtA[iNdEx:postIndex]) + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) } - var msglen int + m.Size_ = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6688,31 +10404,14 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Size_ |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &pb.SourceInfo{} - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6729,20 +10428,113 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, &pb.Range{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + 
postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -6766,7 +10558,7 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error { } return nil } -func (m *BytesMessage) Unmarshal(dAtA []byte) error { +func (m *BuildResultInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6789,17 +10581,17 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + return fmt.Errorf("proto: BuildResultInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BuildResultInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowControl @@ -6809,24 +10601,60 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthControl } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthControl } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} + if m.Result == nil { + m.Result = &Descriptor{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attestations = append(m.Attestations, &Descriptor{}) + if err := m.Attestations[len(m.Attestations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -6851,7 +10679,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { +func (m *Exporter) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6874,15 +10702,15 @@ func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Exporter: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Exporter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6910,62 +10738,11 @@ func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6992,10 +10769,103 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Record = append(m.Record, &types.WorkerRecord{}) - if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Attrs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git 
a/api/services/control/control.proto b/api/services/control/control.proto index a468a293af88..327c9eeaf420 100644 --- a/api/services/control/control.proto +++ b/api/services/control/control.proto @@ -6,6 +6,9 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/moby/buildkit/solver/pb/ops.proto"; import "github.com/moby/buildkit/api/types/worker.proto"; +// import "github.com/containerd/containerd/api/types/descriptor.proto"; +import "github.com/gogo/googleapis/google/rpc/status.proto"; +import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; option (gogoproto.sizer_all) = true; option (gogoproto.marshaler_all) = true; @@ -18,7 +21,10 @@ service Control { rpc Status(StatusRequest) returns (stream StatusResponse); rpc Session(stream BytesMessage) returns (stream BytesMessage); rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); - // rpc Info(InfoRequest) returns (InfoResponse); + rpc Info(InfoRequest) returns (InfoResponse); + + rpc ListenBuildHistory(BuildHistoryRequest) returns (stream BuildHistoryEvent); + rpc UpdateBuildHistory(UpdateBuildHistoryRequest) returns (UpdateBuildHistoryResponse); } message PruneRequest { @@ -62,6 +68,8 @@ message SolveRequest { CacheOptions Cache = 8 [(gogoproto.nullable) = false]; repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ]; map<string, pb.Definition> FrontendInputs = 10; + bool Internal = 11; // Internal builds are not recorded in build history + moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12; } message CacheOptions { @@ -157,3 +165,73 @@ message ListWorkersRequest { message ListWorkersResponse { repeated moby.buildkit.v1.types.WorkerRecord record = 1; } + +message InfoRequest {} + +message InfoResponse { + moby.buildkit.v1.types.BuildkitVersion buildkitVersion = 1; +} + +message BuildHistoryRequest { + bool ActiveOnly = 1; + string Ref = 2; + bool EarlyExit = 3; +} + +enum BuildHistoryEventType { + STARTED = 0; + COMPLETE = 1; + DELETED = 2; +} + +message BuildHistoryEvent { + BuildHistoryEventType type = 1; + BuildHistoryRecord record = 2; +} + +message BuildHistoryRecord { + string Ref = 1; + string Frontend = 2; + map<string, string> FrontendAttrs = 3; + repeated Exporter Exporters = 4; + google.rpc.Status error = 5; + google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp CompletedAt = 7 [(gogoproto.stdtime) = true]; + Descriptor logs = 8; + map<string, string> ExporterResponse = 9; + BuildResultInfo Result = 10; + map<string, BuildResultInfo> Results = 11; + int32 Generation = 12; + Descriptor trace = 13; + bool pinned = 14; + int32 numCachedSteps = 15; + int32 numTotalSteps = 16; + int32 numCompletedSteps = 17; + // TODO: tags + // TODO: unclipped logs +} + +message UpdateBuildHistoryRequest { + string Ref = 1; + bool Pinned = 2; + bool Delete = 3; +} + +message UpdateBuildHistoryResponse {} + +message Descriptor { + string media_type = 1; + string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 size = 3; + map<string, string> annotations = 5; +} + +message BuildResultInfo { + Descriptor Result = 1; + repeated Descriptor Attestations = 2; +} + +message Exporter { + string Type = 1; + map<string, string> Attrs = 2; +} diff --git a/api/types/worker.pb.go b/api/types/worker.pb.go index 54cbd605e14c..e1b3928cba52 100644 --- a/api/types/worker.pb.go +++ b/api/types/worker.pb.go @@ -29,6 +29,7 @@ type WorkerRecord struct { Labels map[string]string 
`protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"` GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` + BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -95,6 +96,13 @@ func (m *WorkerRecord) GetGCPolicy() []*GCPolicy { return nil } +func (m *WorkerRecord) GetBuildkitVersion() *BuildkitVersion { + if m != nil { + return m.BuildkitVersion + } + return nil +} + type GCPolicy struct { All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` @@ -166,39 +174,106 @@ func (m *GCPolicy) GetFilters() []string { return nil } +type BuildkitVersion struct { + Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildkitVersion) Reset() { *m = BuildkitVersion{} } +func (m *BuildkitVersion) String() string { return proto.CompactTextString(m) } +func (*BuildkitVersion) ProtoMessage() {} +func (*BuildkitVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_e4ff6184b07e587a, []int{2} +} +func (m *BuildkitVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildkitVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildkitVersion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BuildkitVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildkitVersion.Merge(m, src) +} +func (m *BuildkitVersion) XXX_Size() int { + return m.Size() +} +func (m *BuildkitVersion) XXX_DiscardUnknown() { + xxx_messageInfo_BuildkitVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildkitVersion proto.InternalMessageInfo + +func (m *BuildkitVersion) GetPackage() string { + if m != nil { + return m.Package + } + return "" +} + +func (m *BuildkitVersion) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *BuildkitVersion) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + func init() { proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry") proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy") + proto.RegisterType((*BuildkitVersion)(nil), "moby.buildkit.v1.types.BuildkitVersion") } func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) } var fileDescriptor_e4ff6184b07e587a = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40, - 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15, - 
0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01, - 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89, - 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41, - 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13, - 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30, - 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3, - 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b, - 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07, - 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91, - 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb, - 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf, - 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0, - 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b, - 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65, - 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70, - 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5, - 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8, - 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99, - 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6, - 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29, - 0x02, 0x00, 0x00, + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x25, 0xc9, 0xee, 0xd2, 0xb8, 0x11, 0x20, 0x0b, 0xa1, 0x28, 0x42, 0x25, 0xca, 0x85, 0x1e, + 0xc0, 0x59, 0x96, 0x0b, 0x20, 0x4e, 0xa1, 0x08, 0x56, 0xe2, 0xb0, 0xf8, 0x00, 0x67, 0x3b, 0xeb, + 0x86, 0x28, 0xee, 0xda, 0x72, 0x9c, 0x40, 0xfe, 0xb0, 0x47, 0xbe, 0x00, 0xa1, 0x1e, 0xf8, 0x0e, + 0x64, 0x27, 0x69, 0x4b, 0xd9, 0xde, 0xe6, 0xcd, 0xbc, 0xf7, 0x3c, 0xf3, 0x64, 0x10, 0x7c, 0x17, + 0xaa, 0x62, 0x0a, 0x49, 0x25, 0xb4, 0x80, 0x8f, 0x56, 0x82, 0x76, 0x88, 0x36, 0x25, 0xbf, 0xae, + 0x4a, 0x8d, 0xda, 0x17, 0x48, 0x77, 0x92, 0xd5, 0xd1, 0xf3, 0xa2, 0xd4, 0xdf, 0x1a, 0x8a, 0x72, + 0xb1, 0x4a, 0x0b, 0x51, 0x88, 0xd4, 0xd2, 0x69, 0xb3, 0xb4, 0xc8, 0x02, 0x5b, 0xf5, 0x36, 0xd1, + 0xb3, 0x3d, 0xba, 0x71, 0x4c, 0x47, 0xc7, 0xb4, 0x16, 0xbc, 0x65, 0x2a, 0x95, 0x34, 0x15, 0xb2, + 0xee, 0xd9, 0xc9, 0x1f, 0x17, 0x04, 0x5f, 0xed, 0x16, 0x98, 0xe5, 0x42, 0x5d, 0xc3, 0x7b, 0xc0, + 0xbd, 0x5c, 0x84, 0x4e, 0xec, 0xcc, 0x7d, 0xec, 0x5e, 0x2e, 0xe0, 0x47, 0x70, 0xf6, 0x89, 0x50, + 0xc6, 0xeb, 0xd0, 0x8d, 0xbd, 0xf9, 0xf4, 0xe2, 0x1c, 0xdd, 0xbe, 0x26, 0xda, 0x77, 0x41, 0xbd, + 0xe4, 0xfd, 0x8d, 0x56, 0x1d, 0x1e, 0xf4, 0xf0, 0x1c, 0xf8, 0x92, 0x13, 0xbd, 0x14, 0x6a, 0x55, + 0x87, 0x9e, 0x35, 0x0b, 0x90, 0xa4, 0xe8, 0x6a, 0x68, 0x66, 0x27, 0xeb, 0x5f, 0x4f, 0xee, 0xe0, + 0x1d, 0x09, 0xbe, 0x05, 0x93, 0x0f, 0xef, 0xae, 0x04, 0x2f, 0xf3, 0x2e, 0x3c, 0xb1, 0x82, 0xf8, + 0xd8, 0xeb, 0x23, 0x0f, 0x6f, 0x15, 0xf0, 0x33, 0xb8, 
0x9f, 0x0d, 0xbc, 0x2f, 0x4c, 0xd5, 0xa5, + 0xb8, 0x09, 0x4f, 0x63, 0x67, 0x3e, 0xbd, 0x78, 0x7a, 0xcc, 0xe4, 0x80, 0x8e, 0x0f, 0xf5, 0xd1, + 0x6b, 0x30, 0xdd, 0xbb, 0x0c, 0x3e, 0x00, 0x5e, 0xc5, 0xba, 0x21, 0x2c, 0x53, 0xc2, 0x87, 0xe0, + 0xb4, 0x25, 0xbc, 0x61, 0xa1, 0x6b, 0x7b, 0x3d, 0x78, 0xe3, 0xbe, 0x72, 0x92, 0x1f, 0xbb, 0x5b, + 0x8c, 0x8e, 0x70, 0x6e, 0x75, 0x13, 0x6c, 0x4a, 0x98, 0x80, 0xa0, 0x62, 0x4c, 0x2e, 0x1a, 0x45, + 0xb4, 0x59, 0xd4, 0xc8, 0x3d, 0xfc, 0x4f, 0x0f, 0x3e, 0x06, 0xbe, 0xc1, 0x59, 0xa7, 0x99, 0xc9, + 0xcf, 0x10, 0x76, 0x0d, 0x18, 0x82, 0xbb, 0xcb, 0x92, 0x6b, 0xa6, 0x6a, 0x1b, 0x95, 0x8f, 0x47, + 0x98, 0x90, 0xff, 0x72, 0x30, 0x64, 0x49, 0xf2, 0x8a, 0x14, 0x6c, 0x58, 0x7e, 0x84, 0x66, 0xd2, + 0x0e, 0x61, 0xf5, 0x27, 0x8c, 0x10, 0x46, 0x60, 0xa2, 0x58, 0x5b, 0xda, 0x91, 0x67, 0x47, 0x5b, + 0x9c, 0x05, 0xeb, 0xcd, 0xcc, 0xf9, 0xb9, 0x99, 0x39, 0xbf, 0x37, 0x33, 0x87, 0x9e, 0xd9, 0xaf, + 0xf5, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x19, 0xcf, 0xd5, 0xdf, 0x02, 0x00, 0x00, } func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { @@ -225,6 +300,18 @@ func (m *WorkerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.BuildkitVersion != nil { + { + size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorker(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if len(m.GCPolicy) > 0 { for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- { { @@ -338,6 +425,54 @@ func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *BuildkitVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildkitVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildkitVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x1a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.Package) > 0 { + i -= len(m.Package) + copy(dAtA[i:], m.Package) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Package))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintWorker(dAtA []byte, offset int, v uint64) int { offset -= sovWorker(v) base := offset @@ -379,6 +514,10 @@ func (m *WorkerRecord) Size() (n int) { n += 1 + l + sovWorker(uint64(l)) } } + if m.BuildkitVersion != nil { + l = m.BuildkitVersion.Size() + n += 1 + l + sovWorker(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -412,6 +551,30 @@ func (m *GCPolicy) Size() (n int) { return n } +func (m *BuildkitVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Package) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovWorker(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -674,6 +837,42 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BuildkitVersion == nil { + m.BuildkitVersion = &BuildkitVersion{} + } + if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorker(dAtA[iNdEx:]) @@ -837,6 +1036,153 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error { } return nil } +func (m *BuildkitVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildkitVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildkitVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Package", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Package = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorker(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipWorker(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/api/types/worker.proto b/api/types/worker.proto index 82dd7ad65145..476fcc62e104 100644 --- a/api/types/worker.proto +++ b/api/types/worker.proto @@ -14,6 +14,7 @@ message WorkerRecord { map<string, string> Labels = 2; repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false]; repeated GCPolicy GCPolicy = 4; + BuildkitVersion BuildkitVersion = 5; } message GCPolicy { @@ -22,3 +23,9 @@ int64 keepBytes = 3; repeated string filters = 4; } + +message BuildkitVersion { + string package = 1; + string version = 2; + string revision = 3; +} diff --git a/cache/blobs.go b/cache/blobs.go index 8d2beefd0654..716be9093471 100644 --- a/cache/blobs.go +++ b/cache/blobs.go @@ -1,19 +1,15 @@ package cache import ( - "compress/gzip" "context" "fmt" - "io" "os" "strconv" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" "github.com/containerd/containerd/diff/walking" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" - "github.com/klauspost/compress/zstd" "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" @@ -40,6 +36,14 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo if _, ok := leases.FromContext(ctx); !ok { return errors.Errorf("missing lease requirement for computeBlobChain") } + if !createIfNeeded { + sr.mu.Lock() + if sr.equalMutable != nil { + sr.mu.Unlock() + return nil + } + sr.mu.Unlock() + } if err := sr.Finalize(ctx); err != nil { return err @@ -57,8 +61,6 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter) } -type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) - func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error { eg, ctx := errgroup.WithContext(ctx) switch sr.kind() { @@ -92,28 +94,8 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool return nil, errors.WithStack(ErrNoBlobs) } - var mediaType string - var compressorFunc compressor - var finalize func(context.Context, content.Store) (map[string]string, error) - switch comp.Type { - case compression.Uncompressed: - mediaType = ocispecs.MediaTypeImageLayer - case compression.Gzip: - compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { - return gzipWriter(comp)(dest) - } - mediaType = ocispecs.MediaTypeImageLayerGzip - case compression.EStargz: - compressorFunc, finalize = compressEStargz(comp) - mediaType = ocispecs.MediaTypeImageLayerGzip - 
case compression.Zstd: - compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { - return zstdWriter(comp)(dest) - } - mediaType = ocispecs.MediaTypeImageLayer + "+zstd" - default: - return nil, errors.Errorf("unknown layer compression type: %q", comp.Type) - } + compressorFunc, finalize := comp.Type.Compress(ctx, comp) + mediaType := comp.Type.MediaType() var lowerRef *immutableRef switch sr.kind() { @@ -206,7 +188,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } } - if desc.Digest == "" && !isTypeWindows(sr) && (comp.Type == compression.Zstd || comp.Type == compression.EStargz) { + if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() { // These compression types aren't supported by containerd differ. So try to compute diff on buildkit side. // This case can happen on containerd worker + non-overlayfs snapshotter (e.g. native). // See also: https://github.com/containerd/containerd/issues/4263 @@ -433,7 +415,7 @@ func isTypeWindows(sr *immutableRef) bool { // ensureCompression ensures the specified ref has the blob of the specified compression Type. func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { desc, err := ref.ociDesc(ctx, ref.descHandlers, true) if err != nil { return nil, err @@ -480,38 +462,3 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression. }) return err } - -func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { - return func(dest io.Writer) (io.WriteCloser, error) { - level := gzip.DefaultCompression - if comp.Level != nil { - level = *comp.Level - } - return gzip.NewWriterLevel(dest, level) - } -} - -func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { - return func(dest io.Writer) (io.WriteCloser, error) { - level := zstd.SpeedDefault - if comp.Level != nil { - level = toZstdEncoderLevel(*comp.Level) - } - return zstd.NewWriter(dest, zstd.WithEncoderLevel(level)) - } -} - -func toZstdEncoderLevel(level int) zstd.EncoderLevel { - // map zstd compression levels to go-zstd levels - // once we also have c based implementation move this to helper pkg - if level < 0 { - return zstd.SpeedDefault - } else if level < 3 { - return zstd.SpeedFastest - } else if level < 7 { - return zstd.SpeedDefault - } else if level < 9 { - return zstd.SpeedBetterCompression - } - return zstd.SpeedBestCompression -} diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go index fcb8850a02a9..ce41275e6b74 100644 --- a/cache/blobs_linux.go +++ b/cache/blobs_linux.go @@ -12,6 +12,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/overlay" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -24,7 +25,7 @@ var emptyDesc = ocispecs.Descriptor{} // diff between lower and upper snapshot. If the passed mounts cannot // be computed (e.g. because the mounts aren't overlayfs), it returns // an error. 
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) { // Get upperdir location if mounts are overlayfs that can be processed by this differ. upperdir, err := overlay.GetUpperdir(lower, upper) if err != nil { @@ -57,11 +58,14 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper if err != nil { return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream") } - err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower) - compressed.Close() - if err != nil { + // Close ensures compressorFunc performs its finalization work. + defer compressed.Close() + if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil { return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff") } + if err := compressed.Close(); err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to close compressed diff writer") + } if labels == nil { labels = map[string]string{} } diff --git a/cache/blobs_nolinux.go b/cache/blobs_nolinux.go index 2ccee770e2a8..1567768c1939 100644 --- a/cache/blobs_nolinux.go +++ b/cache/blobs_nolinux.go @@ -6,11 +6,12 @@ package cache import ( "context" + "github.com/moby/buildkit/util/compression" "github.com/containerd/containerd/mount" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) { return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported") } diff --git a/cache/compression.go b/cache/compression.go new file mode 100644 index 000000000000..bede8d932278 --- /dev/null +++ b/cache/compression.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package cache + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache/config" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + return refCfg.Compression.Force +} diff --git a/cache/compression_nydus.go b/cache/compression_nydus.go new file mode 100644 index 000000000000..48b61a4b36ff --- /dev/null +++ b/cache/compression_nydus.go @@ -0,0 +1,147 @@ +//go:build nydus +// +build nydus + +package cache + +import ( + "compress/gzip" + "context" + "encoding/json" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + 
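(Reviewer note: the `!nydus` stub in `cache/compression.go` above and the `nydus`-tagged file being added here form a build-tag pair — exactly one definition of `needsForceCompression` is compiled into a given binary, chosen at build time by the `nydus` tag. A minimal sketch of the pattern, with illustrative file and function names rather than anything from this PR:)

```go
// feature_default.go — compiled only when the nydus tag is absent.
// A sibling file declaring "//go:build nydus" would carry the replacement
// definition, selected with: go build -tags nydus
//
//go:build !nydus

package cache

// forceCompression is the fallback used when nydus support is compiled out:
// only an explicit request can force recompression of a layer blob.
func forceCompression(requested bool) bool {
	return requested
}
```

Because both variants keep identical signatures, callers never need to know which one they got; the legacy `// +build` lines kept next to the `//go:build` constraints preserve compatibility with pre-1.17 Go toolchains.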
+func init() { + additionalAnnotations = append( + additionalAnnotations, + nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs, + ) +} + +// Nydus compression type can't be mixed with other compression types in the same image, +// so if `source` is this kind of layer, but the target is another compression type, we +// should force compression. +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + if refCfg.Compression.Force { + return true + } + isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source) + if refCfg.Compression.Type == compression.Nydus { + return !isNydusBlob + } + return isNydusBlob +} + +// MergeNydus does two steps: +// 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer. +// 2. Merges all nydus bootstraps into a final bootstrap (will be used as an extra layer). +// The nydus bootstrap size is very small, so the merge operation is fast. +func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, s session.Group) (*ocispecs.Descriptor, error) { + iref, ok := ref.(*immutableRef) + if !ok { + return nil, errors.Errorf("unsupported ref type %T", ref) + } + refs := iref.layerChain() + if len(refs) == 0 { + return nil, errors.Errorf("refs can't be empty") + } + + // Extracts nydus bootstrap from nydus format for each layer. + var cm *cacheManager + layers := []nydusify.Layer{} + blobIDs := []string{} + for _, ref := range refs { + blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) + if err != nil { + return nil, errors.Wrapf(err, "get compression blob %q", comp.Type) + } + ra, err := ref.cm.ContentStore.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, errors.Wrapf(err, "get reader for compression blob %q", comp.Type) + } + defer ra.Close() + if cm == nil { + cm = ref.cm + } + blobIDs = append(blobIDs, blobDesc.Digest.Hex()) + layers = append(layers, nydusify.Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{ + WithTar: true, + }); err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. 
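+ // (io.MultiWriter below tees the uncompressed bootstrap stream into both the
+ // gzip writer and a SHA-256 digester, so both digests are available at commit
+ // time: cw.Digest() for the compressed bytes, and the digester's value for
+ // the containerdUncompressed label.)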
+ cw, err := content.OpenWriter(ctx, cm.ContentStore, content.WithRef("nydus-merge-"+iref.getChainID().String())) + if err != nil { + return nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + if _, err := io.Copy(compressed, pr); err != nil { + return nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, errors.Wrap(err, "close content store writer") + } + + info, err := cm.ContentStore.Info(ctx, compressedDgst) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, errors.Wrap(err, "marshal blob ids") + } + + desc := ocispecs.Descriptor{ + Digest: compressedDgst, + Size: info.Size, + MediaType: ocispecs.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + // Use this annotation to identify nydus bootstrap layer. + nydusify.LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. + nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &desc, nil +} diff --git a/cache/contenthash/checksum.go b/cache/contenthash/checksum.go index a59523dd2956..dcf424a6b4fc 100644 --- a/cache/contenthash/checksum.go +++ b/cache/contenthash/checksum.go @@ -11,13 +11,13 @@ import ( "strings" "sync" - "github.com/docker/docker/pkg/fileutils" iradix "github.com/hashicorp/go-immutable-radix" "github.com/hashicorp/golang-lru/simplelru" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/locker" + "github.com/moby/patternmatcher" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -79,8 +79,8 @@ type includedPath struct { path string record *CacheRecord included bool - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo } type cacheManager struct { @@ -496,17 +496,17 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o endsInSep := len(p) != 0 && p[len(p)-1] == filepath.Separator p = keyPath(p) - var includePatternMatcher *fileutils.PatternMatcher + var includePatternMatcher *patternmatcher.PatternMatcher if len(opts.IncludePatterns) != 0 { - includePatternMatcher, err = fileutils.NewPatternMatcher(opts.IncludePatterns) + includePatternMatcher, err = patternmatcher.New(opts.IncludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns) } } - var excludePatternMatcher *fileutils.PatternMatcher + var excludePatternMatcher *patternmatcher.PatternMatcher if len(opts.ExcludePatterns) != 0 { - excludePatternMatcher, err = fileutils.NewPatternMatcher(opts.ExcludePatterns) + excludePatternMatcher, err = patternmatcher.New(opts.ExcludePatterns) if err != nil { return 
nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns) } @@ -695,21 +695,21 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o func shouldIncludePath( candidate string, - includePatternMatcher *fileutils.PatternMatcher, - excludePatternMatcher *fileutils.PatternMatcher, + includePatternMatcher *patternmatcher.PatternMatcher, + excludePatternMatcher *patternmatcher.PatternMatcher, maybeIncludedPath *includedPath, parentDir *includedPath, ) (bool, error) { var ( m bool - matchInfo fileutils.MatchInfo + matchInfo patternmatcher.MatchInfo err error ) if includePatternMatcher != nil { if parentDir != nil { m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, parentDir.includeMatchInfo) } else { - m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{}) } if err != nil { return false, errors.Wrap(err, "failed to match includepatterns") @@ -724,7 +724,7 @@ func shouldIncludePath( if parentDir != nil { m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, parentDir.excludeMatchInfo) } else { - m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{}) } if err != nil { return false, errors.Wrap(err, "failed to match excludepatterns") @@ -799,7 +799,7 @@ func splitWildcards(p string) (d1, d2 string) { p2 = append(p2, p) } } - return filepath.Join(p1...), filepath.Join(p2...) + return path.Join(p1...), path.Join(p2...) } func containsWildcards(name string) bool { @@ -1015,7 +1015,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr Type: CacheRecordTypeSymlink, Linkname: filepath.ToSlash(link), } - k := []byte(filepath.Join("/", filepath.ToSlash(p))) + k := []byte(path.Join("/", filepath.ToSlash(p))) k = convertPathToKey(k) txn.Insert(k, cr) return nil @@ -1024,15 +1024,15 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr return err } - err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error { + err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error { if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) + return errors.Wrapf(err, "failed to walk %s", itemPath) } - rel, err := filepath.Rel(mp, path) + rel, err := filepath.Rel(mp, itemPath) if err != nil { return err } - k := []byte(filepath.Join("/", filepath.ToSlash(rel))) + k := []byte(path.Join("/", filepath.ToSlash(rel))) if string(k) == "/" { k = []byte{} } @@ -1043,7 +1043,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr } if fi.Mode()&os.ModeSymlink != 0 { cr.Type = CacheRecordTypeSymlink - link, err := os.Readlink(path) + link, err := os.Readlink(itemPath) if err != nil { return err } diff --git a/cache/contenthash/checksum_test.go b/cache/contenthash/checksum_test.go index 713c7a560913..cfdbfe2b465e 100644 --- a/cache/contenthash/checksum_test.go +++ b/cache/contenthash/checksum_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -43,14 +42,12 @@ const ( func TestChecksumSymlinkNoParentScan(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer 
os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD aa dir", @@ -72,14 +69,12 @@ func TestChecksumSymlinkNoParentScan(t *testing.T) { func TestChecksumHardlinks(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD abc dir", @@ -155,14 +150,12 @@ func TestChecksumHardlinks(t *testing.T) { func TestChecksumWildcardOrFilter(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD bar file data1", @@ -212,14 +205,16 @@ func TestChecksumWildcardOrFilter(t *testing.T) { func TestChecksumWildcardWithBadMountable(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ref := createRef(t, cm, nil) @@ -232,14 +227,12 @@ func TestChecksumWildcardWithBadMountable(t *testing.T) { func TestSymlinksNoFollow(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD target file data0", @@ -291,14 +284,12 @@ func TestSymlinksNoFollow(t *testing.T) { func TestChecksumBasicFile(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD foo file data0", @@ -449,14 +440,12 @@ func TestChecksumIncludeExclude(t *testing.T) { func testChecksumIncludeExclude(t *testing.T, wildcard bool) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := 
t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD foo file data0", @@ -584,14 +573,12 @@ func testChecksumIncludeExclude(t *testing.T, wildcard bool) { func TestChecksumIncludeDoubleStar(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD prefix dir", @@ -652,14 +639,12 @@ func TestChecksumIncludeDoubleStar(t *testing.T) { func TestChecksumIncludeSymlink(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD data dir", @@ -725,14 +710,16 @@ func TestChecksumIncludeSymlink(t *testing.T) { func TestHandleChange(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD foo file data0", @@ -803,14 +790,16 @@ func TestHandleChange(t *testing.T) { func TestHandleRecursiveDir(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD d0 dir", @@ -852,14 +841,16 @@ func TestHandleRecursiveDir(t *testing.T) { func TestChecksumUnorderedFiles(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD d0 dir", @@ -905,14 +896,12 @@ func TestChecksumUnorderedFiles(t *testing.T) { func TestSymlinkInPathScan(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", 
"buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD d0 dir", @@ -936,14 +925,12 @@ func TestSymlinkInPathScan(t *testing.T) { func TestSymlinkNeedsScan(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD c0 dir", @@ -969,14 +956,12 @@ func TestSymlinkNeedsScan(t *testing.T) { func TestSymlinkAbsDirSuffix(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD c0 dir", @@ -996,14 +981,12 @@ func TestSymlinkAbsDirSuffix(t *testing.T) { func TestSymlinkThroughParent(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD lib dir", @@ -1051,14 +1034,16 @@ func TestSymlinkThroughParent(t *testing.T) { func TestSymlinkInPathHandleChange(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD d1 dir", @@ -1114,14 +1099,12 @@ func TestSymlinkInPathHandleChange(t *testing.T) { func TestPersistence(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - cm, closeBolt := setupCacheManager(t, tmpdir, "native", snapshotter) - defer cm.Close() + cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ch := []string{ "ADD foo file data0", @@ -1155,12 +1138,10 @@ func TestPersistence(t *testing.T) { time.Sleep(100 * time.Millisecond) // saving happens on the background // we can't close snapshotter and open it twice (especially, its internal bbolt store) - cm.Close() - closeBolt() + 
cleanup() getDefaultManager().lru.Purge() - cm, closeBolt = setupCacheManager(t, tmpdir, "native", snapshotter) - defer closeBolt() - defer cm.Close() + cm, cleanup = setupCacheManager(t, tmpdir, "native", snapshotter) + t.Cleanup(cleanup) ref, err = cm.Get(context.TODO(), id, nil) require.NoError(t, err) @@ -1229,6 +1210,8 @@ func setupCacheManager(t *testing.T, tmpdir string, snapshotterName string, snap return cm, func() { db.Close() + md.Close() + cm.Close() } } diff --git a/cache/contenthash/filehash.go b/cache/contenthash/filehash.go index 0b5267101b03..246f8f7f1c80 100644 --- a/cache/contenthash/filehash.go +++ b/cache/contenthash/filehash.go @@ -51,6 +51,8 @@ func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) { hdr.Name = "" // note: empty name is different from current has in docker build. Name is added on recursive directory scan instead hdr.Devmajor = stat.Devmajor hdr.Devminor = stat.Devminor + hdr.Uid = int(stat.Uid) + hdr.Gid = int(stat.Gid) if len(stat.Xattrs) > 0 { hdr.PAXRecords = make(map[string]string, len(stat.Xattrs)) diff --git a/cache/contenthash/tarsum.go b/cache/contenthash/tarsum.go index 182c46118428..456e1ad7f12a 100644 --- a/cache/contenthash/tarsum.go +++ b/cache/contenthash/tarsum.go @@ -37,10 +37,10 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { pax := h.PAXRecords - if len(h.Xattrs) > 0 { //nolint deprecated + if len(h.Xattrs) > 0 { //nolint:staticcheck // field deprecated in stdlib if pax == nil { pax = map[string]string{} - for k, v := range h.Xattrs { //nolint deprecated + for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib pax["SCHILY.xattr."+k] = v } } diff --git a/cache/converter.go b/cache/converter.go index a7e4df193aff..f19412b7086a 100644 --- a/cache/converter.go +++ b/cache/converter.go @@ -7,120 +7,46 @@ import ( "io" "sync" - cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/labels" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -// needsConversion indicates whether a conversion is needed for the specified descriptor to -// be the compressionType. 
-func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (bool, error) { - mediaType := desc.MediaType - switch compressionType { - case compression.Uncompressed: - if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed { - return false, nil - } - case compression.Gzip: - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return false, err - } - if (!images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip) && !esgz { - return false, nil - } - case compression.Zstd: - if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd { - return false, nil - } - case compression.EStargz: - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return false, err - } - if !images.IsLayerType(mediaType) || esgz { - return false, nil - } - default: - return false, fmt.Errorf("unknown compression type during conversion: %q", compressionType) - } - return true, nil -} - // getConverter returns converter function according to the specified compression type. // If no conversion is needed, this returns nil without error. func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) { - if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil { + if needs, err := comp.Type.NeedsConversion(ctx, cs, desc); err != nil { return nil, errors.Wrapf(err, "failed to determine conversion needs") } else if !needs { // No conversion. No need to return an error here. return nil, nil } - c := conversion{target: comp} - - from := compression.FromMediaType(desc.MediaType) - switch from { - case compression.Uncompressed: - case compression.Gzip, compression.Zstd: - c.decompress = func(ctx context.Context, desc ocispecs.Descriptor) (r io.ReadCloser, err error) { - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return nil, err - } else if esgz { - r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size())) - if err != nil { - return nil, err - } - } else { - r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) - if err != nil { - return nil, err - } - } - return &readCloser{r, ra.Close}, nil - } - default: - return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType) + from, err := compression.FromMediaType(desc.MediaType) + if err != nil { + return nil, err } - switch comp.Type { - case compression.Uncompressed: - case compression.Gzip: - c.compress = gzipWriter(comp) - case compression.Zstd: - c.compress = zstdWriter(comp) - case compression.EStargz: - compressorFunc, finalize := compressEStargz(comp) - c.compress = func(w io.Writer) (io.WriteCloser, error) { - return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) - } - c.finalize = finalize - default: - return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type) - } + c := conversion{target: comp} + c.compress, c.finalize = comp.Type.Compress(ctx, comp) + c.decompress = from.Decompress return (&c).convert, nil } type conversion struct { target compression.Config - decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error) - compress func(w io.Writer) (io.WriteCloser, error) - finalize func(context.Context, content.Store) (map[string]string, error) + 
decompress compression.Decompressor + compress compression.Compressor + finalize compression.Finalizer } var bufioPool = sync.Pool{ @@ -151,34 +77,20 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec bufW = bufio.NewWriterSize(w, 128*1024) } defer bufioPool.Put(bufW) - var zw io.WriteCloser = &nopWriteCloser{bufW} - if c.compress != nil { - zw, err = c.compress(zw) - if err != nil { - return nil, err - } + zw, err := c.compress(&iohelper.NopWriteCloser{Writer: bufW}, c.target.Type.MediaType()) + if err != nil { + return nil, err } zw = &onceWriteCloser{WriteCloser: zw} defer zw.Close() // convert this layer diffID := digest.Canonical.Digester() - var rdr io.Reader - if c.decompress == nil { - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - rdr = io.NewSectionReader(ra, 0, ra.Size()) - } else { - rc, err := c.decompress(ctx, desc) - if err != nil { - return nil, err - } - defer rc.Close() - rdr = rc + rdr, err := c.decompress(ctx, cs, desc) + if err != nil { + return nil, err } + defer rdr.Close() if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil { return nil, err } @@ -201,7 +113,7 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec } newDesc := desc - newDesc.MediaType = c.target.Type.DefaultMediaType() + newDesc.MediaType = c.target.Type.MediaType() newDesc.Digest = info.Digest newDesc.Size = info.Size newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()} @@ -217,28 +129,6 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec return &newDesc, nil } -type readCloser struct { - io.ReadCloser - closeFunc func() error -} - -func (rc *readCloser) Close() error { - err1 := rc.ReadCloser.Close() - err2 := rc.closeFunc() - if err1 != nil { - return errors.Wrapf(err1, "failed to close: %v", err2) - } - return err2 -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { - return nil -} - type onceWriteCloser struct { io.WriteCloser closeOnce sync.Once diff --git a/cache/filelist.go b/cache/filelist.go new file mode 100644 index 000000000000..c2c7921fd5db --- /dev/null +++ b/cache/filelist.go @@ -0,0 +1,90 @@ +package cache + +import ( + "archive/tar" + "context" + "encoding/json" + "fmt" + "io" + "path" + "sort" + + cdcompression "github.com/containerd/containerd/archive/compression" + "github.com/moby/buildkit/session" +) + +const keyFileList = "filelist" +
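Editor's note: the FileList implementation that follows decompresses a layer blob and collects its tar entry names. Below is a minimal standalone sketch of the same technique, assuming a plain gzip-compressed tar stream; the real code uses containerd's DecompressStream, which auto-detects the format, and the package and function names here are hypothetical.

```go
package sketch

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"path"
	"sort"
)

// listTarEntries returns the sorted, cleaned entry names of a gzipped tar
// stream. Names stay in tar (AUFS whiteout) form; they are only cleaned.
func listTarEntries(blob io.Reader) ([]string, error) {
	gz, err := gzip.NewReader(blob)
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	var files []string
	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			return nil, err
		}
		files = append(files, path.Clean(hdr.Name))
	}
	sort.Strings(files)
	return files, nil
}
```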
+// FileList returns an ordered list of files present in the cache record that were +// changed compared to the parent. The paths of the files are in the same format as they +// are in the tar stream (AUFS whiteout format). If the reference does not have a +// blob associated with it, the list is empty. +func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) { + res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) { + dt, err := sr.GetExternal(keyFileList) + if err == nil && dt != nil { + var files []string + if err := json.Unmarshal(dt, &files); err != nil { + return nil, err + } + return files, nil + } + + if sr.getBlob() == "" { + return nil, nil + } + + // lazy blobs need to be pulled first + if err := sr.Extract(ctx, s); err != nil { + return nil, err + } + + desc, err := sr.ociDesc(ctx, sr.descHandlers, false) + if err != nil { + return nil, err + } + + ra, err := sr.cm.ContentStore.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + r, err := cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + defer r.Close() + + var files []string + + rdr := tar.NewReader(r) + for { + hdr, err := rdr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + name := path.Clean(hdr.Name) + files = append(files, name) + } + sort.Strings(files) + + dt, err = json.Marshal(files) + if err != nil { + return nil, err + } + if err := sr.SetExternal(keyFileList, dt); err != nil { + return nil, err + } + return files, nil + }) + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + return res.([]string), nil +} diff --git a/cache/manager.go b/cache/manager.go index 8f91d3c7a0a6..d579a6007ba0 100644 --- a/cache/manager.go +++ b/cache/manager.go @@ -222,10 +222,8 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, id := identity.NewID() snapshotID := chainID.String() - blobOnly := true if link != nil { snapshotID = link.getSnapshotID() - blobOnly = link.getBlobOnly() go link.Release(context.TODO()) } @@ -289,7 +287,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, rec.queueChainID(chainID) rec.queueBlobChainID(blobChainID) rec.queueSnapshotID(snapshotID) - rec.queueBlobOnly(blobOnly) + rec.queueBlobOnly(true) rec.queueMediaType(desc.MediaType) rec.queueBlobSize(desc.Size) rec.appendURLs(desc.URLs) @@ -301,7 +299,14 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, cm.records[id] = rec - return rec.ref(true, descHandlers, nil), nil + ref := rec.ref(true, descHandlers, nil) + if s := unlazySessionOf(opts...); s != nil { + if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil { + return nil, err + } + } + + return ref, nil } // init loads all snapshots from metadata state and tries to load the records @@ -458,6 +463,13 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt cacheMetadata: md, } + // TODO:(sipsma) this is a kludge to deal with a bug in v0.10.{0,1} where + // merge and diff refs didn't have committed set to true: + // https://github.com/moby/buildkit/issues/2740 + if kind := rec.kind(); kind == Merge || kind == Diff { + rec.mutable = false + } + // the record was deleted but we crashed before data on disk was removed if md.getDeleted() { if err := rec.remove(ctx, true); err != nil { @@ -496,6 +508,11 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt } func (cm *cacheManager) parentsOf(ctx context.Context, md *cacheMetadata, opts ...RefOption) (ps parentRefs, rerr error) { + defer func() { + if rerr != nil { + ps.release(context.TODO()) + } + }() if parentID := md.getParent(); parentID != "" { p, err := 
cm.get(ctx, parentID, nil, append(opts, NoUpdateLastUsed)) if err != nil { @@ -794,7 +811,7 @@ func (cm *cacheManager) createMergeRef(ctx context.Context, parents parentRefs, } rec.queueSnapshotID(snapshotID) - + rec.queueCommitted(true) if err := rec.commitMetadata(); err != nil { return nil, err } @@ -969,6 +986,7 @@ func (cm *cacheManager) createDiffRef(ctx context.Context, parents parentRefs, d } rec.queueSnapshotID(snapshotID) + rec.queueCommitted(true) if err := rec.commitMetadata(); err != nil { return nil, err } diff --git a/cache/manager_test.go b/cache/manager_test.go index 78387a023b10..cd58a4042b29 100644 --- a/cache/manager_test.go +++ b/cache/manager_test.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -43,6 +42,7 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/iohelper" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/winlayers" digest "github.com/opencontainers/go-digest" @@ -65,7 +65,7 @@ type cmOut struct { cs content.Store } -func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) { +func newCacheManager(ctx context.Context, t *testing.T, opt cmOpt) (co *cmOut, cleanup func(), err error) { ns, ok := namespaces.Namespace(ctx) if !ok { return nil, nil, errors.Errorf("namespace required for test") @@ -75,31 +75,24 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() opt.snapshotterName = "native" } - tmpdir, err := ioutil.TempDir("", "cachemanager") - if err != nil { - return nil, nil, err - } + tmpdir := t.TempDir() defers := make([]func() error, 0) - cleanup = func() error { + cleanup = func() { var err error for i := range defers { if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil { err = err1 } } - return err + require.NoError(t, err) } defer func() { if err != nil && cleanup != nil { cleanup() } }() - if opt.tmpdir == "" { - defers = append(defers, func() error { - return os.RemoveAll(tmpdir) - }) - } else { + if opt.tmpdir != "" { os.RemoveAll(tmpdir) tmpdir = opt.tmpdir } @@ -142,6 +135,9 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() if err != nil { return nil, nil, err } + defers = append(defers, func() error { + return md.Close() + }) cm, err := NewManager(ManagerOpt{ Snapshotter: snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil), @@ -156,6 +152,10 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() if err != nil { return nil, nil, err } + defers = append(defers, func() error { + return cm.Close() + }) + return &cmOut{ manager: cm, lm: lm, @@ -167,22 +167,20 @@ func TestSharableMountPoolCleanup(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // Emulate the situation where the pool dir is dirty mountPoolDir := filepath.Join(tmpdir, "cachemounts") require.NoError(t, os.MkdirAll(mountPoolDir, 0700)) - _, err = ioutil.TempDir(mountPoolDir, "buildkit") + _, err := os.MkdirTemp(mountPoolDir, "buildkit") require.NoError(t, err) // Initialize cache manager and check if pool is cleaned up - _, cleanup, err := newCacheManager(ctx, cmOpt{ + _, cleanup, err := 
newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) files, err := os.ReadDir(mountPoolDir) require.NoError(t, err) @@ -194,20 +192,21 @@ func TestManager(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) + t.Cleanup(cleanup) - defer cleanup() cm := co.manager _, err = cm.Get(ctx, "foobar", nil) @@ -317,7 +316,7 @@ func TestManager(t *testing.T) { err = cm.Close() require.NoError(t, err) - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 0, len(dirs)) } @@ -326,19 +325,17 @@ func TestLazyGetByBlob(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager // Test for #2226 https://github.com/moby/buildkit/issues/2226, create lazy blobs with the same diff ID but @@ -371,19 +368,17 @@ func TestMergeBlobchainID(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager // create a merge ref that has 3 inputs, with each input being a 3 layer blob chain @@ -444,20 +439,17 @@ func TestSnapshotExtract(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - - defer cleanup() + t.Cleanup(cleanup) cm := co.manager @@ -487,7 +479,7 @@ func TestSnapshotExtract(t *testing.T) { require.Equal(t, false, !snap2.(*immutableRef).getBlobOnly()) - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 0, len(dirs)) 
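Editor's note: almost every test in this file receives the same mechanical migration, so a single sketch of the pattern may speed up review. t.TempDir hands out a per-test directory that the testing package deletes automatically, and t.Cleanup runs registered functions in LIFO order even when the test fails early, which is what makes the manual defer chains redundant. The test name below is hypothetical.

```go
package sketch

import (
	"path/filepath"
	"testing"

	"github.com/containerd/containerd/snapshots/native"
	"github.com/stretchr/testify/require"
)

func TestSetupPattern(t *testing.T) {
	t.Parallel()

	// Before: ioutil.TempDir + require.NoError + defer os.RemoveAll.
	tmpdir := t.TempDir() // removed automatically when the test finishes

	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	// Cleanup functions run in LIFO order, including on early test failure.
	t.Cleanup(func() {
		require.NoError(t, snapshotter.Close())
	})
}
```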
@@ -499,7 +491,7 @@ func TestSnapshotExtract(t *testing.T) { require.Equal(t, true, !snap.(*immutableRef).getBlobOnly()) require.Equal(t, true, !snap2.(*immutableRef).getBlobOnly()) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -512,7 +504,7 @@ func TestSnapshotExtract(t *testing.T) { require.Equal(t, len(buf.all), 0) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -530,7 +522,7 @@ func TestSnapshotExtract(t *testing.T) { checkDiskUsage(ctx, t, cm, 2, 0) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -553,7 +545,7 @@ func TestSnapshotExtract(t *testing.T) { require.Equal(t, len(buf.all), 1) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 1, len(dirs)) @@ -569,7 +561,7 @@ func TestSnapshotExtract(t *testing.T) { checkDiskUsage(ctx, t, cm, 0, 0) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 0, len(dirs)) @@ -584,20 +576,17 @@ func TestExtractOnMutable(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - - defer cleanup() + t.Cleanup(cleanup) cm := co.manager @@ -643,7 +632,7 @@ func TestExtractOnMutable(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(len(b2)), size) - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 1, len(dirs)) @@ -664,7 +653,7 @@ func TestExtractOnMutable(t *testing.T) { require.Equal(t, len(buf.all), 0) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -682,7 +671,7 @@ func TestExtractOnMutable(t *testing.T) { require.Equal(t, len(buf.all), 2) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 0, len(dirs)) @@ -693,20 +682,20 @@ func TestSetBlob(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, 
cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - - defer cleanup() + t.Cleanup(cleanup) ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary) require.NoError(t, err) @@ -866,20 +855,21 @@ func TestPrune(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) + t.Cleanup(cleanup) - defer cleanup() cm := co.manager active, err := cm.New(ctx, nil, nil) @@ -896,7 +886,7 @@ func TestPrune(t *testing.T) { checkDiskUsage(ctx, t, cm, 2, 0) - dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -909,7 +899,7 @@ func TestPrune(t *testing.T) { checkDiskUsage(ctx, t, cm, 2, 0) require.Equal(t, len(buf.all), 0) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 2, len(dirs)) @@ -927,7 +917,7 @@ func TestPrune(t *testing.T) { checkDiskUsage(ctx, t, cm, 1, 0) require.Equal(t, len(buf.all), 1) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 1, len(dirs)) @@ -967,7 +957,7 @@ func TestPrune(t *testing.T) { checkDiskUsage(ctx, t, cm, 0, 0) require.Equal(t, len(buf.all), 2) - dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) + dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots")) require.NoError(t, err) require.Equal(t, 0, len(dirs)) } @@ -977,14 +967,15 @@ func TestLazyCommit(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, snapshotter: snapshotter, snapshotterName: "native", @@ -1074,7 +1065,7 @@ func TestLazyCommit(t *testing.T) { cleanup() // we can't close snapshotter and open it twice (especially, its internal bbolt store) - co, cleanup, err = newCacheManager(ctx, cmOpt{ + co, cleanup, err = newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, snapshotter: snapshotter, snapshotterName: "native", @@ -1103,13 +1094,13 @@ func TestLazyCommit(t *testing.T) { cleanup() - co, cleanup, err = newCacheManager(ctx, cmOpt{ + co, cleanup, err = newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm = co.manager snap2, err = cm.Get(ctx, 
snap.ID(), nil) @@ -1135,19 +1126,17 @@ func TestLoopLeaseContent(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary) @@ -1252,19 +1241,17 @@ func TestSharingCompressionVariant(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) allCompressions := []compression.Type{compression.Uncompressed, compression.Gzip, compression.Zstd, compression.EStargz} @@ -1359,7 +1346,7 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut, if err != nil { return nil, "", err } - return cw, testCase.a.DefaultMediaType(), nil + return cw, testCase.a.MediaType(), nil }) require.NoError(t, err) contentBuffer := contentutil.NewBuffer() @@ -1400,9 +1387,9 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut, // check if all compression variables are available on the both refs checkCompression := func(desc ocispecs.Descriptor, compressionType compression.Type) { - require.Equal(t, compressionType.DefaultMediaType(), desc.MediaType, "compression: %v", compressionType) + require.Equal(t, compressionType.MediaType(), desc.MediaType, "compression: %v", compressionType) if compressionType == compression.EStargz { - ok, err := isEStargz(ctx, co.cs, desc.Digest) + ok, err := compression.EStargz.Is(ctx, co.cs, desc.Digest) require.NoError(t, err, "compression: %v", compressionType) require.True(t, ok, "compression: %v", compressionType) } @@ -1467,7 +1454,7 @@ func ensurePrune(ctx context.Context, t *testing.T, cm Manager, pruneNum, maxRet func getCompressor(w io.Writer, compressionType compression.Type, customized bool) (io.WriteCloser, error) { switch compressionType { case compression.Uncompressed: - return nil, fmt.Errorf("compression is not requested: %v", compressionType) + return nil, errors.Errorf("compression is not requested: %v", compressionType) case compression.Gzip: if customized { gz, _ := gzip.NewWriterLevel(w, gzip.NoCompression) @@ -1495,7 +1482,7 @@ func getCompressor(w io.Writer, compressionType compression.Type, customized boo } pr.Close() }() - return &writeCloser{pw, func() error { <-done; return nil }}, nil + return &iohelper.WriteCloser{WriteCloser: pw, CloseFunc: func() error { <-done; return nil }}, nil case compression.Zstd: if customized { skippableFrameMagic := []byte{0x50, 0x2a, 0x4d, 0x18} @@ -1508,7 +1495,7 @@ func getCompressor(w io.Writer, compressionType compression.Type, customized boo } return zstd.NewWriter(w) default: - return nil, fmt.Errorf("unknown compression type: %q", 
compressionType) + return nil, errors.Errorf("unknown compression type: %q", compressionType) } } @@ -1520,19 +1507,17 @@ func TestConversion(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) store := co.cs // Preapre the original tar blob using archive/tar and tar command on the system @@ -1547,7 +1532,7 @@ func TestConversion(t *testing.T) { err = cw.Commit(ctx, 0, cw.Digest()) require.NoError(t, err) - orgBlobBytesSys, orgDescSys, err := mapToSystemTarBlob(m) + orgBlobBytesSys, orgDescSys, err := mapToSystemTarBlob(t, m) require.NoError(t, err) cw, err = store.Writer(ctx, content.WithRef(fmt.Sprintf("write-test-blob-%s", orgDescSys.Digest))) require.NoError(t, err) @@ -1617,19 +1602,17 @@ func TestGetRemotes(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary) @@ -1785,7 +1768,7 @@ func TestGetRemotes(t *testing.T) { r := refChain[i] isLazy, err := r.isLazy(egctx) require.NoError(t, err) - needs, err := needsConversion(ctx, co.cs, desc, compressionType) + needs, err := compressionType.NeedsConversion(ctx, co.cs, desc) require.NoError(t, err) if needs { require.False(t, isLazy, "layer %q requires conversion so it must be unlazied", desc.Digest) @@ -1917,19 +1900,17 @@ func TestNondistributableBlobs(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager @@ -2018,7 +1999,7 @@ func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc o } // Check annotation values are valid - c := new(counter) + c := new(iohelper.Counter) ra, err := cs.ReaderAt(ctx, desc) if err != nil && errdefs.IsNotFound(err) { return // lazy layer @@ -2033,7 +2014,7 @@ func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc o require.NoError(t, err) require.Equal(t, diffID.Digest().String(), uncompressedDgst) if compressionType == compression.EStargz { - require.Equal(t, c.size(), uncompressedSize) + require.Equal(t, c.Size(), uncompressedSize) } } @@ -2048,19 +2029,17 @@ func TestMergeOp(t *testing.T) { ctx := 
namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager emptyMerge, err := cm.Merge(ctx, nil, nil) @@ -2092,6 +2071,7 @@ func TestMergeOp(t *testing.T) { singleMerge, err := cm.Merge(ctx, baseRefs[:1], nil) require.NoError(t, err) + require.True(t, singleMerge.(*immutableRef).getCommitted()) m, err := singleMerge.Mount(ctx, true, nil) require.NoError(t, err) ms, unmount, err := m.Mount() @@ -2112,6 +2092,7 @@ func TestMergeOp(t *testing.T) { merge1, err := cm.Merge(ctx, baseRefs[:3], nil) require.NoError(t, err) + require.True(t, merge1.(*immutableRef).getCommitted()) _, err = merge1.Mount(ctx, true, nil) require.NoError(t, err) size1, err := merge1.(*immutableRef).size(ctx) @@ -2121,6 +2102,7 @@ func TestMergeOp(t *testing.T) { merge2, err := cm.Merge(ctx, baseRefs[3:], nil) require.NoError(t, err) + require.True(t, merge2.(*immutableRef).getCommitted()) _, err = merge2.Mount(ctx, true, nil) require.NoError(t, err) size2, err := merge2.(*immutableRef).size(ctx) @@ -2136,6 +2118,7 @@ func TestMergeOp(t *testing.T) { merge3, err := cm.Merge(ctx, []ImmutableRef{merge1, merge2}, nil) require.NoError(t, err) + require.True(t, merge3.(*immutableRef).getCommitted()) require.NoError(t, merge1.Release(ctx)) require.NoError(t, merge2.Release(ctx)) _, err = merge3.Mount(ctx, true, nil) @@ -2164,19 +2147,17 @@ func TestDiffOp(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager newLower, err := cm.New(ctx, nil, nil) @@ -2243,6 +2224,20 @@ func TestDiffOp(t *testing.T) { checkDiskUsage(ctx, t, cm, 0, 8) require.NoError(t, cm.Prune(ctx, nil, client.PruneInfo{All: true})) checkDiskUsage(ctx, t, cm, 0, 0) + + // Test using nil as upper + newLower, err = cm.New(ctx, nil, nil) + require.NoError(t, err) + lowerB, err := newLower.Commit(ctx) + require.NoError(t, err) + diff, err = cm.Diff(ctx, lowerB, nil, nil) + require.NoError(t, err) + checkDiskUsage(ctx, t, cm, 2, 0) + require.NoError(t, lowerB.Release(ctx)) + require.NoError(t, diff.Release(ctx)) + checkDiskUsage(ctx, t, cm, 0, 2) + require.NoError(t, cm.Prune(ctx, nil, client.PruneInfo{All: true})) + checkDiskUsage(ctx, t, cm, 0, 0) } func TestLoadHalfFinalizedRef(t *testing.T) { @@ -2254,20 +2249,21 @@ func TestLoadHalfFinalizedRef(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + 
require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager.(*cacheManager) mref, err := cm.New(ctx, nil, nil, CachePolicyRetain) @@ -2303,15 +2299,15 @@ func TestLoadHalfFinalizedRef(t *testing.T) { require.NoError(t, iref.Release(ctx)) require.NoError(t, cm.Close()) - require.NoError(t, cleanup()) + cleanup() - co, cleanup, err = newCacheManager(ctx, cmOpt{ + co, cleanup, err = newCacheManager(ctx, t, cmOpt{ tmpdir: tmpdir, snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm = co.manager.(*cacheManager) _, err = cm.GetMutable(ctx, mutRef.ID()) @@ -2334,19 +2330,17 @@ func TestMountReadOnly(t *testing.T) { ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "overlay", }) require.NoError(t, err) - defer cleanup() + t.Cleanup(cleanup) cm := co.manager mutRef, err := cm.New(ctx, nil, nil) @@ -2395,6 +2389,65 @@ func TestMountReadOnly(t *testing.T) { } } +func TestLoadBrokenParents(t *testing.T) { + // Test that a ref that has a parent that can't be loaded will not result in any leaks + // of other parent refs + t.Parallel() + + ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") + + tmpdir := t.TempDir() + + snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) + + co, cleanup, err := newCacheManager(ctx, t, cmOpt{ + tmpdir: tmpdir, + snapshotter: snapshotter, + snapshotterName: "native", + }) + require.NoError(t, err) + t.Cleanup(cleanup) + cm := co.manager.(*cacheManager) + + mutRef, err := cm.New(ctx, nil, nil) + require.NoError(t, err) + refA, err := mutRef.Commit(ctx) + require.NoError(t, err) + refAID := refA.ID() + mutRef, err = cm.New(ctx, nil, nil) + require.NoError(t, err) + refB, err := mutRef.Commit(ctx) + require.NoError(t, err) + + _, err = cm.Merge(ctx, []ImmutableRef{refA, refB}, nil) + require.NoError(t, err) + checkDiskUsage(ctx, t, cm, 3, 0) + + // set refB as deleted + require.NoError(t, refB.(*immutableRef).queueDeleted()) + require.NoError(t, refB.(*immutableRef).commitMetadata()) + require.NoError(t, cm.Close()) + cleanup() + + co, cleanup, err = newCacheManager(ctx, t, cmOpt{ + tmpdir: tmpdir, + snapshotter: snapshotter, + snapshotterName: "native", + }) + require.NoError(t, err) + t.Cleanup(cleanup) + cm = co.manager.(*cacheManager) + + checkDiskUsage(ctx, t, cm, 0, 1) + refA, err = cm.Get(ctx, refAID, nil) + require.NoError(t, err) + require.Len(t, refA.(*immutableRef).refs, 1) +} + func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) { du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{}) require.NoError(t, err) @@ -2581,17 +2634,13 @@ func fileToBlob(file *os.File, compress bool) ([]byte, ocispecs.Descriptor, erro }, nil } -func mapToSystemTarBlob(m map[string]string) ([]byte, ocispecs.Descriptor, error) 
{ - tmpdir, err := ioutil.TempDir("", "tarcreation") - if err != nil { - return nil, ocispecs.Descriptor{}, err - } - defer os.RemoveAll(tmpdir) +func mapToSystemTarBlob(t *testing.T, m map[string]string) ([]byte, ocispecs.Descriptor, error) { + tmpdir := t.TempDir() expected := map[string]string{} for k, v := range m { expected[k] = v - if err := ioutil.WriteFile(filepath.Join(tmpdir, k), []byte(v), 0600); err != nil { + if err := os.WriteFile(filepath.Join(tmpdir, k), []byte(v), 0600); err != nil { return nil, ocispecs.Descriptor{}, err } } @@ -2620,7 +2669,7 @@ func mapToSystemTarBlob(m map[string]string) ([]byte, ocispecs.Descriptor, error return nil, ocispecs.Descriptor{}, errors.Errorf("unexpected file %s", h.Name) } delete(expected, k) - gotV, err := ioutil.ReadAll(tr) + gotV, err := io.ReadAll(tr) if err != nil { return nil, ocispecs.Descriptor{}, err } diff --git a/cache/metadata.go b/cache/metadata.go index 121110bd13b0..82209a93c0f9 100644 --- a/cache/metadata.go +++ b/cache/metadata.go @@ -251,7 +251,13 @@ func (md *cacheMetadata) queueMediaType(str string) error { } func (md *cacheMetadata) getSnapshotID() string { - return md.GetString(keySnapshot) + sid := md.GetString(keySnapshot) + // Note that historic buildkit releases did not always set the snapshot ID. + // Fallback to record ID is needed for old build cache compatibility. + if sid == "" { + return md.ID() + } + return sid } func (md *cacheMetadata) queueSnapshotID(str string) error { @@ -551,9 +557,7 @@ func (md *cacheMetadata) appendStringSlice(key string, values ...string) error { } for _, existing := range slice { - if _, ok := idx[existing]; ok { - delete(idx, existing) - } + delete(idx, existing) } if len(idx) == 0 { diff --git a/cache/metadata/metadata.go b/cache/metadata/metadata.go index ae957c3e72b7..170c0a8872f4 100644 --- a/cache/metadata/metadata.go +++ b/cache/metadata/metadata.go @@ -317,6 +317,9 @@ func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) { func (s *StorageItem) Commit() error { s.qmu.Lock() defer s.qmu.Unlock() + if len(s.queue) == 0 { + return nil + } return errors.WithStack(s.Update(func(b *bolt.Bucket) error { for _, fn := range s.queue { if err := fn(b); err != nil { diff --git a/cache/metadata/metadata_test.go b/cache/metadata/metadata_test.go index 7e3d5b055df6..0dc362773dda 100644 --- a/cache/metadata/metadata_test.go +++ b/cache/metadata/metadata_test.go @@ -1,8 +1,6 @@ package metadata import ( - "io/ioutil" - "os" "path/filepath" "testing" @@ -13,9 +11,7 @@ import ( func TestGetSetSearch(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() dbPath := filepath.Join(tmpdir, "storage.db") @@ -112,9 +108,7 @@ func TestGetSetSearch(t *testing.T) { func TestIndexes(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() dbPath := filepath.Join(tmpdir, "storage.db") @@ -172,9 +166,7 @@ func TestIndexes(t *testing.T) { func TestExternalData(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "buildkit-storage") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() dbPath := filepath.Join(tmpdir, "storage.db") diff --git a/cache/opts.go b/cache/opts.go index 92df9989d928..1f1db6ca6105 100644 --- a/cache/opts.go +++ b/cache/opts.go @@ -36,4 +36,13 @@ func (m NeedsRemoteProviderError) Error() string { return fmt.Sprintf("missing 
descriptor handlers for lazy blobs %+v", []digest.Digest(m)) } -type ProgressKey struct{} +type Unlazy session.Group + +func unlazySessionOf(opts ...RefOption) session.Group { + for _, opt := range opts { + if opt, ok := opt.(session.Group); ok { + return opt + } + } + return nil +} diff --git a/cache/refs.go b/cache/refs.go index c937dd1bfa98..dc2cd561b01d 100644 --- a/cache/refs.go +++ b/cache/refs.go @@ -3,7 +3,6 @@ package cache import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -37,6 +36,8 @@ import ( "golang.org/x/sync/errgroup" ) +var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed) + // Ref is a reference to cacheable objects. type Ref interface { Mountable @@ -56,6 +57,7 @@ type ImmutableRef interface { Extract(ctx context.Context, s session.Group) error // +progress GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) LayerChain() RefList + FileList(ctx context.Context, s session.Group) ([]string, error) } type MutableRef interface { @@ -533,7 +535,7 @@ func (cr *cacheRecord) layerDigestChain() []digest.Digest { } switch cr.kind() { case Diff: - if cr.getBlob() == "" { + if cr.getBlob() == "" && cr.diffParents.upper != nil { // this diff just reuses the upper blob cr.layerDigestChainCache = cr.diffParents.upper.layerDigestChain() } else { @@ -768,12 +770,9 @@ func (sr *immutableRef) getBlobWithCompression(ctx context.Context, compressionT } func getBlobWithCompression(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (ocispecs.Descriptor, error) { - if compressionType == compression.UnknownCompression { - return ocispecs.Descriptor{}, fmt.Errorf("cannot get unknown compression type") - } var target *ocispecs.Descriptor if err := walkBlob(ctx, cs, desc, func(desc ocispecs.Descriptor) bool { - if needs, err := needsConversion(ctx, cs, desc, compressionType); err == nil && !needs { + if needs, err := compressionType.NeedsConversion(ctx, cs, desc); err == nil && !needs { target = &desc return false } @@ -838,11 +837,11 @@ func getBlobDesc(ctx context.Context, cs content.Store, dgst digest.Digest) (oci return ocispecs.Descriptor{}, err } if info.Labels == nil { - return ocispecs.Descriptor{}, fmt.Errorf("no blob metadata is stored for %q", info.Digest) + return ocispecs.Descriptor{}, errors.Errorf("no blob metadata is stored for %q", info.Digest) } mt, ok := info.Labels[blobMediaTypeLabel] if !ok { - return ocispecs.Descriptor{}, fmt.Errorf("no media type is stored for %q", info.Digest) + return ocispecs.Descriptor{}, errors.Errorf("no media type is stored for %q", info.Digest) } desc := ocispecs.Descriptor{ Digest: info.Digest, @@ -882,7 +881,7 @@ func filterAnnotationsForSave(a map[string]string) (b map[string]string) { if a == nil { return nil } - for _, k := range append(eStargzAnnotations, containerdUncompressed) { + for _, k := range additionalAnnotations { v, ok := a[k] if !ok { continue @@ -1552,12 +1551,12 @@ func readonlyOverlay(opt []string) []string { func newSharableMountPool(tmpdirRoot string) (sharableMountPool, error) { if tmpdirRoot != "" { if err := os.MkdirAll(tmpdirRoot, 0700); err != nil { - return sharableMountPool{}, fmt.Errorf("failed to prepare mount pool: %w", err) + return sharableMountPool{}, errors.Wrap(err, "failed to prepare mount pool") } // If tmpdirRoot is specified, remove existing mounts to avoid conflict. 
 		files, err := os.ReadDir(tmpdirRoot)
 		if err != nil {
-			return sharableMountPool{}, fmt.Errorf("failed to read mount pool: %w", err)
+			return sharableMountPool{}, errors.Wrap(err, "failed to read mount pool")
 		}
 		for _, file := range files {
 			if file.IsDir() {
@@ -1591,9 +1590,10 @@ func (p sharableMountPool) setSharable(mounts snapshot.Mountable) snapshot.Mount
 // This is useful to share writable overlayfs mounts.
 //
 // NOTE: Mount() method doesn't return the underlying mount configuration (e.g. overlayfs mounts)
-// instead it always return bind mounts of the temporary mount point. So if the caller
-// needs to inspect the underlying mount configuration (e.g. for optimized differ for
-// overlayfs), this wrapper shouldn't be used.
+//
+//	instead it always returns bind mounts of the temporary mount point. So if the caller
+//	needs to inspect the underlying mount configuration (e.g. for an optimized differ for
+//	overlayfs), this wrapper shouldn't be used.
 type sharableMountable struct {
 	snapshot.Mountable
@@ -1631,7 +1631,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er
 		// Don't need temporary mount wrapper for non-overlayfs mounts
 		return mounts, release, nil
 	}
-	dir, err := ioutil.TempDir(sm.mountPoolRoot, "buildkit")
+	dir, err := os.MkdirTemp(sm.mountPoolRoot, "buildkit")
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/cache/remote.go b/cache/remote.go
index d0ac594b6ac8..b80bd79cfb0e 100644
--- a/cache/remote.go
+++ b/cache/remote.go
@@ -212,8 +212,8 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
 		}
 	}
 
-	if refCfg.Compression.Force {
-		if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, refCfg.Compression.Type); err != nil {
+	if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) {
+		if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil {
 			return nil, err
 		} else if needs {
 			// ensure the compression type.
@@ -228,13 +228,13 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
 			newDesc.Size = blobDesc.Size
 			newDesc.URLs = blobDesc.URLs
 			newDesc.Annotations = nil
+			if len(addAnnotations) > 0 || len(blobDesc.Annotations) > 0 {
+				newDesc.Annotations = make(map[string]string)
+			}
 			for _, k := range addAnnotations {
 				newDesc.Annotations[k] = desc.Annotations[k]
 			}
 			for k, v := range blobDesc.Annotations {
-				if newDesc.Annotations == nil {
-					newDesc.Annotations = make(map[string]string)
-				}
 				newDesc.Annotations[k] = v
 			}
 			desc = newDesc
diff --git a/cache/remotecache/azblob/exporter.go b/cache/remotecache/azblob/exporter.go
new file mode 100644
index 000000000000..4d76770db26a
--- /dev/null
+++ b/cache/remotecache/azblob/exporter.go
@@ -0,0 +1,214 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/compression"
+	"github.com/moby/buildkit/util/progress"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// ResolveCacheExporterFunc for "azblob" cache exporter.
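+//
+// Editorial sketch (not part of the upstream change): the resolver is invoked
+// with a flat attribute map; the attribute names are the ones parsed by
+// getConfig in utils.go, and the values below are placeholders.
+//
+//	exp, err := ResolveCacheExporterFunc()(ctx, g, map[string]string{
+//		"account_url": "https://myaccount.blob.core.windows.net",
+//		"name":        "buildkit",
+//	})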
+func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
+	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
+		config, err := getConfig(attrs)
+		if err != nil {
+			return nil, errors.WithMessage(err, "failed to create azblob config")
+		}
+
+		containerClient, err := createContainerClient(ctx, config)
+		if err != nil {
+			return nil, errors.WithMessage(err, "failed to create container client")
+		}
+
+		cc := v1.NewCacheChains()
+		return &exporter{
+			CacheExporterTarget: cc,
+			chains:              cc,
+			containerClient:     containerClient,
+			config:              config,
+		}, nil
+	}
+}
+
+var _ remotecache.Exporter = &exporter{}
+
+type exporter struct {
+	solver.CacheExporterTarget
+	chains          *v1.CacheChains
+	containerClient *azblob.ContainerClient
+	config          *Config
+}
+
+func (ce *exporter) Name() string {
+	return "exporting cache to azure blob store"
+}
+
+func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
+	config, descs, err := ce.chains.Marshal(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for i, l := range config.Layers {
+		dgstPair, ok := descs[l.Blob]
+		if !ok {
+			return nil, errors.Errorf("missing blob %s", l.Blob)
+		}
+		if dgstPair.Descriptor.Annotations == nil {
+			return nil, errors.Errorf("invalid descriptor without annotations")
+		}
+		var diffID digest.Digest
+		v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"]
+		if !ok {
+			return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
+		}
+		dgst, err := digest.Parse(v)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse uncompressed annotation")
+		}
+		diffID = dgst
+
+		key := blobKey(ce.config, dgstPair.Descriptor.Digest.String())
+
+		exists, err := blobExists(ctx, ce.containerClient, key)
+		if err != nil {
+			return nil, err
+		}
+
+		logrus.Debugf("layer %s exists = %t", key, exists)
+
+		if !exists {
+			layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+			ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor)
+			if err != nil {
+				err = errors.Wrapf(err, "failed to get reader for %s", dgstPair.Descriptor.Digest)
+				return nil, layerDone(err)
+			}
+			if err := ce.uploadBlobIfNotExists(ctx, key, content.NewReader(ra)); err != nil {
+				return nil, layerDone(err)
+			}
+			layerDone(nil)
+		}
+
+		la := &v1.LayerAnnotations{
+			DiffID:    diffID,
+			Size:      dgstPair.Descriptor.Size,
+			MediaType: dgstPair.Descriptor.MediaType,
+		}
+		if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok {
+			var t time.Time
+			if err := (&t).UnmarshalText([]byte(v)); err != nil {
+				return nil, err
+			}
+			la.CreatedAt = t.UTC()
+		}
+		config.Layers[i].Annotations = la
+	}
+
+	dt, err := json.Marshal(config)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to marshal config")
+	}
+
+	for _, name := range ce.config.Names {
+		if innerError := ce.uploadManifest(ctx, manifestKey(ce.config, name), bytesToReadSeekCloser(dt)); innerError != nil {
+			return nil, errors.Wrapf(innerError, "error writing manifest %s", name)
+		}
+	}
+
+	return nil, nil
+}
+
+func (ce *exporter) Config() remotecache.Config {
+	return remotecache.Config{
+		Compression: compression.New(compression.Default),
+	}
+}
+
+// For uploading manifests, use the Upload API, which follows "last writer wins" semantics.
+// This is slightly slower than the UploadStream call but is safe to call concurrently from multiple threads. Refer to:
+// https://github.com/Azure/azure-sdk-for-go/issues/18490#issuecomment-1170806877
+func (ce *exporter) uploadManifest(ctx context.Context, manifestKey string, reader io.ReadSeekCloser) error {
+	defer reader.Close()
+	blobClient, err := ce.containerClient.NewBlockBlobClient(manifestKey)
+	if err != nil {
+		return errors.Wrap(err, "error creating block blob client")
+	}
+
+	ctx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+	defer cnclFn()
+
+	_, err = blobClient.Upload(ctx, reader, &azblob.BlockBlobUploadOptions{})
+	if err != nil {
+		return errors.Wrapf(err, "failed to upload blob %s", manifestKey)
+	}
+
+	return nil
+}
+
+// For uploading blobs, use UploadStream with an access condition stating that the upload only happens
+// if the blob does not already exist. Since blobs are content addressable, this is the right behavior
+// for blobs, and it gives a performance improvement over the Upload API used for uploading manifests.
+func (ce *exporter) uploadBlobIfNotExists(ctx context.Context, blobKey string, reader io.Reader) error {
+	blobClient, err := ce.containerClient.NewBlockBlobClient(blobKey)
+	if err != nil {
+		return errors.Wrap(err, "error creating block blob client")
+	}
+
+	uploadCtx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+	defer cnclFn()
+
+	// Only upload if the blob doesn't exist
+	eTagAny := azblob.ETagAny
+	_, err = blobClient.UploadStream(uploadCtx, reader, azblob.UploadStreamOptions{
+		BufferSize: IOChunkSize,
+		MaxBuffers: IOConcurrency,
+		BlobAccessConditions: &azblob.BlobAccessConditions{
+			ModifiedAccessConditions: &azblob.ModifiedAccessConditions{
+				IfNoneMatch: &eTagAny,
+			},
+		},
+	})
+
+	if err == nil {
+		return nil
+	}
+
+	var se *azblob.StorageError
+	if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeBlobAlreadyExists {
+		return nil
+	}
+
+	return errors.Wrapf(err, "failed to upload blob %s", blobKey)
+}
+
+var _ io.ReadSeekCloser = &readSeekCloser{}
+
+type readSeekCloser struct {
+	io.Reader
+	io.Seeker
+	io.Closer
+}
+
+func bytesToReadSeekCloser(dt []byte) io.ReadSeekCloser {
+	bytesReader := bytes.NewReader(dt)
+	return &readSeekCloser{
+		Reader: bytesReader,
+		Seeker: bytesReader,
+		Closer: io.NopCloser(bytesReader),
+	}
+}
diff --git a/cache/remotecache/azblob/importer.go b/cache/remotecache/azblob/importer.go
new file mode 100644
index 000000000000..ea10c59f049a
--- /dev/null
+++ b/cache/remotecache/azblob/importer.go
@@ -0,0 +1,239 @@
+package azblob
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/contentutil"
+	"github.com/moby/buildkit/util/progress"
+	"github.com/moby/buildkit/worker"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sync/errgroup"
+)
+
+// ResolveCacheImporterFunc for "azblob" cache importer.
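+//
+// Editorial sketch, mirroring the exporter above; the descriptor returned
+// alongside the importer is always empty for this backend:
+//
+//	imp, _, err := ResolveCacheImporterFunc()(ctx, g, map[string]string{
+//		"account_url": "https://myaccount.blob.core.windows.net",
+//		"name":        "buildkit",
+//	})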
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) {
+		config, err := getConfig(attrs)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, errors.WithMessage(err, "failed to create azblob config")
+		}
+
+		containerClient, err := createContainerClient(ctx, config)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, errors.WithMessage(err, "failed to create container client")
+		}
+
+		importer := &importer{
+			config:          config,
+			containerClient: containerClient,
+		}
+
+		return importer, ocispecs.Descriptor{}, nil
+	}
+}
+
+var _ remotecache.Importer = &importer{}
+
+type importer struct {
+	config          *Config
+	containerClient *azblob.ContainerClient
+}
+
+func (ci *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+	eg, ctx := errgroup.WithContext(ctx)
+	ccs := make([]*v1.CacheChains, len(ci.config.Names))
+
+	for i, name := range ci.config.Names {
+		func(i int, name string) {
+			eg.Go(func() error {
+				cc, err := ci.loadManifest(ctx, name)
+				if err != nil {
+					return errors.Wrapf(err, "failed to load cache manifest %s", name)
+				}
+				ccs[i] = cc
+				return nil
+			})
+		}(i, name)
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	cms := make([]solver.CacheManager, 0, len(ccs))
+
+	for _, cc := range ccs {
+		keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+		if err != nil {
+			return nil, err
+		}
+		cms = append(cms, solver.NewCacheManager(ctx, id, keysStorage, resultStorage))
+	}
+
+	return solver.NewCombinedCacheManager(cms, nil), nil
+}
+
+func (ci *importer) loadManifest(ctx context.Context, name string) (*v1.CacheChains, error) {
+	key := manifestKey(ci.config, name)
+	exists, err := blobExists(ctx, ci.containerClient, key)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("name %s cache with key %s exists = %v", name, key, exists)
+
+	if !exists {
+		return v1.NewCacheChains(), nil
+	}
+
+	blobClient, err := ci.containerClient.NewBlockBlobClient(key)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating block blob client")
+	}
+
+	res, err := blobClient.Download(ctx, &azblob.BlobDownloadOptions{})
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	bytes, err := io.ReadAll(res.RawResponse.Body)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	logrus.Debugf("imported config: %s", string(bytes))
+
+	var config v1.CacheConfig
+	if err := json.Unmarshal(bytes, &config); err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	allLayers := v1.DescriptorProvider{}
+	for _, l := range config.Layers {
+		dpp, err := ci.makeDescriptorProviderPair(l)
+		if err != nil {
+			return nil, err
+		}
+		allLayers[l.Blob] = *dpp
+	}
+
+	progress.OneOff(ctx, fmt.Sprintf("found %d layers in cache", len(allLayers)))(nil)
+
+	cc := v1.NewCacheChains()
+	if err := v1.ParseConfig(config, allLayers, cc); err != nil {
+		return nil, err
+	}
+
+	return cc, nil
+}
+
+func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) {
+	if l.Annotations == nil {
+		return nil, errors.Errorf("cache layer with missing annotations")
+	}
+	annotations := map[string]string{}
+	if l.Annotations.DiffID == "" {
+		return nil, errors.Errorf("cache layer with missing diffid")
+	}
+	annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+	if !l.Annotations.CreatedAt.IsZero() {
+		txt, err := l.Annotations.CreatedAt.MarshalText()
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		annotations["buildkit/createdat"] = string(txt)
+	}
+	desc := ocispecs.Descriptor{
+		MediaType:   l.Annotations.MediaType,
+		Digest:      l.Blob,
+		Size:        l.Annotations.Size,
+		Annotations: annotations,
+	}
+	return &v1.DescriptorProviderPair{
+		Descriptor: desc,
+		Provider: &ciProvider{
+			desc:            desc,
+			containerClient: ci.containerClient,
+			Provider:        contentutil.FromFetcher(&fetcher{containerClient: ci.containerClient, config: ci.config}),
+			config:          ci.config,
+		},
+	}, nil
+}
+
+type fetcher struct {
+	containerClient *azblob.ContainerClient
+	config          *Config
+}
+
+func (f *fetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) {
+	key := blobKey(f.config, desc.Digest.String())
+	exists, err := blobExists(ctx, f.containerClient, key)
+	if err != nil {
+		return nil, err
+	}
+
+	if !exists {
+		return nil, errors.Errorf("blob %s not found", desc.Digest)
+	}
+
+	logrus.Debugf("reading layer from cache: %s", key)
+
+	blobClient, err := f.containerClient.NewBlockBlobClient(key)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating block blob client")
+	}
+
+	res, err := blobClient.Download(ctx, &azblob.BlobDownloadOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	return res.RawResponse.Body, nil
+}
+
+type ciProvider struct {
+	content.Provider
+	desc            ocispecs.Descriptor
+	containerClient *azblob.ContainerClient
+	config          *Config
+	checkMutex      sync.Mutex
+	checked         bool
+}
+
+func (p *ciProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error {
+	if desc.Digest != p.desc.Digest {
+		return nil
+	}
+
+	p.checkMutex.Lock()
+	defer p.checkMutex.Unlock()
+
+	// Read the memoized flag only while holding the mutex so concurrent
+	// checks do not race on it.
+	if p.checked {
+		return nil
+	}
+
+	key := blobKey(p.config, desc.Digest.String())
+	exists, err := blobExists(ctx, p.containerClient, key)
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return errors.Errorf("blob %s not found", desc.Digest)
+	}
+
+	p.checked = true
+	return nil
+}
diff --git a/cache/remotecache/azblob/utils.go b/cache/remotecache/azblob/utils.go
new file mode 100644
index 000000000000..a993b4a485c8
--- /dev/null
+++ b/cache/remotecache/azblob/utils.go
@@ -0,0 +1,183 @@
+package azblob
+
+import (
+	"context"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/pkg/errors"
+)
+
+const (
+	attrSecretAccessKey = "secret_access_key"
+	attrAccountURL      = "account_url"
+	attrPrefix          = "prefix"
+	attrManifestsPrefix = "manifests_prefix"
+	attrBlobsPrefix     = "blobs_prefix"
+	attrName            = "name"
+	attrContainer       = "container"
+	IOConcurrency       = 4
+	IOChunkSize         = 32 * 1024 * 1024
+)
+
+type Config struct {
+	AccountURL      string
+	Container       string
+	Prefix          string
+	ManifestsPrefix string
+	BlobsPrefix     string
+	Names           []string
+	AccountName     string
+	secretAccessKey string
+}
+
+func getConfig(attrs map[string]string) (*Config, error) {
+	accountURLString, ok := attrs[attrAccountURL]
+	if !ok {
+		accountURLString, ok = os.LookupEnv("BUILDKIT_AZURE_STORAGE_ACCOUNT_URL")
+		if !ok {
+			return &Config{}, errors.New("either ${BUILDKIT_AZURE_STORAGE_ACCOUNT_URL} or account_url attribute is required for azblob cache")
+		}
+	}
+
+	accountURL, err := url.Parse(accountURLString)
+	if err != nil {
+		return &Config{}, errors.Wrap(err, "azure storage account url provided is not a valid url")
+	}
+
+	accountName := strings.Split(accountURL.Hostname(), ".")[0]
+
+	container, ok := attrs[attrContainer]
+	if !ok {
+		container, ok = os.LookupEnv("BUILDKIT_AZURE_STORAGE_CONTAINER")
+		if !ok {
+			container = "buildkit-cache"
+		}
+	}
+
+	prefix, ok := attrs[attrPrefix]
+	if !ok {
+		prefix, _ = os.LookupEnv("BUILDKIT_AZURE_STORAGE_PREFIX")
+	}
+
+	manifestsPrefix, ok := attrs[attrManifestsPrefix]
+	if !ok {
+		manifestsPrefix = "manifests"
+	}
+
+	blobsPrefix, ok := attrs[attrBlobsPrefix]
+	if !ok {
+		blobsPrefix = "blobs"
+	}
+
+	names := []string{"buildkit"}
+	name, ok := attrs[attrName]
+	if ok {
+		splittedNames := strings.Split(name, ";")
+		if len(splittedNames) > 0 {
+			names = splittedNames
+		}
+	}
+
+	secretAccessKey := attrs[attrSecretAccessKey]
+
+	config := Config{
+		AccountURL:      accountURLString,
+		AccountName:     accountName,
+		Container:       container,
+		Prefix:          prefix,
+		Names:           names,
+		ManifestsPrefix: manifestsPrefix,
+		BlobsPrefix:     blobsPrefix,
+		secretAccessKey: secretAccessKey,
+	}
+
+	return &config, nil
+}
+
+func createContainerClient(ctx context.Context, config *Config) (*azblob.ContainerClient, error) {
+	var serviceClient *azblob.ServiceClient
+	if config.secretAccessKey != "" {
+		sharedKey, err := azblob.NewSharedKeyCredential(config.AccountName, config.secretAccessKey)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create shared key")
+		}
+		serviceClient, err = azblob.NewServiceClientWithSharedKey(config.AccountURL, sharedKey, &azblob.ClientOptions{})
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create service client from shared key")
+		}
+	} else {
+		cred, err := azidentity.NewDefaultAzureCredential(nil)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create default azure credentials")
+		}
+
+		serviceClient, err = azblob.NewServiceClient(config.AccountURL, cred, &azblob.ClientOptions{})
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create service client")
+		}
+	}
+
+	ctx, cnclFn := context.WithTimeout(ctx, time.Second*60)
+	defer cnclFn()
+
+	containerClient, err := serviceClient.NewContainerClient(config.Container)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating container client")
+	}
+
+	_, err = containerClient.GetProperties(ctx, &azblob.ContainerGetPropertiesOptions{})
+	if err == nil {
+		return containerClient, nil
+	}
+
+	var se *azblob.StorageError
+	if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeContainerNotFound {
+		ctx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+		defer cnclFn()
+		_, err := containerClient.Create(ctx, &azblob.ContainerCreateOptions{})
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create cache container %s", config.Container)
+		}
+
+		return containerClient, nil
+	}
+
+	return nil, errors.Wrapf(err, "failed to get properties of cache container %s", config.Container)
+}
+
+func manifestKey(config *Config, name string) string {
+	key := filepath.Join(config.Prefix, config.ManifestsPrefix, name)
+	return key
+}
+
+func blobKey(config *Config, digest string) string {
+	key := filepath.Join(config.Prefix, config.BlobsPrefix, digest)
+	return key
+}
+
+func blobExists(ctx context.Context, containerClient *azblob.ContainerClient, blobKey string) (bool, error) {
+	blobClient, err := containerClient.NewBlobClient(blobKey)
+	if err != nil {
+		return false, errors.Wrap(err, "error creating blob client")
+	}
+
+	ctx, cnclFn := context.WithTimeout(ctx, time.Second*60)
+	defer cnclFn()
+	_, err = blobClient.GetProperties(ctx, &azblob.BlobGetPropertiesOptions{})
+	if err == nil {
+		return true, nil
+	}
+
+	var se *azblob.StorageError
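+	// Distinguish "blob does not exist" from genuine failures by unwrapping
+	// the SDK's *StorageError below.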
+ if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeBlobNotFound { + return false, nil + } + + return false, errors.Wrapf(err, "failed to check blob %s existence", blobKey) +} diff --git a/cache/remotecache/export.go b/cache/remotecache/export.go index 1c3a240cfc19..a0fd7ba7e202 100644 --- a/cache/remotecache/export.go +++ b/cache/remotecache/export.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "time" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" @@ -24,24 +23,10 @@ import ( type ResolveCacheExporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Exporter, error) -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - type Exporter interface { solver.CacheExporterTarget + // Name uniquely identifies the exporter + Name() string // Finalize finalizes and return metadata that are returned to the client // e.g. ExporterResponseManifestDesc Finalize(ctx context.Context) (map[string]string, error) @@ -72,6 +57,10 @@ func NewExporter(ingester content.Ingester, ref string, oci bool, compressionCon return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig} } +func (ce *contentCacheExporter) Name() string { + return "exporting content cache" +} + func (ce *contentCacheExporter) Config() Config { return Config{ Compression: ce.comp, @@ -107,7 +96,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string if !ok { return nil, errors.Errorf("missing blob %s", l.Blob) } - layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) + layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob)) if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, ce.ref, logs.LoggerFromContext(ctx)); err != nil { return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } @@ -127,7 +116,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string Size: int64(len(dt)), MediaType: v1.CacheConfigMediaTypeV0, } - configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst)) + configDone := progress.OneOff(ctx, fmt.Sprintf("writing config %s", dgst)) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, configDone(errors.Wrap(err, "error writing config blob")) } @@ -146,7 +135,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string Size: int64(len(dt)), MediaType: mfst.MediaType, } - mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst)) + mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst)) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, mfstDone(errors.Wrap(err, "error writing manifest blob")) } diff --git a/cache/remotecache/gha/gha.go b/cache/remotecache/gha/gha.go index cefcf5ce1b98..f36693d3b08d 100644 --- a/cache/remotecache/gha/gha.go +++ b/cache/remotecache/gha/gha.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "io" "os" "sync" "time" @@ -90,6 +91,10 @@ func NewExporter(c *Config) (remotecache.Exporter, error) { return 
&exporter{CacheExporterTarget: cc, chains: cc, cache: cache, config: c}, nil } +func (*exporter) Name() string { + return "exporting to GitHub cache" +} + func (ce *exporter) Config() remotecache.Config { return remotecache.Config{ Compression: compression.New(compression.Default), @@ -144,7 +149,7 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { return nil, err } if b == nil { - layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) + layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob)) ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor) if err != nil { return nil, layerDone(err) @@ -367,22 +372,13 @@ type readerAt struct { desc ocispecs.Descriptor } -func (r *readerAt) Size() int64 { - return r.desc.Size +func (r *readerAt) ReadAt(p []byte, off int64) (int, error) { + if off >= r.desc.Size { + return 0, io.EOF + } + return r.ReaderAtCloser.ReadAt(p, off) } -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } +func (r *readerAt) Size() int64 { + return r.desc.Size } diff --git a/cache/remotecache/inline/inline.go b/cache/remotecache/inline/inline.go index cf11db49596c..036ec059f76e 100644 --- a/cache/remotecache/inline/inline.go +++ b/cache/remotecache/inline/inline.go @@ -30,6 +30,10 @@ type exporter struct { chains *v1.CacheChains } +func (*exporter) Name() string { + return "exporting inline cache" +} + func (ce *exporter) Config() remotecache.Config { return remotecache.Config{ Compression: compression.New(compression.Default), @@ -52,16 +56,20 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) return nil, err } + layerBlobDigests := make([]digest.Digest, len(layers)) + descs2 := map[digest.Digest]v1.DescriptorProviderPair{} - for _, k := range layers { + for i, k := range layers { if v, ok := descs[k]; ok { descs2[k] = v + layerBlobDigests[i] = k continue } // fallback for uncompressed digests for _, v := range descs { if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) { descs2[v.Descriptor.Digest] = v + layerBlobDigests[i] = v.Descriptor.Digest } } } @@ -83,7 +91,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) // reorder layers based on the order in the image blobIndexes := make(map[digest.Digest]int, len(layers)) - for i, blob := range layers { + for i, blob := range layerBlobDigests { blobIndexes[blob] = i } diff --git a/cache/remotecache/local/local.go b/cache/remotecache/local/local.go index 18c73364c03b..7f3d83b70f49 100644 --- a/cache/remotecache/local/local.go +++ b/cache/remotecache/local/local.go @@ -98,15 +98,28 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group, if err != nil { return nil, err } - return sessioncontent.NewCallerStore(caller, storeID), nil + return &unlazyProvider{sessioncontent.NewCallerStore(caller, storeID), g}, nil +} + +type unlazyProvider struct { + content.Store + s session.Group +} + +func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { + return p.s } func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - compressionType := compression.Default + var compressionType compression.Type if v, ok := 
attrs[attrLayerCompression]; ok {
-		if c := compression.Parse(v); c != compression.UnknownCompression {
-			compressionType = c
+		c, err := compression.Parse(v)
+		if err != nil {
+			return nil, err
 		}
+		compressionType = c
+	} else {
+		compressionType = compression.Default
 	}
 	compressionConfig := compression.New(compressionType)
 	if v, ok := attrs[attrForceCompression]; ok {
diff --git a/cache/remotecache/registry/registry.go b/cache/remotecache/registry/registry.go
index cfe54e52aa6e..e3b32eb29657 100644
--- a/cache/remotecache/registry/registry.go
+++ b/cache/remotecache/registry/registry.go
@@ -131,11 +131,15 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript
 }
 
 func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
-	compressionType := compression.Default
+	var compressionType compression.Type
 	if v, ok := attrs[attrLayerCompression]; ok {
-		if c := compression.Parse(v); c != compression.UnknownCompression {
-			compressionType = c
+		c, err := compression.Parse(v)
+		if err != nil {
+			return nil, err
 		}
+		compressionType = c
+	} else {
+		compressionType = compression.Default
 	}
 	compressionConfig := compression.New(compressionType)
 	if v, ok := attrs[attrForceCompression]; ok {
diff --git a/cache/remotecache/s3/readerat.go b/cache/remotecache/s3/readerat.go
new file mode 100644
index 000000000000..666606817ec4
--- /dev/null
+++ b/cache/remotecache/s3/readerat.go
@@ -0,0 +1,75 @@
+package s3
+
+import (
+	"io"
+)
+
+type ReaderAtCloser interface {
+	io.ReaderAt
+	io.Closer
+}
+
+type readerAtCloser struct {
+	offset int64
+	rc     io.ReadCloser
+	ra     io.ReaderAt
+	open   func(offset int64) (io.ReadCloser, error)
+	closed bool
+}
+
+func toReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser {
+	return &readerAtCloser{
+		open: open,
+	}
+}
+
+func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) {
+	if hrs.closed {
+		return 0, io.EOF
+	}
+
+	if hrs.ra != nil {
+		return hrs.ra.ReadAt(p, off)
+	}
+
+	if hrs.rc == nil || off != hrs.offset {
+		if hrs.rc != nil {
+			hrs.rc.Close()
+			hrs.rc = nil
+		}
+		rc, err := hrs.open(off)
+		if err != nil {
+			return 0, err
+		}
+		hrs.rc = rc
+	}
+	if ra, ok := hrs.rc.(io.ReaderAt); ok {
+		hrs.ra = ra
+		n, err = ra.ReadAt(p, off)
+	} else {
+		for {
+			var nn int
+			nn, err = hrs.rc.Read(p)
+			n += nn
+			p = p[nn:]
+			// p has been resliced past the bytes just read, so stop when it
+			// is exhausted or the underlying reader failed.
+			if len(p) == 0 || err != nil {
+				break
+			}
+		}
+	}
+
+	// Track the absolute stream position so sequential ReadAt calls can
+	// reuse the already open reader.
+	hrs.offset = off + int64(n)
+	return
+}
+
+func (hrs *readerAtCloser) Close() error {
+	if hrs.closed {
+		return nil
+	}
+	hrs.closed = true
+	if hrs.rc != nil {
+		return hrs.rc.Close()
+	}
+
+	return nil
+}
diff --git a/cache/remotecache/s3/s3.go b/cache/remotecache/s3/s3.go
new file mode 100644
index 000000000000..f0e814b2f6e9
--- /dev/null
+++ b/cache/remotecache/s3/s3.go
@@ -0,0 +1,476 @@
+package s3
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	aws_config "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/smithy-go"
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/compression"
+	"github.com/moby/buildkit/util/progress"
+	"github.com/moby/buildkit/worker"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs
"github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + attrBucket = "bucket" + attrRegion = "region" + attrPrefix = "prefix" + attrManifestsPrefix = "manifests_prefix" + attrBlobsPrefix = "blobs_prefix" + attrName = "name" + attrTouchRefresh = "touch_refresh" + attrEndpointURL = "endpoint_url" + attrAccessKeyID = "access_key_id" + attrSecretAccessKey = "secret_access_key" + attrSessionToken = "session_token" + attrUsePathStyle = "use_path_style" +) + +type Config struct { + Bucket string + Region string + Prefix string + ManifestsPrefix string + BlobsPrefix string + Names []string + TouchRefresh time.Duration + EndpointURL string + AccessKeyID string + SecretAccessKey string + SessionToken string + UsePathStyle bool +} + +func getConfig(attrs map[string]string) (Config, error) { + bucket, ok := attrs[attrBucket] + if !ok { + bucket, ok = os.LookupEnv("AWS_BUCKET") + if !ok { + return Config{}, errors.Errorf("bucket ($AWS_BUCKET) not set for s3 cache") + } + } + + region, ok := attrs[attrRegion] + if !ok { + region, ok = os.LookupEnv("AWS_REGION") + if !ok { + return Config{}, errors.Errorf("region ($AWS_REGION) not set for s3 cache") + } + } + + prefix := attrs[attrPrefix] + + manifestsPrefix, ok := attrs[attrManifestsPrefix] + if !ok { + manifestsPrefix = "manifests/" + } + + blobsPrefix, ok := attrs[attrBlobsPrefix] + if !ok { + blobsPrefix = "blobs/" + } + + names := []string{"buildkit"} + name, ok := attrs[attrName] + if ok { + splittedNames := strings.Split(name, ";") + if len(splittedNames) > 0 { + names = splittedNames + } + } + + touchRefresh := 24 * time.Hour + + touchRefreshStr, ok := attrs[attrTouchRefresh] + if ok { + touchRefreshFromUser, err := time.ParseDuration(touchRefreshStr) + if err == nil { + touchRefresh = touchRefreshFromUser + } + } + + endpointURL := attrs[attrEndpointURL] + accessKeyID := attrs[attrAccessKeyID] + secretAccessKey := attrs[attrSecretAccessKey] + sessionToken := attrs[attrSessionToken] + + usePathStyle := false + usePathStyleStr, ok := attrs[attrUsePathStyle] + if ok { + usePathStyleUser, err := strconv.ParseBool(usePathStyleStr) + if err == nil { + usePathStyle = usePathStyleUser + } + } + + return Config{ + Bucket: bucket, + Region: region, + Prefix: prefix, + ManifestsPrefix: manifestsPrefix, + BlobsPrefix: blobsPrefix, + Names: names, + TouchRefresh: touchRefresh, + EndpointURL: endpointURL, + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + UsePathStyle: usePathStyle, + }, nil +} + +// ResolveCacheExporterFunc for s3 cache exporter. 
+func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { + return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { + config, err := getConfig(attrs) + if err != nil { + return nil, err + } + + s3Client, err := newS3Client(ctx, config) + if err != nil { + return nil, err + } + cc := v1.NewCacheChains() + return &exporter{CacheExporterTarget: cc, chains: cc, s3Client: s3Client, config: config}, nil + } +} + +type exporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains + s3Client *s3Client + config Config +} + +func (*exporter) Name() string { + return "exporting cache to s3" +} + +func (e *exporter) Config() remotecache.Config { + return remotecache.Config{ + Compression: compression.New(compression.Default), + } +} + +func (e *exporter) Finalize(ctx context.Context) (map[string]string, error) { + cacheConfig, descs, err := e.chains.Marshal(ctx) + if err != nil { + return nil, err + } + + for i, l := range cacheConfig.Layers { + dgstPair, ok := descs[l.Blob] + if !ok { + return nil, errors.Errorf("missing blob %s", l.Blob) + } + if dgstPair.Descriptor.Annotations == nil { + return nil, errors.Errorf("invalid descriptor without annotations") + } + v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"] + if !ok { + return nil, errors.Errorf("invalid descriptor without uncompressed annotation") + } + diffID, err := digest.Parse(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse uncompressed annotation") + } + + key := e.s3Client.blobKey(dgstPair.Descriptor.Digest) + exists, err := e.s3Client.exists(ctx, key) + if err != nil { + return nil, errors.Wrapf(err, "failed to check file presence in cache") + } + if exists != nil { + if time.Since(*exists) > e.config.TouchRefresh { + err = e.s3Client.touch(ctx, key) + if err != nil { + return nil, errors.Wrapf(err, "failed to touch file") + } + } + } else { + layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob)) + dt, err := content.ReadBlob(ctx, dgstPair.Provider, dgstPair.Descriptor) + if err != nil { + return nil, layerDone(err) + } + if err := e.s3Client.saveMutable(ctx, key, dt); err != nil { + return nil, layerDone(errors.Wrap(err, "error writing layer blob")) + } + layerDone(nil) + } + + la := &v1.LayerAnnotations{ + DiffID: diffID, + Size: dgstPair.Descriptor.Size, + MediaType: dgstPair.Descriptor.MediaType, + } + if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok { + var t time.Time + if err := (&t).UnmarshalText([]byte(v)); err != nil { + return nil, err + } + la.CreatedAt = t.UTC() + } + cacheConfig.Layers[i].Annotations = la + } + + dt, err := json.Marshal(cacheConfig) + if err != nil { + return nil, err + } + + for _, name := range e.config.Names { + if err := e.s3Client.saveMutable(ctx, e.s3Client.manifestKey(name), dt); err != nil { + return nil, errors.Wrapf(err, "error writing manifest: %s", name) + } + } + return nil, nil +} + +// ResolveCacheImporterFunc for s3 cache importer. 
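+//
+// Note (editorial): the exporter writes the cache manifest under every
+// configured name, but the importer below only reads the manifest for the
+// first name (see load).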
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, _ session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) {
+		config, err := getConfig(attrs)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, err
+		}
+		s3Client, err := newS3Client(ctx, config)
+		if err != nil {
+			return nil, ocispecs.Descriptor{}, err
+		}
+		return &importer{s3Client, config}, ocispecs.Descriptor{}, nil
+	}
+}
+
+type importer struct {
+	s3Client *s3Client
+	config   Config
+}
+
+func (i *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) {
+	if l.Annotations == nil {
+		return nil, errors.Errorf("cache layer with missing annotations")
+	}
+	if l.Annotations.DiffID == "" {
+		return nil, errors.Errorf("cache layer with missing diffid")
+	}
+	annotations := map[string]string{}
+	annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+	if !l.Annotations.CreatedAt.IsZero() {
+		txt, err := l.Annotations.CreatedAt.MarshalText()
+		if err != nil {
+			return nil, err
+		}
+		annotations["buildkit/createdat"] = string(txt)
+	}
+	return &v1.DescriptorProviderPair{
+		Provider: i.s3Client,
+		Descriptor: ocispecs.Descriptor{
+			MediaType:   l.Annotations.MediaType,
+			Digest:      l.Blob,
+			Size:        l.Annotations.Size,
+			Annotations: annotations,
+		},
+	}, nil
+}
+
+func (i *importer) load(ctx context.Context) (*v1.CacheChains, error) {
+	var config v1.CacheConfig
+	found, err := i.s3Client.getManifest(ctx, i.s3Client.manifestKey(i.config.Names[0]), &config)
+	if err != nil {
+		return nil, err
+	}
+	if !found {
+		return v1.NewCacheChains(), nil
+	}
+
+	allLayers := v1.DescriptorProvider{}
+
+	for _, l := range config.Layers {
+		dpp, err := i.makeDescriptorProviderPair(l)
+		if err != nil {
+			return nil, err
+		}
+		allLayers[l.Blob] = *dpp
+	}
+
+	cc := v1.NewCacheChains()
+	if err := v1.ParseConfig(config, allLayers, cc); err != nil {
+		return nil, err
+	}
+	return cc, nil
+}
+
+func (i *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+	cc, err := i.load(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+	if err != nil {
+		return nil, err
+	}
+
+	return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil
+}
+
+type readerAt struct {
+	ReaderAtCloser
+	size int64
+}
+
+func (r *readerAt) Size() int64 {
+	return r.size
+}
+
+type s3Client struct {
+	*s3.Client
+	*manager.Uploader
+	bucket          string
+	prefix          string
+	blobsPrefix     string
+	manifestsPrefix string
+}
+
+func newS3Client(ctx context.Context, config Config) (*s3Client, error) {
+	cfg, err := aws_config.LoadDefaultConfig(ctx, aws_config.WithRegion(config.Region))
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to load AWS SDK config")
+	}
+	client := s3.NewFromConfig(cfg, func(options *s3.Options) {
+		if config.AccessKeyID != "" && config.SecretAccessKey != "" {
+			options.Credentials = credentials.NewStaticCredentialsProvider(config.AccessKeyID, config.SecretAccessKey, config.SessionToken)
+		}
+		if config.EndpointURL != "" {
+			options.UsePathStyle = config.UsePathStyle
+			options.EndpointResolver = s3.EndpointResolverFromURL(config.EndpointURL)
+		}
+	})
+
+	return &s3Client{
+		Client:          client,
+		Uploader:        manager.NewUploader(client),
+		bucket:          config.Bucket,
+		prefix:          config.Prefix,
+		blobsPrefix:     config.BlobsPrefix,
+		manifestsPrefix: config.ManifestsPrefix,
+	}, nil
+}
+
+func (s3Client
*s3Client) getManifest(ctx context.Context, key string, config *v1.CacheConfig) (bool, error) { + input := &s3.GetObjectInput{ + Bucket: &s3Client.bucket, + Key: &key, + } + + output, err := s3Client.GetObject(ctx, input) + if err != nil { + if isNotFound(err) { + return false, nil + } + return false, err + } + defer output.Body.Close() + + decoder := json.NewDecoder(output.Body) + if err := decoder.Decode(config); err != nil { + return false, errors.WithStack(err) + } + + return true, nil +} + +func (s3Client *s3Client) getReader(ctx context.Context, key string) (io.ReadCloser, error) { + input := &s3.GetObjectInput{ + Bucket: &s3Client.bucket, + Key: &key, + } + + output, err := s3Client.GetObject(ctx, input) + if err != nil { + return nil, err + } + return output.Body, nil +} + +func (s3Client *s3Client) saveMutable(ctx context.Context, key string, value []byte) error { + input := &s3.PutObjectInput{ + Bucket: &s3Client.bucket, + Key: &key, + + Body: bytes.NewReader(value), + } + _, err := s3Client.Upload(ctx, input) + return err +} + +func (s3Client *s3Client) exists(ctx context.Context, key string) (*time.Time, error) { + input := &s3.HeadObjectInput{ + Bucket: &s3Client.bucket, + Key: &key, + } + + head, err := s3Client.HeadObject(ctx, input) + if err != nil { + if isNotFound(err) { + return nil, nil + } + return nil, err + } + return head.LastModified, nil +} + +func (s3Client *s3Client) touch(ctx context.Context, key string) error { + copySource := fmt.Sprintf("%s/%s", s3Client.bucket, key) + cp := &s3.CopyObjectInput{ + Bucket: &s3Client.bucket, + CopySource: ©Source, + Key: &key, + Metadata: map[string]string{"updated-at": time.Now().String()}, + MetadataDirective: "REPLACE", + } + + _, err := s3Client.CopyObject(ctx, cp) + + return err +} + +func (s3Client *s3Client) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + readerAtCloser := toReaderAtCloser(func(offset int64) (io.ReadCloser, error) { + return s3Client.getReader(ctx, s3Client.blobKey(desc.Digest)) + }) + return &readerAt{ReaderAtCloser: readerAtCloser, size: desc.Size}, nil +} + +func (s3Client *s3Client) manifestKey(name string) string { + return s3Client.prefix + s3Client.manifestsPrefix + name +} + +func (s3Client *s3Client) blobKey(dgst digest.Digest) string { + return s3Client.prefix + s3Client.blobsPrefix + dgst.String() +} + +func isNotFound(err error) bool { + var errapi smithy.APIError + return errors.As(err, &errapi) && (errapi.ErrorCode() == "NoSuchKey" || errapi.ErrorCode() == "NotFound") +} diff --git a/cache/remotecache/v1/cachestorage.go b/cache/remotecache/v1/cachestorage.go index 7ba7eb0f6059..a4f7f6ad055f 100644 --- a/cache/remotecache/v1/cachestorage.go +++ b/cache/remotecache/v1/cachestorage.go @@ -276,7 +276,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR // Any of blobs in the remote must meet the specified compression option. 
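+			// Without Force a single matching blob suffices; with Force every
+			// blob must already have the requested media type.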
 		match := false
 		for _, desc := range r.result.Descriptors {
-			m := compressionopts.Type.IsMediaType(desc.MediaType)
+			m := compression.IsMediaType(compressionopts.Type, desc.MediaType)
 			match = match || m
 			if compressionopts.Force && !m {
 				match = false
diff --git a/cache/remotecache/v1/chains.go b/cache/remotecache/v1/chains.go
index 306e037f7f1c..8c8bbde5dc76 100644
--- a/cache/remotecache/v1/chains.go
+++ b/cache/remotecache/v1/chains.go
@@ -146,7 +146,7 @@ func (c *item) removeLink(src *item) bool {
 	return found
 }
 
-func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *item) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
 	c.resultTime = createdAt
 	c.result = result
 }
@@ -214,7 +214,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}
 type nopRecord struct {
 }
 
-func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
 }
 
 func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
diff --git a/cache/remotecache/v1/chains_test.go b/cache/remotecache/v1/chains_test.go
index 7e2a2f525769..5e7bcd0691c7 100644
--- a/cache/remotecache/v1/chains_test.go
+++ b/cache/remotecache/v1/chains_test.go
@@ -29,7 +29,7 @@ func TestSimpleMarshal(t *testing.T) {
 				Digest: dgst("d1"),
 			}},
 		}
-		baz.AddResult(time.Now(), r0)
+		baz.AddResult("", 0, time.Now(), r0)
 	}
 	addRecords()
diff --git a/cache/remotecache/v1/doc.go b/cache/remotecache/v1/doc.go
index 97d21a452068..a1b00d86f68f 100644
--- a/cache/remotecache/v1/doc.go
+++ b/cache/remotecache/v1/doc.go
@@ -1,6 +1,6 @@
 package cacheimport
 
-// Distibutable build cache
+// Distributable build cache
 //
 // Main manifest is OCI image index
 // https://github.com/opencontainers/image-spec/blob/master/image-index.md .
@@ -13,7 +13,7 @@ package cacheimport
 // Cache config file layout:
 //
 //{
-//  "layers": [
+//  "layers": [ <- layers contains references to blobs
 //    {
 //      "blob": "sha256:deadbeef", <- digest of layer blob in index
 //      "parent": -1 <- index of parent layer, -1 if no parent
@@ -24,20 +24,26 @@ package cacheimport
 //    }
 //  ],
 //
-//  "records": [
+//  "records": [ <- records contains chains of cache keys
 //    {
 //      "digest": "sha256:deadbeef", <- base digest for the record
 //    },
 //    {
 //      "digest": "sha256:deadbeef",
 //      "output": 1, <- optional output index
-//      "layers": [ <- optional array or layer chains
+//      "layers": [ <- optional array of layer pointers
 //        {
 //          "createdat": "",
-//          "layer": 1, <- index to the layer
+//          "layer": 1, <- index to the layers array, layer is loaded with all of its parents
 //        }
 //      ],
-//      "inputs": [ <- dependant records
+//      "chains": [ <- optional array of layer pointer lists
+//        {
+//          "createdat": "",
+//          "layers": [1], <- indexes to the layers array, all layers are loaded in specified order without parents
+//        }
+//      ],
+//      "inputs": [ <- dependent records, this is how cache keys are linked together
 //        [ <- index of the dependency (0)
 //          {
 //            "selector": "sel", <- optional selector
diff --git a/cache/remotecache/v1/parse.go b/cache/remotecache/v1/parse.go
index 65a6e441f575..3c8294a602c0 100644
--- a/cache/remotecache/v1/parse.go
+++ b/cache/remotecache/v1/parse.go
@@ -61,7 +61,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
return nil, err } if remote != nil { - r.AddResult(res.CreatedAt, remote) + r.AddResult("", 0, res.CreatedAt, remote) } } @@ -86,7 +86,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver. } if remote != nil { remote.Provider = mp - r.AddResult(res.CreatedAt, remote) + r.AddResult("", 0, res.CreatedAt, remote) } } diff --git a/cache/util/fsutil.go b/cache/util/fsutil.go index b425a002a542..e90ed45f77f4 100644 --- a/cache/util/fsutil.go +++ b/cache/util/fsutil.go @@ -3,7 +3,6 @@ package util import ( "context" "io" - "io/ioutil" "os" "path/filepath" @@ -59,7 +58,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ } if req.Range == nil { - dt, err = ioutil.ReadFile(fp) + dt, err = os.ReadFile(fp) if err != nil { return errors.WithStack(err) } @@ -68,7 +67,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ if err != nil { return errors.WithStack(err) } - dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) + dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) f.Close() if err != nil { return errors.WithStack(err) diff --git a/client/build.go b/client/build.go index 25b3aa6d7ccf..2a4bc9e105d1 100644 --- a/client/build.go +++ b/client/build.go @@ -20,17 +20,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF } }() - if opt.Frontend != "" { - return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend") - } + feOpts := opt.FrontendAttrs + + opt.Frontend = "" if product == "" { product = apicaps.ExportedProduct } - feOpts := opt.FrontendAttrs - opt.FrontendAttrs = nil - workers, err := c.ListWorkers(ctx) if err != nil { return nil, errors.Wrap(err, "listing workers for Build") @@ -113,6 +110,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta return g.gateway.StatFile(ctx, in, opts...) } +func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) { + if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil { + if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + _, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...) + return &gatewayapi.EvaluateResponse{}, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Evaluate(ctx, in, opts...) +} + func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) { ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.Ping(ctx, in, opts...) 
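Editorial note on the `Evaluate` plumbing above: when the daemon lacks `CapGatewayEvaluate`, the wrapper falls back to a `StatFile` on `"."`, which equally forces the referenced result to be solved. A minimal sketch of a frontend callback that leans on this, assuming only the buildkit client packages already imported in this patch (illustrative, not part of the change):

```go
package frontend

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	gwclient "github.com/moby/buildkit/frontend/gateway/client"
)

// buildFunc forces full evaluation of the solve, so build errors surface
// inside the callback rather than later, at export time.
func buildFunc(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
	def, err := llb.Image("busybox:latest").Marshal(ctx)
	if err != nil {
		return nil, err
	}
	return c.Solve(ctx, gwclient.SolveRequest{
		Definition: def.ToPB(),
		Evaluate:   true, // served by Evaluate, or by the StatFile fallback on older daemons
	})
}
```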
diff --git a/client/build_test.go b/client/build_test.go index 58fa5f4684cb..1376c1515ae9 100644 --- a/client/build_test.go +++ b/client/build_test.go @@ -3,11 +3,8 @@ package client import ( "bytes" "context" - "encoding/base64" - "encoding/json" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -17,7 +14,6 @@ import ( "time" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend/gateway/client" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" @@ -26,7 +22,6 @@ import ( "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/entitlements" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/util/testutil/echoserver" @@ -59,7 +54,8 @@ func TestClientGatewayIntegration(t *testing.T) { testClientGatewayContainerExtraHosts, testClientGatewayContainerSignal, testWarnings, - testClientGatewayFrontendAttrs, + testClientGatewayNilResult, + testClientGatewayEmptyImageExec, ), integration.WithMirroredImages(integration.OfficialImages("busybox:latest"))) integration.Run(t, integration.TestFuncs( @@ -134,9 +130,7 @@ func testClientGatewaySolve(t *testing.T, sb integration.Sandbox) { return r, nil } - tmpdir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testStr := "This is a test" @@ -153,7 +147,7 @@ func testClientGatewaySolve(t *testing.T, sb integration.Sandbox) { }, product, b, nil) require.NoError(t, err) - read, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo")) + read, err := os.ReadFile(filepath.Join(tmpdir, "foo")) require.NoError(t, err) require.Equal(t, testStr, string(read)) @@ -476,7 +470,7 @@ func testClientGatewayContainerExecPipe(t *testing.T, sb integration.Sandbox) { Args: []string{"cat"}, Cwd: "/", Tty: false, - Stdin: ioutil.NopCloser(stdin2), + Stdin: io.NopCloser(stdin2), Stdout: stdout2, }) @@ -688,17 +682,14 @@ func testClientGatewayContainerMounts(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - tmpdir, err := ioutil.TempDir("", "buildkit-buildctl") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpdir, "local-file"), []byte("local"), 0644) + err = os.WriteFile(filepath.Join(tmpdir, "local-file"), []byte("local"), 0644) require.NoError(t, err) a := agent.NewKeyring() - sockPath, clean, err := makeSSHAgentSock(a) + sockPath, err := makeSSHAgentSock(t, a) require.NoError(t, err) - defer clean() ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ ID: t.Name(), @@ -1624,6 +1615,7 @@ func testClientGatewayExecFileActionError(t *testing.T, sb integration.Sandbox) // testClientGatewayContainerSecurityMode ensures that the correct security mode // is propagated to the gateway container func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode) requiresLinux(t) ctx := sb.Context() @@ -1650,7 +1642,6 @@ func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox } allowedEntitlements = []entitlements.Entitlement{} } else { - skipDockerd(t, sb) assertCaps = func(caps uint64) { /* $ capsh --decode=0000003fffffffff @@ -1999,62 +1990,86 @@ func 
testClientGatewayContainerSignal(t *testing.T, sb integration.Sandbox) { checkAllReleasable(t, c, sb, true) } -// moby/buildkit#2476 -func testClientGatewayFrontendAttrs(t *testing.T, sb integration.Sandbox) { +func testClientGatewayNilResult(t *testing.T, sb integration.Sandbox) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - fooattrval := "bar" - bazattrval := "fuu" - b := func(ctx context.Context, c client.Client) (*client.Result, error) { - st := llb.Image("busybox:latest").Run( - llb.ReadonlyRootFS(), - llb.Args([]string{"/bin/sh", "-c", `echo hello`}), - ) - def, err := st.Marshal(sb.Context()) + st := llb.Image("busybox:latest") + diff := llb.Diff(st, st) + def, err := diff.Marshal(sb.Context()) if err != nil { return nil, err } res, err := c.Solve(ctx, client.SolveRequest{ Definition: def.ToPB(), - FrontendOpt: map[string]string{ - "build-arg:foo": fooattrval, - }, + Evaluate: true, }) require.NoError(t, err) - require.Contains(t, res.Metadata, exptypes.ExporterBuildInfo) - - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(res.Metadata[exptypes.ExporterBuildInfo], &bi)) - require.Contains(t, bi.Attrs, "build-arg:foo") - bi.Attrs["build-arg:baz"] = &bazattrval - bmbi, err := json.Marshal(bi) + ref, err := res.SingleRef() require.NoError(t, err) - res.AddMeta(exptypes.ExporterBuildInfo, bmbi) - return res, err + dirEnts, err := ref.ReadDir(ctx, client.ReadDirRequest{ + Path: "/", + }) + require.NoError(t, err) + require.Len(t, dirEnts, 0) + return nil, nil } - res, err := c.Build(sb.Context(), SolveOpt{}, "", b, nil) + _, err = c.Build(sb.Context(), SolveOpt{}, "", b, nil) require.NoError(t, err) +} - require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) - decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) +func testClientGatewayEmptyImageExec(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(decbi, &bi)) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + target := registry + "/buildkit/testemptyimage:latest" - require.Contains(t, bi.Attrs, "build-arg:foo") - require.Equal(t, &fooattrval, bi.Attrs["build-arg:foo"]) - require.Contains(t, bi.Attrs, "build-arg:baz") - require.Equal(t, &bazattrval, bi.Attrs["build-arg:baz"]) + // push an empty image + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, "", func(ctx context.Context, c client.Client) (*client.Result, error) { + return client.NewResult(), nil + }, nil) + require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + _, err = c.Build(sb.Context(), SolveOpt{}, "", func(ctx context.Context, gw client.Client) (*client.Result, error) { + // create an exec on that empty image (expected to fail, but not to panic) + st := llb.Image(target).Run( + llb.Args([]string{"echo", "hello"}), + ).Root() + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + _, err = gw.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + Evaluate: true, + }) + require.ErrorContains(t, err, `process "echo hello" did not complete successfully`) + return nil, nil + }, nil) + 
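+	// The exec failure is asserted inside the callback; the outer Build itself
+	// is expected to succeed.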
require.NoError(t, err) } type nopCloser struct { diff --git a/client/client.go b/client/client.go index 8c9259a4a9d1..deac2507a996 100644 --- a/client/client.go +++ b/client/client.go @@ -4,11 +4,12 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/url" + "os" "strings" + contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/defaults" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" controlapi "github.com/moby/buildkit/api/services/control" @@ -168,12 +169,16 @@ func (c *Client) setupDelegatedTracing(ctx context.Context, td TracerDelegate) e return td.SetSpanExporter(ctx, e) } -func (c *Client) controlClient() controlapi.ControlClient { +func (c *Client) ControlClient() controlapi.ControlClient { return controlapi.NewControlClient(c.conn) } +func (c *Client) ContentClient() contentapi.ContentClient { + return contentapi.NewContentClient(c.conn) +} + func (c *Client) Dialer() session.Dialer { - return grpchijack.Dialer(c.controlClient()) + return grpchijack.Dialer(c.ControlClient()) } func (c *Client) Close() error { @@ -212,7 +217,7 @@ func WithCredentials(serverName, ca, cert, key string) ClientOpt { } func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := ioutil.ReadFile(opts.CACert) + ca, err := os.ReadFile(opts.CACert) if err != nil { return nil, errors.Wrap(err, "could not read ca certificate") } @@ -234,7 +239,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { return nil, errors.Wrap(err, "could not read certificate/key") } cfg.Certificates = []tls.Certificate{cert} - cfg.BuildNameToCertificate() } return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil diff --git a/client/client_nydus_test.go b/client/client_nydus_test.go new file mode 100644 index 000000000000..ecaffba6bbfd --- /dev/null +++ b/client/client_nydus_test.go @@ -0,0 +1,139 @@ +//go:build nydus +// +build nydus + +package client + +import ( + "fmt" + "strconv" + "testing" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/namespaces" + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/testutil/integration" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestNydusIntegration(t *testing.T) { + testIntegration( + t, + testBuildExportNydusWithHybrid, + ) +} + +func testBuildExportNydusWithHybrid(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + t.Skip("test requires containerd worker") + } + + client, err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + var ( + imageService = client.ImageService() + contentStore = client.ContentStore() + ctx = namespaces.WithNamespace(sb.Context(), "buildkit") + ) + + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + buildNydus := func(file string) { + orgImage := "docker.io/library/alpine:latest" + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file})) + def, err := 
baseDef.Marshal(sb.Context()) + require.NoError(t, err) + + target := registry + "/nydus/alpine:" + identity.NewID() + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": "nydus", + "oci-mediatypes": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + img, err := imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + require.Equal(t, len(manifest.Layers), 3) + require.Equal(t, "true", manifest.Layers[0].Annotations[nydusify.LayerAnnotationNydusBlob]) + require.Equal(t, "true", manifest.Layers[1].Annotations[nydusify.LayerAnnotationNydusBlob]) + require.Equal(t, "true", manifest.Layers[2].Annotations[nydusify.LayerAnnotationNydusBootstrap]) + } + + buildOther := func(file string, compType compression.Type, forceCompression bool) { + orgImage := "docker.io/library/alpine:latest" + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file})) + def, err := baseDef.Marshal(sb.Context()) + require.NoError(t, err) + + mediaTypes := map[compression.Type]string{ + compression.Gzip: ocispecs.MediaTypeImageLayerGzip, + compression.Zstd: ocispecs.MediaTypeImageLayer + "+zstd", + } + target := fmt.Sprintf("%s/%s/alpine:%s", registry, compType, identity.NewID()) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": compType.String(), + "oci-mediatypes": "true", + "force-compression": strconv.FormatBool(forceCompression), + }, + }, + }, + }, nil) + require.NoError(t, err) + + img, err := imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + require.Equal(t, 2, len(manifest.Layers)) + require.Equal(t, mediaTypes[compType], manifest.Layers[0].MediaType) + require.Equal(t, mediaTypes[compType], manifest.Layers[1].MediaType) + } + + // Make sure that the nydus compression layer is not mixed with other + // types of compression layers in an image. 
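+ //
+ // A note on what the two helpers assert (derived from the checks above):
+ // buildNydus expects a nydus manifest of three layers (two nydus blobs
+ // plus a bootstrap, per the annotations), while buildOther expects
+ // exactly two layers of the requested gzip/zstd media type. Running both
+ // over the same file names, in both orders, verifies that a cached nydus
+ // blob is never reused for a gzip/zstd export and vice versa.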
+ buildNydus("foo") + buildOther("foo", compression.Gzip, false) + buildOther("foo", compression.Zstd, true) + + buildOther("bar", compression.Gzip, false) + buildOther("bar", compression.Zstd, true) + buildNydus("bar") +} diff --git a/client/client_test.go b/client/client_test.go index cda4e58c59e4..b97eb75f274b 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -12,10 +12,10 @@ import ( "encoding/pem" "fmt" "io" - "io/ioutil" "net" "net/http" "os" + "path" "path/filepath" "runtime" "strconv" @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" ctderrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" @@ -33,31 +34,42 @@ import ( "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" + intoto "github.com/in-toto/in-toto-golang/in_toto" + controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" gateway "github.com/moby/buildkit/frontend/gateway/client" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets/secretsprovider" "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/sourcepolicy" + sourcepolicypb "github.com/moby/buildkit/sourcepolicy/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/attestation" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/entitlements" + "github.com/moby/buildkit/util/purl" "github.com/moby/buildkit/util/testutil" "github.com/moby/buildkit/util/testutil/echoserver" "github.com/moby/buildkit/util/testutil/httpserver" "github.com/moby/buildkit/util/testutil/integration" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + spdx "github.com/spdx/tools-golang/spdx/v2_3" "github.com/stretchr/testify/require" "golang.org/x/crypto/ssh/agent" "golang.org/x/sync/errgroup" ) func init() { - if os.Getenv("TEST_DOCKERD") == "1" { + if integration.IsTestDockerd() { integration.InitDockerdWorker() } else { integration.InitOCIWorker() @@ -72,26 +84,25 @@ type nopWriteCloser struct { func (nopWriteCloser) Close() error { return nil } func TestIntegration(t *testing.T) { - mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest") - mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers" - mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest" - mirrors := integration.WithMirroredImages(mirroredImages) - - tests := integration.TestFuncs( + testIntegration( + t, testCacheExportCacheKeyLoop, testRelativeWorkDir, testFileOpMkdirMkfile, testFileOpCopyRm, testFileOpCopyIncludeExclude, testFileOpRmWildcard, + testFileOpCopyUIDCache, testCallDiskUsage, testBuildMultiMount, testBuildHTTPSource, testBuildPushAndValidate, testBuildExportWithUncompressed, + testBuildExportScratch, testResolveAndHosts, testUser, testOCIExporter, + 
testOCIExporterContentStore, testWhiteoutParentDir, testFrontendImageNaming, testDuplicateWhiteouts, @@ -143,6 +154,8 @@ func TestIntegration(t *testing.T) { testFileOpInputSwap, testRelativeMountpoint, testLocalSourceDiffer, + testOCILayoutSource, + testOCILayoutPlatformSource, testBuildExportZstd, testPullZstdImage, testMergeOp, @@ -156,12 +169,42 @@ func TestIntegration(t *testing.T) { testBuildInfoInline, testBuildInfoNoExport, testZstdLocalCacheExport, + testCacheExportIgnoreError, testZstdRegistryCacheImportExport, testZstdLocalCacheImportExport, testUncompressedLocalCacheImportExport, testUncompressedRegistryCacheImportExport, testStargzLazyRegistryCacheImportExport, + testValidateDigestOrigin, + testCallInfo, + testPullWithLayerLimit, + testExportAnnotations, + testExportAnnotationsMediaTypes, + testExportAttestations, + testAttestationDefaultSubject, + testSourceDateEpochLayerTimestamps, + testSourceDateEpochClamp, + testSourceDateEpochReset, + testSourceDateEpochLocalExporter, + testSourceDateEpochTarExporter, + testAttestationBundle, + testSBOMScan, + testSBOMScanSingleRef, + testSBOMSupplements, + testMultipleCacheExports, + testMountStubsDirectory, + testMountStubsTimestamp, + testSourcePolicy, ) +} + +func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sandbox)) { + mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest") + mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers" + mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest" + mirrors := integration.WithMirroredImages(mirroredImages) + + tests := integration.TestFuncs(funcs...) tests = append(tests, diffOpTestCases()...) integration.Run(t, tests, mirrors) @@ -186,6 +229,15 @@ func TestIntegration(t *testing.T) { "host": hostNetwork, }), ) + + integration.Run(t, integration.TestFuncs( + testBridgeNetworkingDNSNoRootless, + ), + mirrors, + integration.WithMatrix("netmode", map[string]interface{}{ + "dns": bridgeDNSNetwork, + }), + ) } func newContainerd(cdAddress string) (*containerd.Client, error) { @@ -194,15 +246,14 @@ func newContainerd(cdAddress string) (*containerd.Client, error) { // moby/buildkit#1336 func testCacheExportCacheKeyLoop(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - tmpdir, err := ioutil.TempDir("", "buildkit-buildctl") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpdir, "foo"), []byte("foodata"), 0600) + err = os.WriteFile(filepath.Join(tmpdir, "foo"), []byte("foodata"), 0600) require.NoError(t, err) for _, mode := range []bool{false, true} { @@ -258,6 +309,46 @@ func testBridgeNetworking(t *testing.T, sb integration.Sandbox) { _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.Error(t, err) } + +func testBridgeNetworkingDNSNoRootless(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCNINetwork) + if os.Getenv("BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS") == "" { + t.SkipNow() + } + + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + name := identity.NewID() + server, err := llb.Image("busybox"). + Run( + llb.Shlexf(`sh -c 'test "$(nc -l -p 1234)" = "foo"'`), + llb.Hostname(name), + ). 
+ Marshal(sb.Context()) + require.NoError(t, err) + + client, err := llb.Image("busybox"). + Run( + llb.Shlexf("sh -c 'until echo foo | nc " + name + " 1234 -w0; do sleep 0.1; done'"), + ). + Marshal(sb.Context()) + require.NoError(t, err) + + eg, ctx := errgroup.WithContext(context.Background()) + eg.Go(func() error { + _, err := c.Solve(ctx, server, SolveOpt{}, nil) + return err + }) + eg.Go(func() error { + _, err := c.Solve(ctx, client, SolveOpt{}, nil) + return err + }) + err = eg.Wait() + require.NoError(t, err) +} + func testHostNetworking(t *testing.T, sb integration.Sandbox) { if os.Getenv("BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS") == "" { t.SkipNow() @@ -297,9 +388,7 @@ func testExportBusyboxLocal(t *testing.T, sb integration.Sandbox) { def, err := llb.Image("busybox").Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -390,9 +479,8 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { err = a.Add(agent.AddedKey{PrivateKey: k}) require.NoError(t, err) - sockPath, clean, err := makeSSHAgentSock(a) + sockPath, err := makeSSHAgentSock(t, a) require.NoError(t, err) - defer clean() ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ Paths: []string{sockPath}, @@ -439,9 +527,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { def, err = out.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -454,11 +540,11 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "sock")) + dt, err := os.ReadFile(filepath.Join(destDir, "sock")) require.NoError(t, err) require.Equal(t, "/run/buildkit/ssh_agent.0", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Contains(t, string(dt), "2048") require.Contains(t, string(dt), "(RSA)") @@ -486,7 +572,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Contains(t, string(dt), "agent refused operation") @@ -500,7 +586,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { def, err = out.Marshal(sb.Context()) require.NoError(t, err) - k, err = rsa.GenerateKey(rand.Reader, 1024) + k, err = rsa.GenerateKey(rand.Reader, 2048) require.NoError(t, err) dt = pem.EncodeToMemory( @@ -510,11 +596,9 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { }, ) - tmpDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600) + err = os.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600) require.NoError(t, err) ssh, err = sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ @@ -522,9 +606,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { }}) require.NoError(t, err) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = 
c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -537,9 +619,9 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) - require.Contains(t, string(dt), "1024") + require.Contains(t, string(dt), "2048") require.Contains(t, string(dt), "(RSA)") } @@ -572,9 +654,7 @@ func testShmSize(t *testing.T, sb integration.Sandbox) { def, err := out.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -586,7 +666,7 @@ func testShmSize(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Contains(t, string(dt), `size=131072k`) } @@ -609,9 +689,7 @@ func testUlimit(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -623,11 +701,11 @@ func testUlimit(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "first")) + dt, err := os.ReadFile(filepath.Join(destDir, "first")) require.NoError(t, err) require.Equal(t, `1062`, strings.TrimSpace(string(dt))) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "second")) + dt2, err := os.ReadFile(filepath.Join(destDir, "second")) require.NoError(t, err) require.NotEqual(t, `1062`, strings.TrimSpace(string(dt2))) } @@ -654,9 +732,7 @@ func testCgroupParent(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -668,11 +744,11 @@ func testCgroupParent(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "first")) + dt, err := os.ReadFile(filepath.Join(destDir, "first")) require.NoError(t, err) require.Contains(t, strings.TrimSpace(string(dt)), `/foocgroup/buildkit/`) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "second")) + dt2, err := os.ReadFile(filepath.Join(destDir, "second")) require.NoError(t, err) require.NotContains(t, strings.TrimSpace(string(dt2)), `/foocgroup/buildkit/`) } @@ -706,7 +782,7 @@ func testNetworkMode(t *testing.T, sb integration.Sandbox) { } func testPushByDigest(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -751,6 +827,7 @@ func testPushByDigest(t *testing.T, sb integration.Sandbox) { } func testSecurityMode(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode) command := `sh -c 'cat /proc/self/status | grep CapEff | cut -f 2 > /out'` mode := llb.SecurityModeSandbox var allowedEntitlements []entitlements.Entitlement @@ -767,7 +844,6 @@ 
func testSecurityMode(t *testing.T, sb integration.Sandbox) { } allowedEntitlements = []entitlements.Entitlement{} } else { - skipDockerd(t, sb) assertCaps = func(caps uint64) { /* $ capsh --decode=0000003fffffffff @@ -796,9 +872,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -812,7 +886,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) - contents, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + contents, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) caps, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 16, 64) @@ -824,6 +898,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) { } func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode) if sb.Rootless() { t.SkipNow() } @@ -834,7 +909,6 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) { if secMode == securitySandbox { allowedEntitlements = []entitlements.Entitlement{} } else { - skipDockerd(t, sb) mode = llb.SecurityModeInsecure allowedEntitlements = []entitlements.Entitlement{entitlements.EntitlementSecurityInsecure} } @@ -843,7 +917,12 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - command := `mkdir /sys/fs/cgroup/cpuset/securitytest` + cg := "/sys/fs/cgroup/cpuset/securitytest" // cgroup v1 + if _, err := os.Stat("/sys/fs/cgroup/cpuset"); errors.Is(err, os.ErrNotExist) { + cg = "/sys/fs/cgroup/securitytest" // cgroup v2 + } + + command := "mkdir " + cg st := llb.Image("busybox:latest"). Run(llb.Shlex(command), llb.Security(mode)) @@ -858,7 +937,7 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) { if secMode == securitySandbox { require.Error(t, err) require.Contains(t, err.Error(), "did not complete successfully") - require.Contains(t, err.Error(), "mkdir /sys/fs/cgroup/cpuset/securitytest") + require.Contains(t, err.Error(), "mkdir "+cg) } else { require.NoError(t, err) } @@ -896,7 +975,7 @@ func testSecurityModeErrors(t *testing.T, sb integration.Sandbox) { } func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureDirectPush) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -911,13 +990,12 @@ func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) { checkImageName := map[string]func(out, imageName string, exporterResponse map[string]string){ ExporterOCI: func(out, imageName string, exporterResponse map[string]string) { // Nothing to check - return }, ExporterDocker: func(out, imageName string, exporterResponse map[string]string) { require.Contains(t, exporterResponse, "image.name") require.Equal(t, exporterResponse["image.name"], "docker.io/library/"+imageName) - dt, err := ioutil.ReadFile(out) + dt, err := os.ReadFile(out) require.NoError(t, err) m, err := testutil.ReadTarToMap(dt, false) @@ -989,9 +1067,7 @@ func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) { for _, exp := range []string{ExporterOCI, ExporterDocker, ExporterImage} { exp := exp // capture loop variable. 
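// (each t.Run closure below captures exp; without this re-declaration,
// every subtest would observe the final loop value under pre-Go 1.22
// loop-variable scoping)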
t.Run(exp, func(t *testing.T) { - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() so := SolveOpt{ Exports: []ExportEntry{ @@ -1063,9 +1139,9 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - // test optional + // test optional, mount should not exist when secret not present in SolveOpt st = llb.Image("busybox:latest"). - Run(llb.Shlex(`echo secret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional)) + Run(llb.Shlex(`test ! -f /run/secrets/mysecret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional)) def, err = st.Marshal(sb.Context()) require.NoError(t, err) @@ -1102,6 +1178,20 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) { })}, }, nil) require.NoError(t, err) + + // test empty cert still creates secret file + st = llb.Image("busybox:latest"). + Run(llb.Shlex(`test -f /run/secrets/mysecret5`), llb.AddSecret("/run/secrets/mysecret5", llb.SecretID("mysecret"))) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{ + "mysecret": []byte(""), + })}, + }, nil) + require.NoError(t, err) } func testSecretEnv(t *testing.T, sb integration.Sandbox) { @@ -1202,7 +1292,8 @@ func testLocalSymlinkEscape(t *testing.T, sb integration.Sandbox) { [[ $(readlink /mount/sub/bar) == "../../../etc/group" ]] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, // point to absolute path that is not part of dir fstest.Symlink("/etc/passwd", "foo"), fstest.CreateDir("sub", 0700), @@ -1220,7 +1311,6 @@ func testLocalSymlinkEscape(t *testing.T, sb integration.Sandbox) { fstest.CreateFile("test.sh", test, 0700), ) require.NoError(t, err) - defer os.RemoveAll(dir) local := llb.Local("mylocal", llb.FollowPaths([]string{ "test.sh", "foo", "sub/bar", "bax", "sub/sub2/file", @@ -1255,9 +1345,7 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) { def, err := pwd.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1269,7 +1357,7 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "pwd")) + dt, err := os.ReadFile(filepath.Join(destDir, "pwd")) require.NoError(t, err) require.Equal(t, []byte("/test1/test2\n"), dt) } @@ -1286,9 +1374,7 @@ func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1304,7 +1390,7 @@ func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) require.Equal(t, true, fi.IsDir()) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, []byte("contents"), dt) } @@ -1315,20 +1401,20 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("myfile", 
[]byte("data0"), 0600), fstest.CreateDir("sub", 0700), fstest.CreateFile("sub/foo", []byte("foo0"), 0600), fstest.CreateFile("sub/bar", []byte("bar0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - dir2, err := tmpdir( + dir2, err := integration.Tmpdir( + t, fstest.CreateFile("file2", []byte("file2"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) st := llb.Scratch(). File( @@ -1340,9 +1426,7 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1358,7 +1442,7 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "myfile2")) + dt, err := os.ReadFile(filepath.Join(destDir, "myfile2")) require.NoError(t, err) require.Equal(t, []byte("data0"), dt) @@ -1366,32 +1450,91 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) require.Equal(t, true, fi.IsDir()) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out/bar")) + dt, err = os.ReadFile(filepath.Join(destDir, "out/bar")) require.NoError(t, err) require.Equal(t, []byte("bar0"), dt) _, err = os.Stat(filepath.Join(destDir, "out/foo")) require.ErrorIs(t, err, os.ErrNotExist) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "file2")) + dt, err = os.ReadFile(filepath.Join(destDir, "file2")) require.NoError(t, err) require.Equal(t, []byte("file2"), dt) } +// moby/buildkit#3291 +func testFileOpCopyUIDCache(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Scratch().File( + llb.Copy(llb.Image("alpine").Run(llb.Shlex(`sh -c 'echo 123 > /foo && chown 1000:1000 /foo'`)).Root(), "foo", "foo")) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + var buf bytes.Buffer + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterTar, + Output: fixedWriteCloser(&nopWriteCloser{&buf}), + }, + }, + }, nil) + require.NoError(t, err) + + m, err := testutil.ReadTarToMap(buf.Bytes(), false) + require.NoError(t, err) + + fi, ok := m["foo"] + require.True(t, ok) + require.Equal(t, 1000, fi.Header.Uid) + require.Equal(t, 1000, fi.Header.Gid) + + // repeat to check cache does not apply for different uid + st = llb.Scratch().File( + llb.Copy(llb.Image("alpine").Run(llb.Shlex(`sh -c 'echo 123 > /foo'`)).Root(), "foo", "foo")) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + buf = bytes.Buffer{} + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterTar, + Output: fixedWriteCloser(&nopWriteCloser{&buf}), + }, + }, + }, nil) + require.NoError(t, err) + + m, err = testutil.ReadTarToMap(buf.Bytes(), false) + require.NoError(t, err) + + fi, ok = m["foo"] + require.True(t, ok) + require.Equal(t, 0, fi.Header.Uid) + require.Equal(t, 0, fi.Header.Gid) +} + func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("myfile", []byte("data0"), 0600), fstest.CreateDir("sub", 0700), fstest.CreateFile("sub/foo", []byte("foo0"), 0600), 
fstest.CreateFile("sub/bar", []byte("bar0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) st := llb.Scratch().File( llb.Copy( @@ -1411,9 +1554,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1428,7 +1569,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub", "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "sub", "foo")) require.NoError(t, err) require.Equal(t, []byte("foo0"), dt) @@ -1437,7 +1578,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { require.ErrorIs(t, err, os.ErrNotExist) } - randBytes, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + randBytes, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) // Create additional file which doesn't match the include pattern, and make @@ -1460,9 +1601,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { def, err = st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1477,7 +1616,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - randBytes2, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + randBytes2, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) require.Equal(t, randBytes, randBytes2) @@ -1527,11 +1666,11 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT require.NoError(t, err) defer c.Close() - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("foo", []byte("foo"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) tv := syscall.NsecToTimespec(time.Now().UnixNano()) @@ -1543,9 +1682,7 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT def, err := st.Marshal(context.TODO()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(context.TODO(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1560,11 +1697,11 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, []byte("foo"), dt) - err = ioutil.WriteFile(filepath.Join(dir, "foo"), []byte("bar"), 0600) + err = os.WriteFile(filepath.Join(dir, "foo"), []byte("bar"), 0600) require.NoError(t, err) err = syscall.UtimesNano(filepath.Join(dir, "foo"), []syscall.Timespec{tv, tv}) @@ -1583,7 +1720,7 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) if d == llb.DiffMetadata { require.Equal(t, []byte("foo"), dt) @@ -1593,135 +1730,443 @@ func testLocalSourceWithDiffer(t 
*testing.T, sb integration.Sandbox, d llb.DiffT } } -func testFileOpRmWildcard(t *testing.T, sb integration.Sandbox) { +func testOCILayoutSource(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout) requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + c, err := New(context.TODO(), sb.Address()) require.NoError(t, err) defer c.Close() - dir, err := tmpdir( - fstest.CreateDir("foo", 0700), - fstest.CreateDir("bar", 0700), - fstest.CreateFile("foo/target", []byte("foo0"), 0600), - fstest.CreateFile("bar/target", []byte("bar0"), 0600), - fstest.CreateFile("bar/remaining", []byte("bar1"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) + // create a tempdir where we will store the OCI layout + dir := t.TempDir() - st := llb.Scratch().File( - llb.Copy(llb.Local("mylocal"), "foo", "foo"). - Copy(llb.Local("mylocal"), "bar", "bar"), - ).File( - llb.Rm("*/target", llb.WithAllowWildcard(true)), - ) - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) + // make an image that is exported there + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) - destDir, err := ioutil.TempDir("", "buildkit") + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - defer os.RemoveAll(destDir) + outW := bytes.NewBuffer(nil) + attrs := map[string]string{} _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterOCI, + Attrs: attrs, + Output: fixedWriteCloser(nopWriteCloser{outW}), }, }, - LocalDirs: map[string]string{ - "mylocal": dir, - }, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar/remaining")) - require.NoError(t, err) - require.Equal(t, []byte("bar1"), dt) - - fi, err := os.Stat(filepath.Join(destDir, "foo")) + // extract the tar stream to the directory as OCI layout + m, err := testutil.ReadTarToMap(outW.Bytes(), false) require.NoError(t, err) - require.Equal(t, true, fi.IsDir()) - _, err = os.Stat(filepath.Join(destDir, "foo/target")) - require.ErrorIs(t, err, os.ErrNotExist) - - _, err = os.Stat(filepath.Join(destDir, "bar/target")) - require.ErrorIs(t, err, os.ErrNotExist) -} + for filename, content := range m { + fullFilename := path.Join(dir, filename) + err = os.MkdirAll(path.Dir(fullFilename), 0755) + require.NoError(t, err) + if content.Header.FileInfo().IsDir() { + err = os.MkdirAll(fullFilename, 0755) + require.NoError(t, err) + } else { + err = os.WriteFile(fullFilename, content.Data, 0644) + require.NoError(t, err) + } + } -func testCallDiskUsage(t *testing.T, sb integration.Sandbox) { - c, err := New(sb.Context(), sb.Address()) + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) require.NoError(t, err) - defer c.Close() - _, err = c.DiskUsage(sb.Context()) + require.Equal(t, 1, len(index.Manifests)) + digest := index.Manifests[0].Digest + + store, err := local.NewStore(dir) require.NoError(t, err) -} -func testBuildMultiMount(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + // reference the OCI Layout in a build + // note that the key does not need to be the directory name, just something + // unique. 
since we are doing just one build with one remote here, we can + // give it any ID + csID := "my-content-store" + st = llb.OCILayout(fmt.Sprintf("not/real@%s", digest), llb.OCIStore("", csID)) + + def, err = st.Marshal(context.TODO()) require.NoError(t, err) - defer c.Close() - alpine := llb.Image("docker.io/library/alpine:latest") - ls := alpine.Run(llb.Shlex("/bin/ls -l")) - busybox := llb.Image("docker.io/library/busybox:latest") - cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz")) - cp.AddMount("/busybox", busybox) + destDir := t.TempDir() - def, err := cp.Marshal(sb.Context()) + _, err = c.Solve(context.TODO(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + OCIStores: map[string]content.Store{ + csID: store, + }, + }, nil) require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) + require.Equal(t, []byte("first"), dt) - checkAllReleasable(t, c, sb, true) + dt, err = os.ReadFile(filepath.Join(destDir, "bar")) + require.NoError(t, err) + require.Equal(t, []byte("second"), dt) } -func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) { - c, err := New(sb.Context(), sb.Address()) +func testOCILayoutPlatformSource(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout) + requiresLinux(t) + c, err := New(context.TODO(), sb.Address()) require.NoError(t, err) defer c.Close() - modTime := time.Now().Add(-24 * time.Hour) // avoid falso positive with current time - - resp := httpserver.Response{ - Etag: identity.NewID(), - Content: []byte("content1"), - LastModified: &modTime, - } + // create a tempdir where we will store the OCI layout + dir := t.TempDir() - server := httpserver.NewTestServer(map[string]httpserver.Response{ - "/foo": resp, - }) - defer server.Close() + platformsToTest := []string{"linux/amd64", "linux/arm64"} - // invalid URL first - st := llb.HTTP(server.URL + "/bar") + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(platformsToTest)), + } + for i, platform := range platformsToTest { + st := llb.Scratch().File( + llb.Mkfile("platform", 0600, []byte(platform)), + ) + + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid response status 404") + ref, err := r.SingleRef() + if err != nil { + return nil, err + } - // first correct request - st = llb.HTTP(server.URL + "/foo") + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(platform, ref) - def, err = st.Marshal(sb.Context()) - require.NoError(t, err) + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: platform, + Platform: platforms.MustParse(platform), + } + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) + return res, nil + } + attrs := map[string]string{} + outW := bytes.NewBuffer(nil) + _, err = c.Build(sb.Context(), 
SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterOCI, + Attrs: attrs, + Output: fixedWriteCloser(nopWriteCloser{outW}), + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + // extract the tar stream to the directory as OCI layout + m, err := testutil.ReadTarToMap(outW.Bytes(), false) + require.NoError(t, err) + + for filename, tarItem := range m { + fullFilename := path.Join(dir, filename) + err = os.MkdirAll(path.Dir(fullFilename), 0755) + require.NoError(t, err) + if tarItem.Header.FileInfo().IsDir() { + err = os.MkdirAll(fullFilename, 0755) + require.NoError(t, err) + } else { + err = os.WriteFile(fullFilename, tarItem.Data, 0644) + require.NoError(t, err) + } + } + + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) + require.NoError(t, err) + require.Equal(t, 1, len(index.Manifests)) + digest := index.Manifests[0].Digest + + store, err := local.NewStore(dir) + require.NoError(t, err) + csID := "my-content-store" + + destDir := t.TempDir() + + frontendOCILayout := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(platformsToTest)), + } + for i, platform := range platformsToTest { + st := llb.OCILayout(fmt.Sprintf("not/real@%s", digest), llb.OCIStore("", csID)) + + def, err := st.Marshal(ctx, llb.Platform(platforms.MustParse(platform))) + if err != nil { + return nil, err + } + + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(platform, ref) + + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: platform, + Platform: platforms.MustParse(platform), + } + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil + } + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + OCIStores: map[string]content.Store{ + csID: store, + }, + }, "", frontendOCILayout, nil) + require.NoError(t, err) + + for _, platform := range platformsToTest { + dt, err := os.ReadFile(filepath.Join(destDir, strings.ReplaceAll(platform, "/", "_"), "platform")) + require.NoError(t, err) + require.Equal(t, []byte(platform), dt) + } +} + +func testFileOpRmWildcard(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + dir, err := integration.Tmpdir( + t, + fstest.CreateDir("foo", 0700), + fstest.CreateDir("bar", 0700), + fstest.CreateFile("foo/target", []byte("foo0"), 0600), + fstest.CreateFile("bar/target", []byte("bar0"), 0600), + fstest.CreateFile("bar/remaining", []byte("bar1"), 0600), + ) + require.NoError(t, err) + + st := llb.Scratch().File( + llb.Copy(llb.Local("mylocal"), "foo", "foo"). 
+ Copy(llb.Local("mylocal"), "bar", "bar"), + ).File( + llb.Rm("*/target", llb.WithAllowWildcard(true)), + ) + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + LocalDirs: map[string]string{ + "mylocal": dir, + }, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "bar/remaining")) + require.NoError(t, err) + require.Equal(t, []byte("bar1"), dt) + + fi, err := os.Stat(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, true, fi.IsDir()) + + _, err = os.Stat(filepath.Join(destDir, "foo/target")) + require.ErrorIs(t, err, os.ErrNotExist) + + _, err = os.Stat(filepath.Join(destDir, "bar/target")) + require.ErrorIs(t, err, os.ErrNotExist) +} + +func testCallDiskUsage(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + _, err = c.DiskUsage(sb.Context()) + require.NoError(t, err) +} + +func testBuildMultiMount(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + alpine := llb.Image("docker.io/library/alpine:latest") + ls := alpine.Run(llb.Shlex("/bin/ls -l")) + busybox := llb.Image("docker.io/library/busybox:latest") + cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz")) + cp.AddMount("/busybox", busybox) + + def, err := cp.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) + + checkAllReleasable(t, c, sb, true) +} + +func testBuildExportScratch(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Scratch() + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/build/exporter:withnocompressed" + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": "uncompressed", + }, + }, + }, + }, nil) + require.NoError(t, err) + + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + cdAddress := sb.ContainerdAddress() + var client *containerd.Client + if cdAddress != "" { + client, err = newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + + img, err := client.GetImage(ctx, target) + require.NoError(t, err) + mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil) + require.NoError(t, err) + require.Equal(t, 0, len(mfst.Layers)) + err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) + require.NoError(t, err) + } +} + +func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + modTime := time.Now().Add(-24 * time.Hour) // avoid false positives with current time + + resp := httpserver.Response{ + Etag: identity.NewID(), + Content: []byte("content1"), + LastModified: &modTime, + } + + server := httpserver.NewTestServer(map[string]httpserver.Response{ + "/foo": resp,
+ }) + defer server.Close() + + // invalid URL first + st := llb.HTTP(server.URL + "/bar") + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid response status 404") + + // first correct request + st = llb.HTTP(server.URL + "/foo") + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) require.Equal(t, server.Stats("/foo").AllRequests, 1) require.Equal(t, server.Stats("/foo").CachedRequests, 0) - tmpdir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1736,7 +2181,7 @@ func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) { require.Equal(t, server.Stats("/foo").AllRequests, 2) require.Equal(t, server.Stats("/foo").CachedRequests, 1) - dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo")) + dt, err := os.ReadFile(filepath.Join(tmpdir, "foo")) require.NoError(t, err) require.Equal(t, []byte("content1"), dt) @@ -1759,7 +2204,7 @@ func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) { require.Equal(t, server.Stats("/foo").AllRequests, 3) require.Equal(t, server.Stats("/foo").CachedRequests, 1) - dt, err = ioutil.ReadFile(filepath.Join(tmpdir, "bar")) + dt, err = os.ReadFile(filepath.Join(tmpdir, "bar")) require.NoError(t, err) require.Equal(t, []byte("content1"), dt) @@ -1792,9 +2237,7 @@ func testResolveAndHosts(t *testing.T, sb integration.Sandbox) { def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1806,11 +2249,11 @@ func testResolveAndHosts(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "resolv.conf")) + dt, err := os.ReadFile(filepath.Join(destDir, "resolv.conf")) require.NoError(t, err) require.Contains(t, string(dt), "nameserver") - dt, err = ioutil.ReadFile(filepath.Join(destDir, "hosts")) + dt, err = os.ReadFile(filepath.Join(destDir, "hosts")) require.NoError(t, err) require.Contains(t, string(dt), "127.0.0.1 localhost") } @@ -1846,9 +2289,7 @@ func testUser(t *testing.T, sb integration.Sandbox) { def, err := out.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ @@ -1860,32 +2301,32 @@ func testUser(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "user")) + dt, err := os.ReadFile(filepath.Join(destDir, "user")) require.NoError(t, err) require.Equal(t, "daemon", strings.TrimSpace(string(dt))) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "group")) + dt, err = os.ReadFile(filepath.Join(destDir, "group")) require.NoError(t, err) require.Equal(t, "daemon", strings.TrimSpace(string(dt))) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "nobody")) + dt, err = os.ReadFile(filepath.Join(destDir, "nobody")) require.NoError(t, err) require.Equal(t, "nobody", strings.TrimSpace(string(dt))) - dt, err = ioutil.ReadFile(filepath.Join(destDir, 
"userone")) + dt, err = os.ReadFile(filepath.Join(destDir, "userone")) require.NoError(t, err) require.Equal(t, "1", strings.TrimSpace(string(dt))) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "root_supplementary")) + dt, err = os.ReadFile(filepath.Join(destDir, "root_supplementary")) require.NoError(t, err) require.True(t, strings.HasPrefix(string(dt), "root ")) require.True(t, strings.Contains(string(dt), "wheel")) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "default_supplementary")) + dt2, err := os.ReadFile(filepath.Join(destDir, "default_supplementary")) require.NoError(t, err) require.Equal(t, string(dt), string(dt2)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "default_uid")) + dt, err = os.ReadFile(filepath.Join(destDir, "default_uid")) require.NoError(t, err) require.Equal(t, "0", strings.TrimSpace(string(dt))) @@ -1893,7 +2334,7 @@ func testUser(t *testing.T, sb integration.Sandbox) { } func testOCIExporter(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1913,9 +2354,7 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) for _, exp := range []string{ExporterOCI, ExporterDocker} { - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() out := filepath.Join(destDir, "out.tar") outW, err := os.Create(out) @@ -1936,7 +2375,7 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(out) + dt, err := os.ReadFile(out) require.NoError(t, err) m, err := testutil.ReadTarToMap(dt, false) @@ -1995,199 +2434,270 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) { checkAllReleasable(t, c, sb, true) } -func testFrontendMetadataReturn(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) +func testOCIExporterContentStore(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - res := gateway.NewResult() - res.AddMeta("frontend.returned", []byte("true")) - res.AddMeta("not-frontend.not-returned", []byte("false")) - res.AddMeta("frontendnot.returned.either", []byte("false")) - return res, nil + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - res, err := c.Build(sb.Context(), SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterOCI, - Attrs: map[string]string{}, - Output: fixedWriteCloser(nopWriteCloser{ioutil.Discard}), - }, - }, - }, "", frontend, nil) + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - require.Contains(t, res.ExporterResponse, "frontend.returned") - require.Equal(t, res.ExporterResponse["frontend.returned"], "true") - require.NotContains(t, res.ExporterResponse, "not-frontend.not-returned") - require.NotContains(t, res.ExporterResponse, "frontendnot.returned.either") - checkAllReleasable(t, c, sb, true) -} - -func testFrontendUseSolveResults(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) - 
require.NoError(t, err) - defer c.Close() - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - st := llb.Scratch().File( - llb.Mkfile("foo", 0600, []byte("data")), - ) - def, err := st.Marshal(sb.Context()) - if err != nil { - return nil, err - } + for _, exp := range []string{ExporterOCI, ExporterDocker} { + destDir := t.TempDir() + target := "example.com/buildkit/testoci:latest" - res, err := c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err + outTar := filepath.Join(destDir, "out.tar") + outW, err := os.Create(outTar) + require.NoError(t, err) + attrs := map[string]string{} + if exp == ExporterDocker { + attrs["name"] = target } + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: exp, + Attrs: attrs, + Output: fixedWriteCloser(outW), + }, + }, + }, nil) + require.NoError(t, err) - ref, err := res.SingleRef() - if err != nil { - return nil, err + outDir := filepath.Join(destDir, "out.d") + attrs = map[string]string{ + "tar": "false", } - - st2, err := ref.ToState() - if err != nil { - return nil, err + if exp == ExporterDocker { + attrs["name"] = target } + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: exp, + Attrs: attrs, + OutputDir: outDir, + }, + }, + }, nil) + require.NoError(t, err) - st = llb.Scratch().File( - llb.Copy(st2, "foo", "foo2"), - ) + dt, err := os.ReadFile(outTar) + require.NoError(t, err) + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) - def, err = st.Marshal(sb.Context()) - if err != nil { - return nil, err - } + filepath.Walk(outDir, func(filename string, fi os.FileInfo, err error) error { + filename = strings.TrimPrefix(filename, outDir) + filename = strings.Trim(filename, "/") + if filename == "" || filename == "ingest" { + return nil + } - return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), + if fi.IsDir() { + require.Contains(t, m, filename+"/") + } else { + require.Contains(t, m, filename) + if filename == "index.json" { + // this file has a timestamp in it, so we can't compare + return nil + } + f, err := os.Open(path.Join(outDir, filename)) + require.NoError(t, err) + data, err := io.ReadAll(f) + require.NoError(t, err) + require.Equal(t, m[filename].Data, data) + } + return nil }) } - destDir, err := ioutil.TempDir("", "buildkit") + checkAllReleasable(t, c, sb, true) +} + +func testSourceDateEpochLayerTimestamps(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + destDir, err := os.MkdirTemp("", "buildkit") require.NoError(t, err) defer os.RemoveAll(destDir) - _, err = c.Build(sb.Context(), SolveOpt{ + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC) + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()), + }, Exports: []ExportEntry{ { - Type: 
ExporterLocal, - OutputDir: destDir, + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, - }, "", frontend, nil) + }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo2")) + dt, err := os.ReadFile(out) require.NoError(t, err) - require.Equal(t, dt, []byte("data")) -} -func skipDockerd(t *testing.T, sb integration.Sandbox) { - // TODO: remove me once dockerd supports the image and exporter. - t.Helper() - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("dockerd missing a required exporter, cache exporter, or entitlement") - } + tmsX, err := readImageTimestamps(dt) + require.NoError(t, err) + tms := tmsX.FromImage + + require.Equal(t, len(tms), 3) + + expected := tm.UTC().Format(time.RFC3339Nano) + require.Equal(t, expected, tms[0]) + require.Equal(t, expected, tms[1]) + require.Equal(t, expected, tms[2]) + + checkAllReleasable(t, c, sb, true) } -func testExporterTargetExists(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) +func testSourceDateEpochClamp(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - st := llb.Image("busybox:latest") - def, err := st.Marshal(sb.Context()) + var bboxConfig []byte + _, err = c.Build(sb.Context(), SolveOpt{}, "", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + _, bboxConfig, err = c.ResolveImageConfig(ctx, "docker.io/library/busybox:latest", llb.ResolveImageConfigOpt{}) + if err != nil { + return nil, err + } + return nil, nil + }, nil) require.NoError(t, err) - var mdDgst string - res, err := c.Solve(sb.Context(), def, SolveOpt{ + m := map[string]json.RawMessage{} + require.NoError(t, json.Unmarshal(bboxConfig, &m)) + delete(m, "created") + bboxConfig, err = json.Marshal(m) + require.NoError(t, err) + + busybox, err := llb.Image("busybox:latest").WithImageConfig(bboxConfig) + require.NoError(t, err) + + def, err := busybox.Marshal(sb.Context()) + require.NoError(t, err) + + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Attrs: map[string]string{}, - Output: func(m map[string]string) (io.WriteCloser, error) { - mdDgst = m[exptypes.ExporterImageDigestKey] - return nil, nil + Type: ExporterOCI, + Attrs: map[string]string{ + exptypes.ExporterImageConfigKey: string(bboxConfig), }, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) - dgst := res.ExporterResponse[exptypes.ExporterImageDigestKey] - require.True(t, strings.HasPrefix(dgst, "sha256:")) - require.Equal(t, dgst, mdDgst) + dt, err := os.ReadFile(out) + require.NoError(t, err) - require.True(t, strings.HasPrefix(res.ExporterResponse[exptypes.ExporterImageConfigDigestKey], "sha256:")) -} + busyboxTmsX, err := readImageTimestamps(dt) + require.NoError(t, err) + busyboxTms := busyboxTmsX.FromImage -func testTarExporterWithSocket(t *testing.T, sb integration.Sandbox) { - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("tar exporter is temporarily broken on dockerd") - } + require.True(t, len(busyboxTms) > 1) + bboxLayerLen := len(busyboxTms) - 1 - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + tm, err := time.Parse(time.RFC3339Nano, busyboxTms[1]) 
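+ // (busyboxTms[1] is the timestamp of busybox's first real layer; "next"
+ // below, one hour later, is used as SOURCE_DATE_EPOCH and should clamp
+ // only layers created after it, leaving the original busybox layer
+ // times untouched)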
require.NoError(t, err) - defer c.Close() - alpine := llb.Image("docker.io/library/alpine:latest") - def, err := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/socket.sock & usleep 100000; kill %1"})).Marshal(sb.Context()) + next := tm.Add(time.Hour).Truncate(time.Second) + + st := busybox.Run(llb.Shlex("touch /foo")) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + out = filepath.Join(destDir, "out.tar") + outW, err = os.Create(out) require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", next.Unix()), + }, Exports: []ExportEntry{ { - Type: ExporterTar, - Attrs: map[string]string{}, - Output: func(m map[string]string) (io.WriteCloser, error) { - return nopWriteCloser{ioutil.Discard}, nil + Type: ExporterOCI, + Attrs: map[string]string{ + exptypes.ExporterImageConfigKey: string(bboxConfig), }, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) -} - -func testTarExporterWithSocketCopy(t *testing.T, sb integration.Sandbox) { - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("tar exporter is temporarily broken on dockerd") - } - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + dt, err = os.ReadFile(out) require.NoError(t, err) - defer c.Close() - - alpine := llb.Image("docker.io/library/alpine:latest") - state := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/root/socket.sock & usleep 100000; kill %1"})).Root() - fa := llb.Copy(state, "/root", "/roo2", &llb.CopyInfo{}) + tmsX, err := readImageTimestamps(dt) + require.NoError(t, err) + tms := tmsX.FromImage - scratchCopy := llb.Scratch().File(fa) + require.Equal(t, len(tms), bboxLayerLen+2) - def, err := scratchCopy.Marshal(sb.Context()) - require.NoError(t, err) + expected := next.UTC().Format(time.RFC3339Nano) + require.Equal(t, expected, tms[0]) + require.Equal(t, busyboxTms[1], tms[1]) + require.Equal(t, expected, tms[bboxLayerLen+1]) + require.Equal(t, expected, tmsX.FromAnnotation) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) + checkAllReleasable(t, c, sb, true) } -// moby/buildkit#1418 -func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) { +// testSourceDateEpochReset tests that the SOURCE_DATE_EPOCH is reset if exporter option is set +func testSourceDateEpochReset(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2200,507 +2710,346 @@ func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) { st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - run(`sh -c "echo -n first > foo;ln -s foo bar"`) + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - var buf bytes.Buffer + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()), + }, Exports: []ExportEntry{ { - Type: ExporterTar, - Output: fixedWriteCloser(&nopWriteCloser{&buf}), + Type: ExporterOCI, + Attrs: 
map[string]string{"source-date-epoch": ""}, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) - m, err := testutil.ReadTarToMap(buf.Bytes(), false) + dt, err := os.ReadFile(out) require.NoError(t, err) - item, ok := m["foo"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("first"), item.Data) + tmsX, err := readImageTimestamps(dt) + require.NoError(t, err) + tms := tmsX.FromImage - item, ok = m["bar"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeSymlink) - require.Equal(t, "foo", item.Header.Linkname) -} + require.Equal(t, len(tms), 3) -func testBuildExportWithForeignLayer(t *testing.T, sb integration.Sandbox) { - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("image exporter is missing in dockerd") - } + expected := tm.UTC().Format(time.RFC3339Nano) + require.NotEqual(t, expected, tms[0]) + require.NotEqual(t, expected, tms[1]) + require.NotEqual(t, expected, tms[2]) + + require.Equal(t, tms[0], tms[2]) + require.NotEqual(t, tms[2], tms[1]) + + checkAllReleasable(t, c, sb, true) +} +func testSourceDateEpochLocalExporter(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureSourceDateEpoch) + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - st := llb.Image("cpuguy83/buildkit-foreign:latest") + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - t.Run("propagate=1", func(t *testing.T) { - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) - target := registry + "/buildkit/build/exporter/foreign:latest" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "prefer-nondist-layers": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) - - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - - resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true}) - name, desc, err := resolver.Resolve(ctx, target) - require.NoError(t, err) + tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC) - fetcher, err := resolver.Fetcher(ctx, name) - require.NoError(t, err) - mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any()) - require.NoError(t, err) - - require.Equal(t, 2, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2LayerForeign, mfst.Layers[0].MediaType) - require.Len(t, mfst.Layers[0].URLs, 1) - require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType) - - rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size}) - require.NoError(t, err) - defer rc.Close() - - // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request. - // The request is only made when we attempt to read from the reader. 
- buf := make([]byte, 1) - _, err = rc.Read(buf) - require.Truef(t, ctderrdefs.IsNotFound(err), "expected error for blob that should not be in registry: %s, %v", mfst.Layers[0].Digest, err) - }) - t.Run("propagate=0", func(t *testing.T) { - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - target := registry + "/buildkit/build/exporter/noforeign:latest" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - }, - }, + _, err = c.Solve(sb.Context(), def, SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()), + }, + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, }, - }, nil) - require.NoError(t, err) - - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - - resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true}) - name, desc, err := resolver.Resolve(ctx, target) - require.NoError(t, err) - - fetcher, err := resolver.Fetcher(ctx, name) - require.NoError(t, err) - - mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any()) - require.NoError(t, err) + }, + }, nil) + require.NoError(t, err) - require.Equal(t, 2, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) - require.Len(t, mfst.Layers[0].URLs, 0) - require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType) + fi, err := os.Stat(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, fi.ModTime().Format(time.RFC3339), tm.UTC().Format(time.RFC3339)) - rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size}) - require.NoError(t, err) - defer rc.Close() + fi, err = os.Stat(filepath.Join(destDir, "bar")) + require.NoError(t, err) + require.Equal(t, fi.ModTime().Format(time.RFC3339), tm.UTC().Format(time.RFC3339)) - // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request. - // The request is only made when we attempt to read from the reader. 
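For the local exporter the assertion shape is different: instead of reading timestamps out of an image config, the test stats the exported files and compares mtimes at second resolution via RFC3339. A self-contained sketch of that comparison, faking the export step with os.Chtimes:

```go
// Minimal sketch: the mtime check from the local-exporter test, with
// os.Chtimes standing in for the exporter rewriting timestamps.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)

	dir, err := os.MkdirTemp("", "sde")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	p := filepath.Join(dir, "foo")
	if err := os.WriteFile(p, []byte("first"), 0o644); err != nil {
		panic(err)
	}
	// Stand-in for the export with SOURCE_DATE_EPOCH set.
	if err := os.Chtimes(p, tm, tm); err != nil {
		panic(err)
	}

	fi, err := os.Stat(p)
	if err != nil {
		panic(err)
	}
	// Same comparison as the test: second resolution via RFC3339.
	fmt.Println(fi.ModTime().UTC().Format(time.RFC3339) == tm.Format(time.RFC3339)) // true
}
```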
- buf := make([]byte, 1) - _, err = rc.Read(buf) - require.NoError(t, err) - }) + checkAllReleasable(t, c, sb, true) } -func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) { - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("image exporter is missing in dockerd") - } +func testSourceDateEpochTarExporter(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureSourceDateEpoch) requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() busybox := llb.Image("busybox:latest") - cmd := `sh -e -c "echo -n uncompressed > data"` - st := llb.Scratch() - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -c "echo -n first > foo"`) + run(`sh -c "echo -n second > bar"`) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } + destDir, err := os.MkdirTemp("", "buildkit") require.NoError(t, err) + defer os.RemoveAll(destDir) - target := registry + "/buildkit/build/exporter:withnocompressed" + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC) _, err = c.Solve(sb.Context(), def, SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()), + }, Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "compression": "uncompressed", - }, + Type: ExporterTar, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - cdAddress := sb.ContainerdAddress() - var client *containerd.Client - if cdAddress != "" { - client, err = newContainerd(cdAddress) - require.NoError(t, err) - defer client.Close() + dt, err := os.ReadFile(out) + require.NoError(t, err) - img, err := client.GetImage(ctx, target) - require.NoError(t, err) - mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil) - require.NoError(t, err) - require.Equal(t, 1, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) - } + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) - // new layer with gzip compression - targetImg := llb.Image(target) - cmd = `sh -e -c "echo -n gzip > data"` - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", targetImg) + require.Equal(t, len(m), 2) - def, err = st.Marshal(sb.Context()) - require.NoError(t, err) + require.Equal(t, tm.Format(time.RFC3339), m["foo"].Header.ModTime.Format(time.RFC3339)) + require.Equal(t, tm.Format(time.RFC3339), m["bar"].Header.ModTime.Format(time.RFC3339)) - compressedTarget := registry + "/buildkit/build/exporter:withcompressed" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": compressedTarget, - "push": "true", - }, - }, - }, - }, nil) + checkAllReleasable(t, c, sb, true) +} +func testFrontendMetadataReturn(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed" - _, err = c.Solve(context.TODO(), def, 
SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": allCompressedTarget, - "push": "true", - "compression": "gzip", - "force-compression": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + res.AddMeta("frontend.returned", []byte("true")) + res.AddMeta("not-frontend.not-returned", []byte("false")) + res.AddMeta("frontendnot.returned.either", []byte("false")) + return res, nil + } - if cdAddress == "" { - t.Skip("rest of test requires containerd worker") + var exports []ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []ExportEntry{{ + Type: ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} } - err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) - require.NoError(t, err) - err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete()) - require.NoError(t, err) - err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete()) + res, err := c.Build(sb.Context(), SolveOpt{ + Exports: exports, + }, "", frontend, nil) require.NoError(t, err) - + require.Contains(t, res.ExporterResponse, "frontend.returned") + require.Equal(t, res.ExporterResponse["frontend.returned"], "true") + require.NotContains(t, res.ExporterResponse, "not-frontend.not-returned") + require.NotContains(t, res.ExporterResponse, "frontendnot.returned.either") checkAllReleasable(t, c, sb, true) +} - // check if the new layer is compressed with compression option - img, err := client.Pull(ctx, compressedTarget) +func testFrontendUseSolveResults(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - dt, err := content.ReadBlob(ctx, img.ContentStore(), img.Target()) - require.NoError(t, err) + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Scratch().File( + llb.Mkfile("foo", 0600, []byte("data")), + ) - var mfst = struct { - MediaType string `json:"mediaType,omitempty"` - ocispecs.Manifest - }{} + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } - err = json.Unmarshal(dt, &mfst) - require.NoError(t, err) - require.Equal(t, 2, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) - require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType) + res, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest}) - require.NoError(t, err) + ref, err := res.SingleRef() + if err != nil { + return nil, err + } - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) + st2, err := ref.ToState() + if err != nil { + return nil, err + } - item, ok := m["data"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("uncompressed"), item.Data) + st = llb.Scratch().File( + llb.Copy(st2, "foo", "foo2"), + ) - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest}) - require.NoError(t, err) - - m, err 
= testutil.ReadTarToMap(dt, true) - require.NoError(t, err) - - item, ok = m["data"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("gzip"), item.Data) - - err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete()) - require.NoError(t, err) - - checkAllReleasable(t, c, sb, true) - - // check if all layers are compressed with force-compressoin option - img, err = client.Pull(ctx, allCompressedTarget) - require.NoError(t, err) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target()) - require.NoError(t, err) - - mfst = struct { - MediaType string `json:"mediaType,omitempty"` - ocispecs.Manifest - }{} - - err = json.Unmarshal(dt, &mfst) - require.NoError(t, err) - require.Equal(t, 2, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType) - require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType) - - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest}) - require.NoError(t, err) + def, err = st.Marshal(sb.Context()) + if err != nil { + return nil, err + } - m, err = testutil.ReadTarToMap(dt, true) - require.NoError(t, err) + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + } - item, ok = m["data"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("uncompressed"), item.Data) + destDir := t.TempDir() - dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest}) + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, "", frontend, nil) require.NoError(t, err) - m, err = testutil.ReadTarToMap(dt, true) + dt, err := os.ReadFile(filepath.Join(destDir, "foo2")) require.NoError(t, err) - - item, ok = m["data"] - require.True(t, ok) - require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) - require.Equal(t, []byte("gzip"), item.Data) + require.Equal(t, dt, []byte("data")) } -func testBuildExportZstd(t *testing.T, sb integration.Sandbox) { +func testExporterTargetExists(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("busybox:latest") - cmd := `sh -e -c "echo -n zstd > data"` - - st := llb.Scratch() - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - + st := llb.Image("busybox:latest") def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{ + var mdDgst string + res, err := c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), - Attrs: map[string]string{ - "compression": "zstd", + Type: ExporterOCI, + Attrs: map[string]string{}, + Output: func(m map[string]string) (io.WriteCloser, error) { + mdDgst = m[exptypes.ExporterImageDigestKey] + return nil, nil }, }, }, - // compression option should work even with inline cache exports - CacheExports: []CacheOptionsEntry{ - { - Type: "inline", - }, - }, }, nil) require.NoError(t, err) + dgst := 
res.ExporterResponse[exptypes.ExporterImageDigestKey] - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) + require.True(t, strings.HasPrefix(dgst, "sha256:")) + require.Equal(t, dgst, mdDgst) - var index ocispecs.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) + require.True(t, strings.HasPrefix(res.ExporterResponse[exptypes.ExporterImageConfigDigestKey], "sha256:")) +} - var mfst ocispecs.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) +func testTarExporterWithSocket(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - lastLayer := mfst.Layers[len(mfst.Layers)-1] - require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType) - - zstdLayerDigest := lastLayer.Digest.Hex() - require.Equal(t, m["blobs/sha256/"+zstdLayerDigest].Data[:4], []byte{0x28, 0xb5, 0x2f, 0xfd}) - - // repeat without oci mediatype - outW, err = os.Create(out) + alpine := llb.Image("docker.io/library/alpine:latest") + def, err := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/socket.sock & usleep 100000; kill %1"})).Marshal(sb.Context()) require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), - Attrs: map[string]string{ - "compression": "zstd", - "oci-mediatypes": "false", + Type: ExporterTar, + Attrs: map[string]string{}, + Output: func(m map[string]string) (io.WriteCloser, error) { + return nopWriteCloser{io.Discard}, nil }, }, }, }, nil) require.NoError(t, err) - - dt, err = ioutil.ReadFile(out) - require.NoError(t, err) - - m, err = testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - - lastLayer = mfst.Layers[len(mfst.Layers)-1] - require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType) - - require.Equal(t, lastLayer.Digest.Hex(), zstdLayerDigest) } -func testPullZstdImage(t *testing.T, sb integration.Sandbox) { +func testTarExporterWithSocketCopy(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("busybox:latest") - cmd := `sh -e -c "echo -n zstd > data"` - - st := llb.Scratch() - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) - - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - - target := registry + "/buildkit/build/exporter:zstd" - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "compression": "zstd", - - // containerd applier supports only zstd with oci-mediatype. 
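The comment above is why the zstd tests pin `oci-mediatypes`: the OCI media type carries the standard `+zstd` suffix, while the docker-type fallback uses a non-standard `.zstd` suffix. A small sketch of the mapping that the testBuildExportZstd assertions later encode; the string literals mirror ocispecs.MediaTypeImageLayer and images.MediaTypeDockerSchema2Layer, and the exported blobs themselves start with the zstd magic 0x28 0xb5 0x2f 0xfd.

```go
// Sketch of the media-type selection the zstd tests assert on.
package main

import "fmt"

func zstdLayerMediaType(ociMediaTypes bool) string {
	if ociMediaTypes {
		// standard suffix, the form containerd's applier accepts
		return "application/vnd.oci.image.layer.v1.tar" + "+zstd"
	}
	// non-standard docker-type suffix, produced when oci-mediatypes=false
	return "application/vnd.docker.image.rootfs.diff.tar" + ".zstd"
}

func main() {
	fmt.Println(zstdLayerMediaType(true))
	fmt.Println(zstdLayerMediaType(false))
}
```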
- "oci-mediatypes": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) - - if sb.Name() == "containerd-1.4" { - // containerd 1.4 doesn't support zstd compression - return - } - - ensurePruneAll(t, c, sb) - - st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata")) + alpine := llb.Image("docker.io/library/alpine:latest") + state := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/root/socket.sock & usleep 100000; kill %1"})).Root() - def, err = st.Marshal(sb.Context()) - require.NoError(t, err) + fa := llb.Copy(state, "/root", "/roo2", &llb.CopyInfo{}) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + scratchCopy := llb.Scratch().File(fa) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) + def, err := scratchCopy.Marshal(sb.Context()) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "zdata")) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.NoError(t, err) - require.Equal(t, dt, []byte("zstd")) } -func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + +// moby/buildkit#1418 +func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2713,130 +3062,253 @@ func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - run(`sh -e -c "mkdir -p foo/sub; echo -n first > foo/sub/bar; chmod 0741 foo;"`) - run(`true`) // this doesn't create a layer - run(`sh -c "echo -n second > foo/sub/baz"`) + run(`sh -c "echo -n first > foo;ln -s foo bar"`) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - - target := registry + "/buildkit/testpush:latest" - + var buf bytes.Buffer _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - }, + Type: ExporterTar, + Output: fixedWriteCloser(&nopWriteCloser{&buf}), }, }, }, nil) require.NoError(t, err) - // test existence of the image with next build - firstBuild := llb.Image(target) - - def, err = firstBuild.Marshal(sb.Context()) + m, err := testutil.ReadTarToMap(buf.Bytes(), false) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + item, ok := m["foo"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) + require.Equal(t, []byte("first"), item.Data) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) - require.NoError(t, err) + item, ok = m["bar"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeSymlink) + require.Equal(t, "foo", item.Header.Linkname) +} - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo/sub/bar")) +func testBuildExportWithForeignLayer(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) - require.Equal(t, dt, []byte("first")) + defer c.Close() - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo/sub/baz")) 
+ st := llb.Image("cpuguy83/buildkit-foreign:latest") + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - require.Equal(t, dt, []byte("second")) - fi, err := os.Stat(filepath.Join(destDir, "foo")) - require.NoError(t, err) - require.Equal(t, 0741, int(fi.Mode()&0777)) + t.Run("propagate=1", func(t *testing.T) { + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) - checkAllReleasable(t, c, sb, false) + target := registry + "/buildkit/build/exporter/foreign:latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "prefer-nondist-layers": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) - // examine contents of exported tars (requires containerd) - cdAddress := sb.ContainerdAddress() - if cdAddress == "" { - t.Skip("rest of test requires containerd worker") - } + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - // TODO: make public pull helper function so this can be checked for standalone as well + resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true}) + name, desc, err := resolver.Resolve(ctx, target) + require.NoError(t, err) - client, err := newContainerd(cdAddress) + fetcher, err := resolver.Fetcher(ctx, name) + require.NoError(t, err) + mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any()) + require.NoError(t, err) + + require.Equal(t, 2, len(mfst.Layers)) + require.Equal(t, images.MediaTypeDockerSchema2LayerForeign, mfst.Layers[0].MediaType) + require.Len(t, mfst.Layers[0].URLs, 1) + require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType) + + rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size}) + require.NoError(t, err) + defer rc.Close() + + // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request. + // The request is only made when we attempt to read from the reader. 
+ buf := make([]byte, 1) + _, err = rc.Read(buf) + require.Truef(t, ctderrdefs.IsNotFound(err), "expected error for blob that should not be in registry: %s, %v", mfst.Layers[0].Digest, err) + }) + t.Run("propagate=0", func(t *testing.T) { + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + target := registry + "/buildkit/build/exporter/noforeign:latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true}) + name, desc, err := resolver.Resolve(ctx, target) + require.NoError(t, err) + + fetcher, err := resolver.Fetcher(ctx, name) + require.NoError(t, err) + + mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any()) + require.NoError(t, err) + + require.Equal(t, 2, len(mfst.Layers)) + require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) + require.Len(t, mfst.Layers[0].URLs, 0) + require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType) + + rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size}) + require.NoError(t, err) + defer rc.Close() + + // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request. + // The request is only made when we attempt to read from the reader. + buf := make([]byte, 1) + _, err = rc.Read(buf) + require.NoError(t, err) + }) +} + +func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) - defer client.Close() + defer c.Close() - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + busybox := llb.Image("busybox:latest") + cmd := `sh -e -c "echo -n uncompressed > data"` - // check image in containerd - _, err = client.ImageService().Get(ctx, target) + st := llb.Scratch() + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - // deleting image should release all content - err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + target := registry + "/buildkit/build/exporter:withnocompressed" - img, err := client.Pull(ctx, target) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": "uncompressed", + }, + }, + }, + }, nil) require.NoError(t, err) - desc, err := img.Config(ctx) + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + cdAddress := sb.ContainerdAddress() + var client *containerd.Client + if cdAddress != "" { + client, err = newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + + img, err := client.GetImage(ctx, target) + require.NoError(t, err) + mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil) + require.NoError(t, err) + require.Equal(t, 1, 
len(mfst.Layers)) + require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) + } + + // new layer with gzip compression + targetImg := llb.Image(target) + cmd = `sh -e -c "echo -n gzip > data"` + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", targetImg) + + def, err = st.Marshal(sb.Context()) require.NoError(t, err) - dt, err = content.ReadBlob(ctx, img.ContentStore(), desc) + compressedTarget := registry + "/buildkit/build/exporter:withcompressed" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": compressedTarget, + "push": "true", + }, + }, + }, + }, nil) require.NoError(t, err) - var ociimg ocispecs.Image - err = json.Unmarshal(dt, &ociimg) + allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed" + _, err = c.Solve(context.TODO(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": allCompressedTarget, + "push": "true", + "compression": "gzip", + "force-compression": "true", + }, + }, + }, + }, nil) require.NoError(t, err) - require.NotEqual(t, "", ociimg.OS) - require.NotEqual(t, "", ociimg.Architecture) - require.NotEqual(t, "", ociimg.Config.WorkingDir) - require.Equal(t, "layers", ociimg.RootFS.Type) - require.Equal(t, 3, len(ociimg.RootFS.DiffIDs)) - require.NotNil(t, ociimg.Created) - require.True(t, time.Since(*ociimg.Created) < 2*time.Minute) - require.Condition(t, func() bool { - for _, env := range ociimg.Config.Env { - if strings.HasPrefix(env, "PATH=") { - return true - } - } - return false - }) + if cdAddress == "" { + t.Skip("rest of test requires containerd worker") + } - require.Equal(t, 3, len(ociimg.History)) - require.Contains(t, ociimg.History[0].CreatedBy, "foo/sub/bar") - require.Contains(t, ociimg.History[1].CreatedBy, "true") - require.Contains(t, ociimg.History[2].CreatedBy, "foo/sub/baz") - require.False(t, ociimg.History[0].EmptyLayer) - require.False(t, ociimg.History[1].EmptyLayer) - require.False(t, ociimg.History[2].EmptyLayer) + err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) + require.NoError(t, err) + err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete()) + require.NoError(t, err) + err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete()) + require.NoError(t, err) - dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target()) + checkAllReleasable(t, c, sb, true) + + // check if the new layer is compressed with compression option + img, err := client.Pull(ctx, compressedTarget) + require.NoError(t, err) + + dt, err := content.ReadBlob(ctx, img.ContentStore(), img.Target()) require.NoError(t, err) var mfst = struct { @@ -2846,947 +3318,625 @@ func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { err = json.Unmarshal(dt, &mfst) require.NoError(t, err) - - require.Equal(t, images.MediaTypeDockerSchema2Manifest, mfst.MediaType) - require.Equal(t, 3, len(mfst.Layers)) - require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType) + require.Equal(t, 2, len(mfst.Layers)) + require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType) require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType) dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest}) require.NoError(t, err) - m, err := testutil.ReadTarToMap(dt, true) + m, err := 
testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- item, ok := m["foo/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
- require.Equal(t, 0741, int(item.Header.Mode&0777))
-
- item, ok = m["foo/sub/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
-
- item, ok = m["foo/sub/bar"]
+ item, ok := m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("first"), item.Data)
-
- _, ok = m["foo/sub/baz"]
- require.False(t, ok)
+ require.Equal(t, []byte("uncompressed"), item.Data)
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[2].Digest})
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
require.NoError(t, err)
m, err = testutil.ReadTarToMap(dt, true)
require.NoError(t, err)
- item, ok = m["foo/sub/baz"]
+ item, ok = m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("second"), item.Data)
+ require.Equal(t, []byte("gzip"), item.Data)
- item, ok = m["foo/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
- require.Equal(t, 0741, int(item.Header.Mode&0777))
+ err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
+ require.NoError(t, err)
- item, ok = m["foo/sub/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+ checkAllReleasable(t, c, sb, true)
- _, ok = m["foo/sub/bar"]
- require.False(t, ok)
-}
+ // check if all layers are compressed with force-compression option
+ img, err = client.Pull(ctx, allCompressedTarget)
+ require.NoError(t, err)
-func testStargzLazyRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
+ require.NoError(t, err)
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" || sb.Snapshotter() != "stargz" {
- t.Skip("test requires containerd worker with stargz snapshotter")
- }
+ mfst = struct {
+ MediaType string `json:"mediaType,omitempty"`
+ ocispecs.Manifest
+ }{}
- client, err := newContainerd(cdAddress)
+ err = json.Unmarshal(dt, &mfst)
require.NoError(t, err)
- defer client.Close()
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ require.Equal(t, 2, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
require.NoError(t, err)
- var (
- imageService = client.ImageService()
- contentStore = client.ContentStore()
- ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
- )
+ m, err = testutil.ReadTarToMap(dt, true)
+ require.NoError(t, err)
+
+ item, ok = m["data"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("uncompressed"), item.Data)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
+ require.NoError(t, err)
+
+ m, err = testutil.ReadTarToMap(dt, true)
+ require.NoError(t, err)
+
+ item, ok = m["data"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("gzip"),
item.Data) +} +func testBuildExportZstd(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + busybox := llb.Image("busybox:latest") + cmd := `sh -e -c "echo -n zstd > data"` - // Prepare stargz registry cache - orgImage := "docker.io/library/alpine:latest" - sgzCache := registry + "/stargz/alpinecache:" + identity.NewID() - baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"})) - def, err := baseDef.Marshal(sb.Context()) + st := llb.Scratch() + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) + + destDir := t.TempDir() + out := filepath.Join(destDir, "out.tar") outW, err := os.Create(out) require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterOCI, Output: fixedWriteCloser(outW), - }, - }, - CacheExports: []CacheOptionsEntry{ - { - Type: "registry", - Attrs: map[string]string{ - "ref": sgzCache, - "compression": "estargz", - "oci-mediatypes": "true", - "force-compression": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) - - // clear all local state out - ensurePruneAll(t, c, sb) - - // stargz layers should be lazy even for executing something on them - def, err = baseDef. - Run(llb.Args([]string{"/bin/touch", "/bar"})). - Marshal(sb.Context()) - require.NoError(t, err) - target := registry + "/buildkit/testlazyimage:" + identity.NewID() - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, Attrs: map[string]string{ - "name": target, - "push": "true", + "compression": "zstd", }, }, }, - CacheImports: []CacheOptionsEntry{ + // compression option should work even with inline cache exports + CacheExports: []CacheOptionsEntry{ { - Type: "registry", - Attrs: map[string]string{ - "ref": sgzCache, - }, + Type: "inline", }, }, }, nil) require.NoError(t, err) - img, err := imageService.Get(ctx, target) + dt, err := os.ReadFile(out) require.NoError(t, err) - manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + m, err := testutil.ReadTarToMap(dt, false) require.NoError(t, err) - // Check if image layers are lazy. - // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. 
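The laziness checks in these stargz tests all take the same form: walk every manifest layer except the topmost one and require that the local content store reports not-found for its blob. A schematic version, with the content store reduced to a lookup function (stand-ins for contentStore.Info and ctderrdefs.ErrNotFound):

```go
// Schematic only: a layer is "lazy" when its blob is absent locally.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for ctderrdefs.ErrNotFound

func countLazy(layers []string, info func(digest string) error) int {
	n := 0
	// Skip the topmost layer: it was produced by a local Run and is never lazy.
	for _, dgst := range layers[:len(layers)-1] {
		if errors.Is(info(dgst), errNotFound) {
			n++
		}
	}
	return n
}

func main() {
	local := map[string]bool{"sha256:top": true} // only the Run layer exists locally
	info := func(d string) error {
		if !local[d] {
			return errNotFound
		}
		return nil
	}
	fmt.Println(countLazy([]string{"sha256:base", "sha256:top"}, info)) // 1
}
```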
- var sgzLayers []ocispecs.Descriptor - for i, layer := range manifest.Layers[:len(manifest.Layers)-1] { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i) - sgzLayers = append(sgzLayers, layer) - } - require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") - - // The topmost(last) layer created by `Run` shouldn't be lazy - _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) require.NoError(t, err) - // clear all local state out - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) - // stargz layers should be exportable - out = filepath.Join(destDir, "out2.tar") + lastLayer := mfst.Layers[len(mfst.Layers)-1] + require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType) + + zstdLayerDigest := lastLayer.Digest.Hex() + require.Equal(t, m["blobs/sha256/"+zstdLayerDigest].Data[:4], []byte{0x28, 0xb5, 0x2f, 0xfd}) + + // repeat without oci mediatype outW, err = os.Create(out) require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterOCI, Output: fixedWriteCloser(outW), - }, - }, - CacheImports: []CacheOptionsEntry{ - { - Type: "registry", Attrs: map[string]string{ - "ref": sgzCache, + "compression": "zstd", + "oci-mediatypes": "false", }, }, }, }, nil) require.NoError(t, err) - // Check if image layers are un-lazied - for _, layer := range sgzLayers { - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - } + dt, err = os.ReadFile(out) + require.NoError(t, err) - ensurePruneAll(t, c, sb) + m, err = testutil.ReadTarToMap(dt, false) + require.NoError(t, err) + + err = json.Unmarshal(m["index.json"].Data, &index) + require.NoError(t, err) + + err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) + require.NoError(t, err) + + lastLayer = mfst.Layers[len(mfst.Layers)-1] + require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType) + + require.Equal(t, lastLayer.Digest.Hex(), zstdLayerDigest) } -func testStargzLazyInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - requiresLinux(t) +func testPullZstdImage(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() - cdAddress := sb.ContainerdAddress() - if cdAddress == "" || sb.Snapshotter() != "stargz" { - t.Skip("test requires containerd worker with stargz snapshotter") - } + busybox := llb.Image("busybox:latest") + cmd := `sh -e -c "echo -n zstd > data"` - client, err := newContainerd(cdAddress) + st := llb.Scratch() + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - defer client.Close() + registry, err := sb.NewRegistry() if errors.Is(err, integration.ErrRequirements) { t.Skip(err.Error()) } require.NoError(t, err) - var ( - imageService = client.ImageService() - contentStore = client.ContentStore() - ctx = namespaces.WithNamespace(sb.Context(), "buildkit") - ) - - c, err := New(sb.Context(), sb.Address()) - 
require.NoError(t, err) - defer c.Close() + target := registry + "/buildkit/build/exporter:zstd" - // Prepare stargz inline cache - orgImage := "docker.io/library/alpine:latest" - sgzImage := registry + "/stargz/alpine:" + identity.NewID() - baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"})) - def, err := baseDef.Marshal(sb.Context()) - require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": sgzImage, - "push": "true", - "compression": "estargz", - "oci-mediatypes": "true", - "force-compression": "true", + "name": target, + "push": "true", + "compression": "zstd", + + // containerd applier supports only zstd with oci-mediatype. + "oci-mediatypes": "true", }, }, }, - CacheExports: []CacheOptionsEntry{ - { - Type: "inline", - }, - }, }, nil) require.NoError(t, err) - // clear all local state out - err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete()) - require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + ensurePruneAll(t, c, sb) - // stargz layers should be lazy even for executing something on them - def, err = baseDef. - Run(llb.Args([]string{"/bin/touch", "/bar"})). - Marshal(sb.Context()) - require.NoError(t, err) - target := registry + "/buildkit/testlazyimage:" + identity.NewID() - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "oci-mediatypes": "true", - "compression": "estargz", - }, - }, - }, - CacheExports: []CacheOptionsEntry{ - { - Type: "inline", - }, - }, - CacheImports: []CacheOptionsEntry{ - { - Type: "registry", - Attrs: map[string]string{ - "ref": sgzImage, - }, - }, - }, - }, nil) - require.NoError(t, err) - - img, err := imageService.Get(ctx, target) - require.NoError(t, err) - - manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) - require.NoError(t, err) - - // Check if image layers are lazy. - // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. 
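Both stargz cache tests export with `force-compression=true`, and the reason is easy to miss: without it, a layer that already exists locally with another compression is reused as-is rather than converted, so the stargz snapshotter could not pull it lazily. A sketch of that decision under those assumptions (hypothetical helper, not BuildKit's code):

```go
// Illustrative only: why force-compression matters for estargz exports.
package main

import "fmt"

func exportCompression(existing, requested string, force bool) string {
	if existing == requested || force {
		return requested
	}
	return existing // blob reused as-is; no conversion performed
}

func main() {
	fmt.Println(exportCompression("gzip", "estargz", false)) // gzip (reused, not lazy-capable)
	fmt.Println(exportCompression("gzip", "estargz", true))  // estargz (re-compressed, lazily pullable)
}
```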
- var sgzLayers []ocispecs.Descriptor - for i, layer := range manifest.Layers[:len(manifest.Layers)-1] { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i) - sgzLayers = append(sgzLayers, layer) - } - require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") + st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata")) - // The topmost(last) layer created by `Run` shouldn't be lazy - _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) + def, err = st.Marshal(sb.Context()) require.NoError(t, err) - // clear all local state out - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) - require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + destDir := t.TempDir() - // stargz layers should be exportable - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, - CacheImports: []CacheOptionsEntry{ - { - Type: "registry", - Attrs: map[string]string{ - "ref": sgzImage, - }, + Type: ExporterLocal, + OutputDir: destDir, }, }, }, nil) require.NoError(t, err) - // Check if image layers are un-lazied - for _, layer := range sgzLayers { - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - } - - ensurePruneAll(t, c, sb) + dt, err := os.ReadFile(filepath.Join(destDir, "zdata")) + require.NoError(t, err) + require.Equal(t, dt, []byte("zstd")) } - -func testStargzLazyPull(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) +func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() - cdAddress := sb.ContainerdAddress() - if cdAddress == "" || sb.Snapshotter() != "stargz" { - t.Skip("test requires containerd worker with stargz snapshotter") + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - client, err := newContainerd(cdAddress) + run(`sh -e -c "mkdir -p foo/sub; echo -n first > foo/sub/bar; chmod 0741 foo;"`) + run(`true`) // this doesn't create a layer + run(`sh -c "echo -n second > foo/sub/baz"`) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - defer client.Close() + registry, err := sb.NewRegistry() if errors.Is(err, integration.ErrRequirements) { t.Skip(err.Error()) } require.NoError(t, err) - var ( - imageService = client.ImageService() - contentStore = client.ContentStore() - ctx = namespaces.WithNamespace(sb.Context(), "buildkit") - ) - - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() + target := registry + "/buildkit/testpush:latest" - // Prepare stargz image - orgImage := "docker.io/library/alpine:latest" - sgzImage := registry + "/stargz/alpine:" + identity.NewID() - def, err := llb.Image(orgImage).Marshal(sb.Context()) - require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": sgzImage, - "push": "true", - "compression": "estargz", - 
"oci-mediatypes": "true", - "force-compression": "true", + "name": target, + "push": "true", }, }, }, }, nil) require.NoError(t, err) - // clear all local state out - err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete()) - require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + // test existence of the image with next build + firstBuild := llb.Image(target) - // stargz layers should be lazy even for executing something on them - def, err = llb.Image(sgzImage). - Run(llb.Args([]string{"/bin/touch", "/foo"})). - Marshal(sb.Context()) + def, err = firstBuild.Marshal(sb.Context()) require.NoError(t, err) - target := registry + "/buildkit/testlazyimage:" + identity.NewID() + + destDir := t.TempDir() + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "oci-mediatypes": "true", - }, + Type: ExporterLocal, + OutputDir: destDir, }, }, }, nil) require.NoError(t, err) - img, err := imageService.Get(ctx, target) - require.NoError(t, err) - - manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) - require.NoError(t, err) - - // Check if image layers are lazy. - // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. - var sgzLayers []ocispecs.Descriptor - for _, layer := range manifest.Layers[:len(manifest.Layers)-1] { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) - sgzLayers = append(sgzLayers, layer) - } - require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") - - // The topmost(last) layer created by `Run` shouldn't be lazy - _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) + dt, err := os.ReadFile(filepath.Join(destDir, "foo/sub/bar")) require.NoError(t, err) + require.Equal(t, dt, []byte("first")) - // clear all local state out - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + dt, err = os.ReadFile(filepath.Join(destDir, "foo/sub/baz")) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + require.Equal(t, dt, []byte("second")) - // stargz layers should be exportable - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, - }, nil) + fi, err := os.Stat(filepath.Join(destDir, "foo")) require.NoError(t, err) + require.Equal(t, 0741, int(fi.Mode()&0777)) - // Check if image layers are un-lazied - for _, layer := range sgzLayers { - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - } - - ensurePruneAll(t, c, sb) -} - -func testLazyImagePush(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - requiresLinux(t) + checkAllReleasable(t, c, sb, false) + // examine contents of exported tars (requires containerd) cdAddress := sb.ContainerdAddress() if cdAddress == "" { - t.Skip("test requires containerd worker") + t.Skip("rest of test requires containerd worker") } + // TODO: make public pull helper function so this can be checked for standalone as well + client, err := newContainerd(cdAddress) require.NoError(t, err) defer client.Close() ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - registry, err := sb.NewRegistry() - if 
errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - // push the busybox image to the mutable registry - sourceImage := "busybox:latest" - def, err := llb.Image(sourceImage).Marshal(sb.Context()) + // check image in containerd + _, err = client.ImageService().Get(ctx, target) require.NoError(t, err) - targetNoTag := registry + "/buildkit/testlazyimage:" - target := targetNoTag + "latest" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - }, - }, - }, - }, nil) + // deleting image should release all content + err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) require.NoError(t, err) - imageService := client.ImageService() - contentStore := client.ContentStore() + checkAllReleasable(t, c, sb, true) - img, err := imageService.Get(ctx, target) + img, err := client.Pull(ctx, target) require.NoError(t, err) - manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + desc, err := img.Config(ctx) require.NoError(t, err) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - } - - // clear all local state out - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + dt, err = content.ReadBlob(ctx, img.ContentStore(), desc) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) - // retag the image we just pushed with no actual changes, which - // should not result in the image getting un-lazied - def, err = llb.Image(target).Marshal(sb.Context()) + var ociimg ocispecs.Image + err = json.Unmarshal(dt, &ociimg) require.NoError(t, err) - target2 := targetNoTag + "newtag" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target2, - "push": "true", - }, - }, - }, - }, nil) - require.NoError(t, err) + require.NotEqual(t, "", ociimg.OS) + require.NotEqual(t, "", ociimg.Architecture) + require.NotEqual(t, "", ociimg.Config.WorkingDir) + require.Equal(t, "layers", ociimg.RootFS.Type) + require.Equal(t, 3, len(ociimg.RootFS.DiffIDs)) + require.NotNil(t, ociimg.Created) + require.True(t, time.Since(*ociimg.Created) < 2*time.Minute) + require.Condition(t, func() bool { + for _, env := range ociimg.Config.Env { + if strings.HasPrefix(env, "PATH=") { + return true + } + } + return false + }) - img, err = imageService.Get(ctx, target2) + require.Equal(t, 3, len(ociimg.History)) + require.Contains(t, ociimg.History[0].CreatedBy, "foo/sub/bar") + require.Contains(t, ociimg.History[1].CreatedBy, "true") + require.Contains(t, ociimg.History[2].CreatedBy, "foo/sub/baz") + require.False(t, ociimg.History[0].EmptyLayer) + require.False(t, ociimg.History[1].EmptyLayer) + require.False(t, ociimg.History[2].EmptyLayer) + + dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target()) require.NoError(t, err) - manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + var mfst = struct { + MediaType string `json:"mediaType,omitempty"` + ocispecs.Manifest + }{} + + err = json.Unmarshal(dt, &mfst) require.NoError(t, err) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) - } + require.Equal(t, images.MediaTypeDockerSchema2Manifest, 
mfst.MediaType) + require.Equal(t, 3, len(mfst.Layers)) + require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType) + require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType) - // clear all local state out again - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest}) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) - // try a cross-repo push to same registry, which should still result in the - // image remaining lazy - target3 := registry + "/buildkit/testlazycrossrepo:latest" - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target3, - "push": "true", - }, - }, - }, - }, nil) + m, err := testutil.ReadTarToMap(dt, true) require.NoError(t, err) - img, err = imageService.Get(ctx, target3) + item, ok := m["foo/"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) + require.Equal(t, 0741, int(item.Header.Mode&0777)) + + item, ok = m["foo/sub/"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) + + item, ok = m["foo/sub/bar"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) + require.Equal(t, []byte("first"), item.Data) + + _, ok = m["foo/sub/baz"] + require.False(t, ok) + + dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[2].Digest}) require.NoError(t, err) - manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + m, err = testutil.ReadTarToMap(dt, true) require.NoError(t, err) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + item, ok = m["foo/sub/baz"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg) + require.Equal(t, []byte("second"), item.Data) + + item, ok = m["foo/"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) + require.Equal(t, 0741, int(item.Header.Mode&0777)) + + item, ok = m["foo/sub/"] + require.True(t, ok) + require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir) + + _, ok = m["foo/sub/bar"] + require.False(t, ok) +} + +func testStargzLazyRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + requiresLinux(t) + cdAddress := sb.ContainerdAddress() + if cdAddress == "" || sb.Snapshotter() != "stargz" { + t.Skip("test requires containerd worker with stargz snapshotter") } - // check that a subsequent build can use the previously lazy image in an exec - def, err = llb.Image(target2).Run(llb.Args([]string{"true"})).Marshal(sb.Context()) + client, err := newContainerd(cdAddress) require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + defer client.Close() + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) -} -func testZstdLocalCacheExport(t *testing.T, sb integration.Sandbox) { + var ( + imageService = client.ImageService() + contentStore = client.ContentStore() + ctx = namespaces.WithNamespace(sb.Context(), "buildkit") + ) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("busybox:latest") - cmd := `sh -e -c "echo -n zstd > 
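// Illustrative sketch, not part of the patch itself: testutil.ReadTarToMap
// (used above) indexes a layer tarball by path so entries such as "foo/" or
// OCI whiteouts (".wh.<name>") can be asserted on directly, including their
// header mode bits. Roughly equivalent standalone logic, assuming the second
// argument of the helper means "gzip-compressed":
package example

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"io"
)

type tarItem struct {
	header *tar.Header
	data   []byte
}

func readTarToMap(dt []byte) (map[string]*tarItem, error) {
	gz, err := gzip.NewReader(bytes.NewReader(dt))
	if err != nil {
		return nil, err
	}
	defer gz.Close()
	m := map[string]*tarItem{}
	tr := tar.NewReader(gz)
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			return nil, err
		}
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, tr); err != nil {
			return nil, err
		}
		// Directories keep their trailing slash, matching lookups like m["foo/"].
		m[h.Name] = &tarItem{header: h, data: buf.Bytes()}
	}
	return m, nil
}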
data"` - - st := llb.Scratch() - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) + destDir := t.TempDir() - destDir, err := ioutil.TempDir("", "buildkit") + // Prepare stargz registry cache + orgImage := "docker.io/library/alpine:latest" + sgzCache := registry + "/stargz/alpinecache:" + identity.NewID() + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"})) + def, err := baseDef.Marshal(sb.Context()) require.NoError(t, err) - defer os.RemoveAll(destDir) - - destOutDir, err := ioutil.TempDir("", "buildkit") + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) require.NoError(t, err) - defer os.RemoveAll(destOutDir) - _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destOutDir, + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, - // compression option should work even with inline cache exports CacheExports: []CacheOptionsEntry{ { - Type: "local", + Type: "registry", Attrs: map[string]string{ - "dest": destDir, - "compression": "zstd", + "ref": sgzCache, + "compression": "estargz", + "oci-mediatypes": "true", + "force-compression": "true", }, }, }, }, nil) require.NoError(t, err) - var index ocispecs.Index - dt, err := ioutil.ReadFile(filepath.Join(destDir, "index.json")) + // clear all local state out + ensurePruneAll(t, c, sb) + + // stargz layers should be lazy even for executing something on them + def, err = baseDef. + Run(llb.Args([]string{"/bin/touch", "/bar"})). + Marshal(sb.Context()) require.NoError(t, err) - err = json.Unmarshal(dt, &index) + target := registry + "/buildkit/testlazyimage:" + identity.NewID() + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }, + }, + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": sgzCache, + }, + }, + }, + }, nil) require.NoError(t, err) - var layerIndex ocispecs.Index - dt, err = ioutil.ReadFile(filepath.Join(destDir, "blobs/sha256/"+index.Manifests[0].Digest.Hex())) + img, err := imageService.Get(ctx, target) require.NoError(t, err) - err = json.Unmarshal(dt, &layerIndex) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) require.NoError(t, err) - lastLayer := layerIndex.Manifests[len(layerIndex.Manifests)-2] - require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType) + // Check if image layers are lazy. + // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. 
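// Illustrative sketch, outside this patch: the solve above seeds a registry
// cache in eStargz form. Three attributes do the work: "compression" selects
// the format, "force-compression" converts layers that already exist as plain
// gzip, and the tests always pair eStargz with "oci-mediatypes" since it is
// expressed through OCI media types. The cache ref below is hypothetical.
package example

import "github.com/moby/buildkit/client"

var estargzCacheExport = client.CacheOptionsEntry{
	Type: "registry",
	Attrs: map[string]string{
		"ref":               "registry.example.com/stargz/alpinecache:dev",
		"compression":       "estargz",
		"oci-mediatypes":    "true",
		"force-compression": "true",
	},
}
// Passed to a build via SolveOpt.CacheExports; a later build imports it with a
// plain {Type: "registry", Attrs: {"ref": ...}} entry, as the test does.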
+ var sgzLayers []ocispecs.Descriptor + for i, layer := range manifest.Layers[:len(manifest.Layers)-1] { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i) + sgzLayers = append(sgzLayers, layer) + } + require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") - zstdLayerDigest := lastLayer.Digest.Hex() - dt, err = ioutil.ReadFile(filepath.Join(destDir, "blobs/sha256/"+zstdLayerDigest)) + // The topmost(last) layer created by `Run` shouldn't be lazy + _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) require.NoError(t, err) - require.Equal(t, dt[:4], []byte{0x28, 0xb5, 0x2f, 0xfd}) -} -func testUncompressedLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - dir, err := ioutil.TempDir("", "buildkit") + // clear all local state out + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) require.NoError(t, err) - defer os.RemoveAll(dir) - im := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "src": dir, + checkAllReleasable(t, c, sb, true) + + // stargz layers should be exportable + out = filepath.Join(destDir, "out2.tar") + outW, err = os.Create(out) + require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterOCI, + Output: fixedWriteCloser(outW), + }, }, - } - ex := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "dest": dir, - "compression": "uncompressed", - "force-compression": "true", + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": sgzCache, + }, + }, }, + }, nil) + require.NoError(t, err) + + // Check if image layers are un-lazied + for _, layer := range sgzLayers { + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) + + ensurePruneAll(t, c, sb) } -func testUncompressedRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - target := registry + "/buildkit/testexport:latest" - im := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - }, - } - ex := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - "compression": "uncompressed", - "force-compression": "true", - }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) -} - -func testZstdLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - if sb.Name() == "containerd-1.4" { - // containerd 1.4 doesn't support zstd compression - return - } - dir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(dir) - im := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "src": dir, - }, - } - ex := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "dest": dir, - "compression": "zstd", - "force-compression": "true", - "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype. 
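// Illustrative sketch, outside this patch: "lazy" in the loop above means the
// layer blob was never pulled into containerd's content store, so Info on its
// digest must fail with ErrNotFound. The same check as a reusable predicate
// (the function name is hypothetical):
package example

import (
	"context"
	"errors"

	"github.com/containerd/containerd/content"
	ctderrdefs "github.com/containerd/containerd/errdefs"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func isLazy(ctx context.Context, store content.Store, layer ocispecs.Descriptor) (bool, error) {
	if _, err := store.Info(ctx, layer.Digest); err != nil {
		if errors.Is(err, ctderrdefs.ErrNotFound) {
			return true, nil // only the descriptor is known; the bytes stayed remote
		}
		return false, err
	}
	return false, nil // blob is present locally, i.e. already materialized
}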
- }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) -} - -func testZstdRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - if sb.Name() == "containerd-1.4" { - // containerd 1.4 doesn't support zstd compression - return - } - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) - target := registry + "/buildkit/testexport:latest" - im := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - }, - } - ex := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - "compression": "zstd", - "force-compression": "true", - "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype. - }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) -} - -func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox, cacheOptionsEntryImport, cacheOptionsEntryExport []CacheOptionsEntry) { +func testStargzLazyInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + cdAddress := sb.ContainerdAddress() + if cdAddress == "" || sb.Snapshotter() != "stargz" { + t.Skip("test requires containerd worker with stargz snapshotter") } - run(`sh -c "echo -n foobar > const"`) - run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) - - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - CacheExports: cacheOptionsEntryExport, - }, nil) - require.NoError(t, err) - - dt, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, string(dt), "foobar") - - dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - - ensurePruneAll(t, c, sb) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }}, - CacheImports: cacheOptionsEntryImport, - }, nil) - require.NoError(t, err) - - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const")) - require.NoError(t, err) - require.Equal(t, string(dt2), "foobar") - - dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) - require.NoError(t, err) - require.Equal(t, string(dt), string(dt2)) -} - -func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } + client, err := newContainerd(cdAddress) require.NoError(t, err) - target := registry + "/buildkit/testexport:latest" - o := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o}, []CacheOptionsEntry{o}) -} - -func 
testMultipleRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + defer client.Close() registry, err := sb.NewRegistry() if errors.Is(err, integration.ErrRequirements) { t.Skip(err.Error()) } require.NoError(t, err) - target := registry + "/buildkit/testexport:latest" - o := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - }, - } - o2 := CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": target + "notexist", - }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o, o2}, []CacheOptionsEntry{o}) -} - -func testBasicLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - dir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(dir) - im := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "src": dir, - }, - } - ex := CacheOptionsEntry{ - Type: "local", - Attrs: map[string]string{ - "dest": dir, - }, - } - testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) -} -func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - requiresLinux(t) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) + var ( + imageService = client.ImageService() + contentStore = client.ContentStore() + ctx = namespaces.WithNamespace(sb.Context(), "buildkit") + ) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "echo -n foobar > const"`) - run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) - - def, err := st.Marshal(sb.Context()) + // Prepare stargz inline cache + orgImage := "docker.io/library/alpine:latest" + sgzImage := registry + "/stargz/alpine:" + identity.NewID() + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"})) + def, err := baseDef.Marshal(sb.Context()) require.NoError(t, err) - - target := registry + "/buildkit/testexportinline:latest" - - resp, err := c.Solve(sb.Context(), def, SolveOpt{ + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": target, - "push": "true", + "name": sgzImage, + "push": "true", + "compression": "estargz", + "oci-mediatypes": "true", + "force-compression": "true", }, }, }, @@ -3798,23 +3948,28 @@ func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] - require.Equal(t, ok, true) - - unique, err := readFileInImage(sb.Context(), c, target+"@"+dgst, "/unique") + // clear all local state out + err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete()) require.NoError(t, err) + checkAllReleasable(t, c, sb, true) - ensurePruneAll(t, c, sb) - - resp, err = c.Solve(sb.Context(), def, SolveOpt{ - // specifying inline cache exporter is needed for reproducing containerimage.digest - // (not needed for reproducing rootfs/unique) + // stargz layers should be lazy even for executing something on them + def, err = baseDef. + Run(llb.Args([]string{"/bin/touch", "/bar"})). 
+ Marshal(sb.Context()) + require.NoError(t, err) + target := registry + "/buildkit/testlazyimage:" + identity.NewID() + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": target, - "push": "true", + "name": target, + "push": "true", + "oci-mediatypes": "true", + "compression": "estargz", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", }, }, }, @@ -3827,514 +3982,720 @@ func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { { Type: "registry", Attrs: map[string]string{ - "ref": target, + "ref": sgzImage, }, }, }, }, nil) require.NoError(t, err) - dgst2, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] - require.Equal(t, ok, true) - - require.Equal(t, dgst, dgst2) + img, err := imageService.Get(ctx, target) + require.NoError(t, err) - ensurePruneAll(t, c, sb) + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) - // Export the cache again with compression - resp, err = c.Solve(sb.Context(), def, SolveOpt{ - // specifying inline cache exporter is needed for reproducing containerimage.digest - // (not needed for reproducing rootfs/unique) + // Check if image layers are lazy. + // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. + var sgzLayers []ocispecs.Descriptor + for i, layer := range manifest.Layers[:len(manifest.Layers)-1] { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i) + sgzLayers = append(sgzLayers, layer) + } + require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") + + // The topmost(last) layer created by `Run` shouldn't be lazy + _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) + require.NoError(t, err) + + // clear all local state out + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + // stargz layers should be exportable + destDir := t.TempDir() + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "compression": "uncompressed", // inline cache should work with compression - "force-compression": "true", - }, - }, - }, - CacheExports: []CacheOptionsEntry{ - { - Type: "inline", + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, CacheImports: []CacheOptionsEntry{ { Type: "registry", Attrs: map[string]string{ - "ref": target, + "ref": sgzImage, }, }, }, }, nil) require.NoError(t, err) - dgst2uncompress, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] - require.Equal(t, ok, true) + // Check if image layers are un-lazied + for _, layer := range sgzLayers { + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) + } - // dgst2uncompress != dgst, because the compression type is different - unique2uncompress, err := readFileInImage(sb.Context(), c, target+"@"+dgst2uncompress, "/unique") + ensurePruneAll(t, c, sb) +} + +func testStargzLazyPull(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" || sb.Snapshotter() != "stargz" { + t.Skip("test requires containerd worker with stargz snapshotter") + } + + client, 
err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - require.EqualValues(t, unique, unique2uncompress) - ensurePruneAll(t, c, sb) + var ( + imageService = client.ImageService() + contentStore = client.ContentStore() + ctx = namespaces.WithNamespace(sb.Context(), "buildkit") + ) - resp, err = c.Solve(sb.Context(), def, SolveOpt{ + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + // Prepare stargz image + orgImage := "docker.io/library/alpine:latest" + sgzImage := registry + "/stargz/alpine:" + identity.NewID() + def, err := llb.Image(orgImage).Marshal(sb.Context()) + require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": target, - "push": "true", + "name": sgzImage, + "push": "true", + "compression": "estargz", + "oci-mediatypes": "true", + "force-compression": "true", }, }, }, - CacheImports: []CacheOptionsEntry{ + }, nil) + require.NoError(t, err) + + // clear all local state out + err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + // stargz layers should be lazy even for executing something on them + def, err = llb.Image(sgzImage). + Run(llb.Args([]string{"/bin/touch", "/foo"})). + Marshal(sb.Context()) + require.NoError(t, err) + target := registry + "/buildkit/testlazyimage:" + identity.NewID() + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ { - Type: "registry", + Type: ExporterImage, Attrs: map[string]string{ - "ref": target, + "name": target, + "push": "true", + "oci-mediatypes": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", }, }, }, }, nil) require.NoError(t, err) - dgst3, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] - require.Equal(t, ok, true) + img, err := imageService.Get(ctx, target) + require.NoError(t, err) - // dgst3 != dgst, because inline cache is not exported for dgst3 - unique3, err := readFileInImage(sb.Context(), c, target+"@"+dgst3, "/unique") + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) require.NoError(t, err) - require.EqualValues(t, unique, unique3) -} -func readFileInImage(ctx context.Context, c *Client, ref, path string) ([]byte, error) { - def, err := llb.Image(ref).Marshal(ctx) - if err != nil { - return nil, err - } - destDir, err := ioutil.TempDir("", "buildkit") - if err != nil { - return nil, err + // Check if image layers are lazy. + // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer. 
+ var sgzLayers []ocispecs.Descriptor + for _, layer := range manifest.Layers[:len(manifest.Layers)-1] { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + sgzLayers = append(sgzLayers, layer) } - defer os.RemoveAll(destDir) + require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull") - _, err = c.Solve(ctx, def, SolveOpt{ + // The topmost(last) layer created by `Run` shouldn't be lazy + _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest) + require.NoError(t, err) + + // clear all local state out + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + // stargz layers should be exportable + destDir := t.TempDir() + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, }, nil) - if err != nil { - return nil, err + require.NoError(t, err) + + // Check if image layers are un-lazied + for _, layer := range sgzLayers { + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) } - return ioutil.ReadFile(filepath.Join(destDir, filepath.Clean(path))) + + ensurePruneAll(t, c, sb) } -func testCachedMounts(t *testing.T, sb integration.Sandbox) { +func testLazyImagePush(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) requiresLinux(t) + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + t.Skip("test requires containerd worker") + } + + client, err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("busybox:latest") - // setup base for one of the cache sources - st := busybox.Run(llb.Shlex(`sh -c "echo -n base > baz"`), llb.Dir("/wd")) - base := st.AddMount("/wd", llb.Scratch()) - - st = busybox.Run(llb.Shlex(`sh -c "echo -n first > foo"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st = st.Run(llb.Shlex(`sh -c "cat foo && echo -n second > /wd2/bar"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st.AddMount("/wd2", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) + // push the busybox image to the mutable registry + sourceImage := "busybox:latest" + def, err := llb.Image(sourceImage).Marshal(sb.Context()) + require.NoError(t, err) - def, err := st.Marshal(sb.Context()) + targetNoTag := registry + "/buildkit/testlazyimage:" + target := targetNoTag + "latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }, + }, + }, nil) require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + imageService := client.ImageService() + contentStore := client.ContentStore() + + img, err := 
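// Illustrative sketch, outside this patch: the OCI export above is what forces
// the lazy layers to be materialized, since building the tarball has to read
// every blob. ExporterOCI streams an OCI image layout to a caller-supplied
// writer; the fixedWriteCloser helper used in these tests is essentially this:
package example

import (
	"io"
	"os"

	"github.com/moby/buildkit/client"
)

func ociExportTo(path string) (client.ExportEntry, error) {
	f, err := os.Create(path)
	if err != nil {
		return client.ExportEntry{}, err
	}
	return client.ExportEntry{
		Type: client.ExporterOCI,
		// The map passed to the callback carries exporter metadata; a fixed
		// destination can ignore it and always hand back the same file.
		Output: func(map[string]string) (io.WriteCloser, error) {
			return f, nil
		},
	}, nil
}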
imageService.Get(ctx, target) require.NoError(t, err) - // repeat to make sure cache works - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) require.NoError(t, err) - // second build using cache directories - st = busybox.Run(llb.Shlex(`sh -c "cp /src0/foo . && cp /src1/bar . && cp /src1/baz ."`), llb.Dir("/wd")) - out := st.AddMount("/wd", llb.Scratch()) - st.AddMount("/src0", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st.AddMount("/src1", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) + } - destDir, err := ioutil.TempDir("", "buildkit") + // clear all local state out + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) require.NoError(t, err) - defer os.RemoveAll(destDir) + checkAllReleasable(t, c, sb, true) - def, err = out.Marshal(sb.Context()) + // retag the image we just pushed with no actual changes, which + // should not result in the image getting un-lazied + def, err = llb.Image(target).Marshal(sb.Context()) require.NoError(t, err) + target2 := targetNoTag + "newtag" _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target2, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, }, }, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + img, err = imageService.Get(ctx, target2) require.NoError(t, err) - require.Equal(t, string(dt), "first") - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar")) + manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) require.NoError(t, err) - require.Equal(t, string(dt), "second") - dt, err = ioutil.ReadFile(filepath.Join(destDir, "baz")) - require.NoError(t, err) - require.Equal(t, string(dt), "base") + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + // clear all local state out again + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) checkAllReleasable(t, c, sb, true) -} -func testSharedCacheMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + // try a cross-repo push to same registry, which should still result in the + // image remaining lazy + target3 := registry + "/buildkit/testlazycrossrepo:latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target3, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }, + }, + }, nil) require.NoError(t, err) - defer c.Close() - busybox := llb.Image("busybox:latest") - st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + img, err = imageService.Get(ctx, target3) + require.NoError(t, err) - st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! 
-f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) - out := busybox.Run(llb.Shlex("true")) - out.AddMount("/m1", st.Root()) - out.AddMount("/m2", st2.Root()) + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } - def, err := out.Marshal(sb.Context()) + // check that a subsequent build can use the previously lazy image in an exec + def, err = llb.Image(target2).Run(llb.Args([]string{"true"})).Marshal(sb.Context()) require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.NoError(t, err) } -// #2334 -func testSharedCacheMountsNoScratch(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) +func testZstdLocalCacheExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() busybox := llb.Image("busybox:latest") - st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - - st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) - st2.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + cmd := `sh -e -c "echo -n zstd > data"` - out := busybox.Run(llb.Shlex("true")) - out.AddMount("/m1", st.Root()) - out.AddMount("/m2", st2.Root()) + st := llb.Scratch() + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - def, err := out.Marshal(sb.Context()) + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) -} + destDir := t.TempDir() + destOutDir := t.TempDir() -func testLockedCacheMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destOutDir, + }, + }, + // compression option should work even with inline cache exports + CacheExports: []CacheOptionsEntry{ + { + Type: "local", + Attrs: map[string]string{ + "dest": destDir, + "compression": "zstd", + }, + }, + }, + }, nil) require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := busybox.Run(llb.Shlex(`sh -e -c "touch one; if [[ -f two ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f two ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) - st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; if [[ -f one ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f one ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) - st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - out := busybox.Run(llb.Shlex("true")) - out.AddMount("/m1", st.Root()) - out.AddMount("/m2", st2.Root()) - def, err := out.Marshal(sb.Context()) + var index ocispecs.Index + dt, err := os.ReadFile(filepath.Join(destDir, "index.json")) 
require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + err = json.Unmarshal(dt, &index) require.NoError(t, err) -} -func testDuplicateCacheMount(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + var layerIndex ocispecs.Index + dt, err = os.ReadFile(filepath.Join(destDir, "blobs/sha256/"+index.Manifests[0].Digest.Hex())) require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - - out := busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m2/foo ]]; touch /m1/foo; [[ -f /m2/foo ]];"`)) - out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - def, err := out.Marshal(sb.Context()) + err = json.Unmarshal(dt, &layerIndex) require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + lastLayer := layerIndex.Manifests[len(layerIndex.Manifests)-2] + require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType) + + zstdLayerDigest := lastLayer.Digest.Hex() + dt, err = os.ReadFile(filepath.Join(destDir, "blobs/sha256/"+zstdLayerDigest)) require.NoError(t, err) + require.Equal(t, dt[:4], []byte{0x28, 0xb5, 0x2f, 0xfd}) } -func testRunCacheWithMounts(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) +func testCacheExportIgnoreError(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() busybox := llb.Image("busybox:latest") + cmd := `sh -e -c "echo -n ignore-error > data"` - out := busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`)) - out.AddMount("/m1", llb.Image("alpine:latest"), llb.Readonly) + st := llb.Scratch() + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - def, err := out.Marshal(sb.Context()) + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) + tests := map[string]struct { + Exports []ExportEntry + CacheExports []CacheOptionsEntry + expectedErrors []string + }{ + "local-ignore-error": { + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: t.TempDir(), + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "local", + Attrs: map[string]string{ + "dest": "éèç", + }, + }, + }, + expectedErrors: []string{"failed to solve", "contains value with non-printable ASCII characters"}, + }, + "registry-ignore-error": { + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": "test-registry-ignore-error", + "push": "false", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": "fake-url:5000/myrepo:buildcache", + }, + }, + }, + expectedErrors: []string{"failed to solve", "dial tcp: lookup fake-url", "no such host"}, + }, + "s3-ignore-error": { + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: t.TempDir(), + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "s3", + Attrs: map[string]string{ + "endpoint_url": "http://fake-url:9000", + "bucket": "my-bucket", + "region": "us-east-1", + "access_key_id": "minioadmin", + "secret_access_key": "minioadmin", + "use_path_style": "true", + }, + }, + }, + expectedErrors: []string{"failed to solve", "dial tcp: lookup fake-url", "no such host"}, + }, + } + ignoreErrorValues := []bool{true, false} + for 
_, ignoreError := range ignoreErrorValues { + ignoreErrStr := strconv.FormatBool(ignoreError) + for n, test := range tests { + require.Equal(t, 1, len(test.Exports)) + require.Equal(t, 1, len(test.CacheExports)) + require.NotEmpty(t, test.CacheExports[0].Attrs) + test.CacheExports[0].Attrs["ignore-error"] = ignoreErrStr + testName := fmt.Sprintf("%s-%s", n, ignoreErrStr) + t.Run(testName, func(t *testing.T) { + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: test.Exports, + CacheExports: test.CacheExports, + }, nil) + if ignoreError { + require.NoError(t, err) + } else { + require.Error(t, err) + for _, errStr := range test.expectedErrors { + require.Contains(t, err.Error(), errStr) + } + } + }) + } + } +} - out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`)) - out.AddMount("/m1", llb.Image("busybox:latest"), llb.Readonly) +func testUncompressedLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + dir := t.TempDir() + im := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "src": dir, + }, + } + ex := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "dest": dir, + "compression": "uncompressed", + "force-compression": "true", + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) +} - def, err = out.Marshal(sb.Context()) +func testUncompressedRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) + target := registry + "/buildkit/testexport:latest" + im := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + } + ex := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + "compression": "uncompressed", + "force-compression": "true", + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) +} - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.Error(t, err) +func testZstdLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + dir := t.TempDir() + im := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "src": dir, + }, + } + ex := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "dest": dir, + "compression": "zstd", + "force-compression": "true", + "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype. + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) } -func testCacheMountNoCache(t *testing.T, sb integration.Sandbox) { +func testZstdRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + target := registry + "/buildkit/testexport:latest" + im := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + } + ex := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + "compression": "zstd", + "force-compression": "true", + "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype. 
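// Illustrative sketch, outside this patch: the matrix above exercises a single
// attribute. With "ignore-error": "true", a failing cache export (an invalid
// local path, or an unreachable registry or S3 endpoint) is logged instead of
// failing the whole solve. The ref below reuses the intentionally unreachable
// fixture from the table above.
package example

import "github.com/moby/buildkit/client"

var bestEffortCacheExport = client.CacheOptionsEntry{
	Type: "registry",
	Attrs: map[string]string{
		"ref":          "fake-url:5000/myrepo:buildcache",
		"ignore-error": "true", // continue the build even if the cache push fails
	},
}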
+ }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) +} + +func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox, cacheOptionsEntryImport, cacheOptionsEntryExport []CacheOptionsEntry) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() busybox := llb.Image("busybox:latest") - - out := busybox.Run(llb.Shlex(`sh -e -c "touch /m1/foo; touch /m2/bar"`)) - out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked)) - - def, err := out.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) - - out = busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m1/foo ]]; touch /m1/foo2;"`), llb.IgnoreCache) - out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - - def, err = out.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) - - out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/foo2 ]]; [[ -f /m2/bar ]];"`)) - out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked)) - - def, err = out.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) -} - -func testCopyFromEmptyImage(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - for _, image := range []llb.State{llb.Scratch(), llb.Image("tonistiigi/test:nolayers")} { - st := llb.Scratch().File(llb.Copy(image, "/", "/")) - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) - - st = llb.Scratch().File(llb.Copy(image, "/foo", "/")) - def, err = st.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "/foo: no such file or directory") - - busybox := llb.Image("busybox:latest") - - out := busybox.Run(llb.Shlex(`sh -e -c '[ $(ls /scratch | wc -l) = '0' ]'`)) - out.AddMount("/scratch", image, llb.Readonly) - - def, err = out.Marshal(sb.Context()) - require.NoError(t, err) - - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.NoError(t, err) - } -} - -// containerd/containerd#2119 -func testDuplicateWhiteouts(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) - require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() + st := llb.Scratch() run := func(cmd string) { st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - run(`sh -e -c "mkdir -p d0 d1; echo -n first > d1/bar;"`) - run(`sh -c "rm -rf d0 d1"`) + run(`sh -c "echo -n foobar > const"`) + run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) + destDir := 
t.TempDir() _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), + Type: ExporterLocal, + OutputDir: destDir, }, }, + CacheExports: cacheOptionsEntryExport, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(out) - require.NoError(t, err) - - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) - - var index ocispecs.Index - err = json.Unmarshal(m["index.json"].Data, &index) - require.NoError(t, err) - - var mfst ocispecs.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) - require.NoError(t, err) - - lastLayer := mfst.Layers[len(mfst.Layers)-1] - - layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] - require.True(t, ok) - - m, err = testutil.ReadTarToMap(layer.Data, true) + dt, err := os.ReadFile(filepath.Join(destDir, "const")) require.NoError(t, err) + require.Equal(t, string(dt), "foobar") - _, ok = m[".wh.d0"] - require.True(t, ok) - - _, ok = m[".wh.d1"] - require.True(t, ok) - - // check for a bug that added whiteout for subfile - _, ok = m["d1/.wh.bar"] - require.True(t, !ok) -} - -// #276 -func testWhiteoutParentDir(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + dt, err = os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) - defer c.Close() - - busybox := llb.Image("busybox:latest") - st := llb.Scratch() - - run := func(cmd string) { - st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) - } - - run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`) - run(`rm foo/bar`) - def, err := st.Marshal(sb.Context()) - require.NoError(t, err) + ensurePruneAll(t, c, sb) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, + Type: ExporterLocal, + OutputDir: destDir, + }}, + CacheImports: cacheOptionsEntryImport, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(out) + dt2, err := os.ReadFile(filepath.Join(destDir, "const")) require.NoError(t, err) + require.Equal(t, string(dt2), "foobar") - m, err := testutil.ReadTarToMap(dt, false) + dt2, err = os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) + require.Equal(t, string(dt), string(dt2)) +} - var index ocispecs.Index - err = json.Unmarshal(m["index.json"].Data, &index) +func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) + target := registry + "/buildkit/testexport:latest" + o := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o}, []CacheOptionsEntry{o}) +} - var mfst ocispecs.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) +func testMultipleRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + 
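// Illustrative note, outside this patch: the helper above detects a cache hit
// without inspecting the cache itself. The `cat /dev/urandom | head -c 100 |
// sha256sum > unique` step writes different bytes on every real execution, so
// if the second build, run after a full prune with only the exported cache
// imported, produces byte-identical "unique" content, that step must have been
// served from cache. The comparison is just:
package example

import "bytes"

// sameUnique reports whether two exports of the random fixture match, i.e.
// whether the second build reused the cached layer instead of re-running it.
func sameUnique(first, second []byte) bool {
	return bytes.Equal(first, second)
}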
t.Skip(err.Error()) + } require.NoError(t, err) + target := registry + "/buildkit/testexport:latest" + o := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + } + o2 := CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": target + "notexist", + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o, o2}, []CacheOptionsEntry{o}) +} - lastLayer := mfst.Layers[len(mfst.Layers)-1] - - layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] - require.True(t, ok) +func testBasicLocalCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) + dir := t.TempDir() + im := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "src": dir, + }, + } + ex := CacheOptionsEntry{ + Type: "local", + Attrs: map[string]string{ + "dest": dir, + }, + } + testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex}) +} - m, err = testutil.ReadTarToMap(layer.Data, true) +func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureCacheImport) + requiresLinux(t) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - _, ok = m["foo/.wh.bar"] - require.True(t, ok) - - _, ok = m["foo/"] - require.True(t, ok) -} - -// #2490 -func testMoveParentDir(t *testing.T, sb integration.Sandbox) { c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() @@ -4346,68 +4707,183 @@ func testMoveParentDir(t *testing.T, sb integration.Sandbox) { st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`) - run(`mv foo foo2`) + run(`sh -c "echo -n foobar > const"`) + run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + target := registry + "/buildkit/testexportinline:latest" - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{ + resp, err := c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterOCI, - Output: fixedWriteCloser(outW), + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "inline", }, }, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(out) + dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] + require.Equal(t, ok, true) + + unique, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst, "/unique") require.NoError(t, err) - m, err := testutil.ReadTarToMap(dt, false) - require.NoError(t, err) + ensurePruneAll(t, c, sb) - var index ocispecs.Index - err = json.Unmarshal(m["index.json"].Data, &index) + resp, err = c.Solve(sb.Context(), def, SolveOpt{ + // specifying inline cache exporter is needed for reproducing containerimage.digest + // (not needed for reproducing rootfs/unique) + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "inline", + }, + }, + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: 
map[string]string{ + "ref": target, + }, + }, + }, + }, nil) require.NoError(t, err) - var mfst ocispecs.Manifest - err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) + dgst2, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] + require.Equal(t, ok, true) + + require.Equal(t, dgst, dgst2) + + ensurePruneAll(t, c, sb) + + // Export the cache again with compression + resp, err = c.Solve(sb.Context(), def, SolveOpt{ + // specifying inline cache exporter is needed for reproducing containerimage.digest + // (not needed for reproducing rootfs/unique) + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": "uncompressed", // inline cache should work with compression + "force-compression": "true", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "inline", + }, + }, + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + }, + }, + }, nil) require.NoError(t, err) - lastLayer := mfst.Layers[len(mfst.Layers)-1] + dgst2uncompress, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] + require.Equal(t, ok, true) - layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] - require.True(t, ok) + // dgst2uncompress != dgst, because the compression type is different + unique2uncompress, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst2uncompress, "/unique") + require.NoError(t, err) + require.EqualValues(t, unique, unique2uncompress) - m, err = testutil.ReadTarToMap(layer.Data, true) + ensurePruneAll(t, c, sb) + + resp, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + }, + }, + }, nil) require.NoError(t, err) - _, ok = m[".wh.foo"] - require.True(t, ok) + dgst3, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] + require.Equal(t, ok, true) - _, ok = m["foo2/"] - require.True(t, ok) + // dgst3 != dgst, because inline cache is not exported for dgst3 + unique3, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst3, "/unique") + require.NoError(t, err) + require.EqualValues(t, unique, unique3) +} - _, ok = m["foo2/bar"] - require.True(t, ok) +func readFileInImage(ctx context.Context, t *testing.T, c *Client, ref, path string) ([]byte, error) { + def, err := llb.Image(ref).Marshal(ctx) + if err != nil { + return nil, err + } + destDir := t.TempDir() + + _, err = c.Solve(ctx, def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + if err != nil { + return nil, err + } + return os.ReadFile(filepath.Join(destDir, filepath.Clean(path))) } -// #296 -func testSchema1Image(t *testing.T, sb integration.Sandbox) { +func testCachedMounts(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - st := llb.Image("gcr.io/google_containers/pause:3.0@sha256:0d093c962a6c2dd8bb8727b661e2b5f13e9df884af9945b4cc7088d9350cd3ee") + busybox := llb.Image("busybox:latest") + // setup base for one of the cache sources + st := busybox.Run(llb.Shlex(`sh -c "echo -n base > baz"`), llb.Dir("/wd")) + base := st.AddMount("/wd", llb.Scratch()) + + st = busybox.Run(llb.Shlex(`sh -c "echo -n first > foo"`), llb.Dir("/wd")) + st.AddMount("/wd", 
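// Illustrative sketch, outside this patch: inline cache embeds the cache
// metadata in the pushed image itself, so the export side is just Type
// "inline" with no attributes, and a later build imports the cache by image
// reference. The ref below is hypothetical; the test derives it from the
// sandbox registry.
package example

import "github.com/moby/buildkit/client"

var (
	inlineExport = client.CacheOptionsEntry{Type: "inline"}
	// Importing reads the cache metadata back out of the image on the registry.
	registryImport = client.CacheOptionsEntry{
		Type:  "registry",
		Attrs: map[string]string{"ref": "registry.example.com/buildkit/testexportinline:latest"},
	}
)
// Wired into a solve as:
//   SolveOpt{CacheExports: []client.CacheOptionsEntry{inlineExport},
//            CacheImports: []client.CacheOptionsEntry{registryImport}}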
llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + st = st.Run(llb.Shlex(`sh -c "cat foo && echo -n second > /wd2/bar"`), llb.Dir("/wd")) + st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + st.AddMount("/wd2", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) def, err := st.Marshal(sb.Context()) require.NoError(t, err) @@ -4415,1122 +4891,3571 @@ func testSchema1Image(t *testing.T, sb integration.Sandbox) { _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) -} - -// #319 -func testMountWithNoSource(t *testing.T, sb integration.Sandbox) { - c, err := New(sb.Context(), sb.Address()) + // repeat to make sure cache works + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.NoError(t, err) - defer c.Close() - busybox := llb.Image("docker.io/library/busybox:latest") - st := llb.Scratch() + // second build using cache directories + st = busybox.Run(llb.Shlex(`sh -c "cp /src0/foo . && cp /src1/bar . && cp /src1/baz ."`), llb.Dir("/wd")) + out := st.AddMount("/wd", llb.Scratch()) + st.AddMount("/src0", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) + st.AddMount("/src1", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared)) - var nilState llb.State + destDir := t.TempDir() - // This should never actually be run, but we want to succeed - // if it was, because we expect an error below, or a daemon - // panic if the issue has regressed. - run := busybox.Run( - llb.Args([]string{"/bin/true"}), - llb.AddMount("/nil", nilState, llb.SourcePath("/"), llb.Readonly)) + def, err = out.Marshal(sb.Context()) + require.NoError(t, err) - st = run.AddMount("/mnt", st) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) - def, err := st.Marshal(sb.Context()) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) + require.Equal(t, string(dt), "first") - _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + dt, err = os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) + require.Equal(t, string(dt), "second") + + dt, err = os.ReadFile(filepath.Join(destDir, "baz")) + require.NoError(t, err) + require.Equal(t, string(dt), "base") checkAllReleasable(t, c, sb, true) } -// #324 -func testReadonlyRootFS(t *testing.T, sb integration.Sandbox) { +func testSharedCacheMounts(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - busybox := llb.Image("docker.io/library/busybox:latest") - st := llb.Scratch() + busybox := llb.Image("busybox:latest") + st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) + st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - // The path /foo should be unwriteable. - run := busybox.Run( - llb.ReadonlyRootFS(), - llb.Args([]string{"/bin/touch", "/foo"})) - st = run.AddMount("/mnt", st) + st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! 
-f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) + st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - def, err := st.Marshal(sb.Context()) + out := busybox.Run(llb.Shlex("true")) + out.AddMount("/m1", st.Root()) + out.AddMount("/m2", st2.Root()) + + def, err := out.Marshal(sb.Context()) require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.Error(t, err) - // Would prefer to detect more specifically "Read-only file - // system" but that isn't exposed here (it is on the stdio - // which we don't see). - require.Contains(t, err.Error(), "process \"/bin/touch /foo\" did not complete successfully") - - checkAllReleasable(t, c, sb, true) + require.NoError(t, err) } -func testSourceMap(t *testing.T, sb integration.Sandbox) { +// #2334 +func testSharedCacheMountsNoScratch(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - sm1 := llb.NewSourceMap(nil, "foo", []byte("data1")) - sm2 := llb.NewSourceMap(nil, "bar", []byte("data2")) + busybox := llb.Image("busybox:latest") + st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) + st.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - st := llb.Scratch().Run( - llb.Shlex("not-exist"), - sm1.Location([]*pb.Range{{Start: pb.Position{Line: 7}}}), - sm2.Location([]*pb.Range{{Start: pb.Position{Line: 8}}}), - sm1.Location([]*pb.Range{{Start: pb.Position{Line: 9}}}), - ) + st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd")) + st2.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared)) - def, err := st.Marshal(sb.Context()) + out := busybox.Run(llb.Shlex("true")) + out.AddMount("/m1", st.Root()) + out.AddMount("/m2", st2.Root()) + + def, err := out.Marshal(sb.Context()) require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) - require.Error(t, err) + require.NoError(t, err) +} - srcs := errdefs.Sources(err) - require.Equal(t, 3, len(srcs)) +func testLockedCacheMounts(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() - // Source errors are wrapped in the order provided as llb.ConstraintOpts, so - // when they are unwrapped, the first unwrapped error is the last location - // provided. 
- require.Equal(t, "foo", srcs[0].Info.Filename) - require.Equal(t, []byte("data1"), srcs[0].Info.Data) - require.Nil(t, srcs[0].Info.Definition) + busybox := llb.Image("busybox:latest") + st := busybox.Run(llb.Shlex(`sh -e -c "touch one; if [[ -f two ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f two ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) + st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - require.Equal(t, 1, len(srcs[0].Ranges)) - require.Equal(t, int32(9), srcs[0].Ranges[0].Start.Line) - require.Equal(t, int32(0), srcs[0].Ranges[0].Start.Character) - - require.Equal(t, "bar", srcs[1].Info.Filename) - require.Equal(t, []byte("data2"), srcs[1].Info.Data) - require.Nil(t, srcs[1].Info.Definition) + st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; if [[ -f one ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f one ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd")) + st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - require.Equal(t, 1, len(srcs[1].Ranges)) - require.Equal(t, int32(8), srcs[1].Ranges[0].Start.Line) - require.Equal(t, int32(0), srcs[1].Ranges[0].Start.Character) + out := busybox.Run(llb.Shlex("true")) + out.AddMount("/m1", st.Root()) + out.AddMount("/m2", st2.Root()) - require.Equal(t, "foo", srcs[2].Info.Filename) - require.Equal(t, []byte("data1"), srcs[2].Info.Data) - require.Nil(t, srcs[2].Info.Definition) + def, err := out.Marshal(sb.Context()) + require.NoError(t, err) - require.Equal(t, 1, len(srcs[2].Ranges)) - require.Equal(t, int32(7), srcs[2].Ranges[0].Start.Line) - require.Equal(t, int32(0), srcs[2].Ranges[0].Start.Character) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) } -func testSourceMapFromRef(t *testing.T, sb integration.Sandbox) { +func testDuplicateCacheMount(t *testing.T, sb integration.Sandbox) { requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - srcState := llb.Scratch().File( - llb.Mkfile("foo", 0600, []byte("data"))) - sm := llb.NewSourceMap(&srcState, "bar", []byte("bardata")) - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - st := llb.Scratch().File( - llb.Mkdir("foo/bar", 0600), //fails because /foo doesn't exist - sm.Location([]*pb.Range{{Start: pb.Position{Line: 3, Character: 1}}}), - ) + busybox := llb.Image("busybox:latest") - def, err := st.Marshal(sb.Context()) - if err != nil { - return nil, err - } + out := busybox.Run(llb.Shlex(`sh -e -c "[[ ! 
-f /m2/foo ]]; touch /m1/foo; [[ -f /m2/foo ]];"`)) + out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) + out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) - res, err := c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, err - } + def, err := out.Marshal(sb.Context()) + require.NoError(t, err) - ref, err := res.SingleRef() - if err != nil { - return nil, err - } + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) +} - st2, err := ref.ToState() - if err != nil { - return nil, err - } +func testRunCacheWithMounts(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() - st = llb.Scratch().File( - llb.Copy(st2, "foo", "foo2"), - ) + busybox := llb.Image("busybox:latest") - def, err = st.Marshal(sb.Context()) - if err != nil { - return nil, err - } + out := busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`)) + out.AddMount("/m1", llb.Image("alpine:latest"), llb.Readonly) - return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - }) - } + def, err := out.Marshal(sb.Context()) + require.NoError(t, err) - _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil) - require.Error(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) - srcs := errdefs.Sources(err) - require.Equal(t, 1, len(srcs)) + out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`)) + out.AddMount("/m1", llb.Image("busybox:latest"), llb.Readonly) - require.Equal(t, "bar", srcs[0].Info.Filename) - require.Equal(t, []byte("bardata"), srcs[0].Info.Data) - require.NotNil(t, srcs[0].Info.Definition) + def, err = out.Marshal(sb.Context()) + require.NoError(t, err) - require.Equal(t, 1, len(srcs[0].Ranges)) - require.Equal(t, int32(3), srcs[0].Ranges[0].Start.Line) - require.Equal(t, int32(1), srcs[0].Ranges[0].Start.Character) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) } -func testRmSymlink(t *testing.T, sb integration.Sandbox) { +func testCacheMountNoCache(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - // Test that if FileOp.Rm is called on a symlink, then - // the symlink is removed rather than the target - mnt := llb.Image("alpine"). - Run(llb.Shlex("touch /mnt/target")). - AddMount("/mnt", llb.Scratch()) + busybox := llb.Image("busybox:latest") - mnt = llb.Image("alpine"). - Run(llb.Shlex("ln -s target /mnt/link")). - AddMount("/mnt", mnt) + out := busybox.Run(llb.Shlex(`sh -e -c "touch /m1/foo; touch /m2/bar"`)) + out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) + out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked)) - def, err := mnt.File(llb.Rm("link")).Marshal(sb.Context()) + def, err := out.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) require.NoError(t, err) - defer os.RemoveAll(destDir) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) + out = busybox.Run(llb.Shlex(`sh -e -c "[[ ! 
-f /m1/foo ]]; touch /m1/foo2;"`), llb.IgnoreCache) + out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) + + def, err = out.Marshal(sb.Context()) require.NoError(t, err) - require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.CreateFile("target", nil, 0644))) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) + + out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/foo2 ]]; [[ -f /m2/bar ]];"`)) + out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked)) + out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked)) + + def, err = out.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) } -func testProxyEnv(t *testing.T, sb integration.Sandbox) { +func testCopyFromEmptyImage(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - base := llb.Image("docker.io/library/busybox:latest").Dir("/out") - cmd := `sh -c "echo -n $HTTP_PROXY-$HTTPS_PROXY-$NO_PROXY-$no_proxy-$ALL_PROXY-$all_proxy > env"` + for _, image := range []llb.State{llb.Scratch(), llb.Image("tonistiigi/test:nolayers")} { + st := llb.Scratch().File(llb.Copy(image, "/", "/")) + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) - st := base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ - HTTPProxy: "httpvalue", - HTTPSProxy: "httpsvalue", - NoProxy: "noproxyvalue", - AllProxy: "allproxyvalue", - })) - out := st.AddMount("/out", llb.Scratch()) + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) - def, err := out.Marshal(sb.Context()) + st = llb.Scratch().File(llb.Copy(image, "/foo", "/")) + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "/foo: no such file or directory") + + busybox := llb.Image("busybox:latest") + + out := busybox.Run(llb.Shlex(`sh -e -c '[ $(ls /scratch | wc -l) = '0' ]'`)) + out.AddMount("/scratch", image, llb.Readonly) + + def, err = out.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) + } +} + +// containerd/containerd#2119 +func testDuplicateWhiteouts(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -e -c "mkdir -p d0 d1; echo -n first > d1/bar;"`) + run(`sh -c "rm -rf d0 d1"`) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") + destDir := t.TempDir() + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) require.NoError(t, err) - defer os.RemoveAll(destDir) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "env")) + dt, err := os.ReadFile(out) require.NoError(t, err) - require.Equal(t, string(dt), 
"httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue") - - // repeat to make sure proxy doesn't change cache - st = base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ - HTTPSProxy: "httpsvalue2", - NoProxy: "noproxyvalue2", - })) - out = st.AddMount("/out", llb.Scratch()) - def, err = out.Marshal(sb.Context()) + m, err := testutil.ReadTarToMap(dt, false) require.NoError(t, err) - destDir, err = ioutil.TempDir("", "buildkit") + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) require.NoError(t, err) - defer os.RemoveAll(destDir) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "env")) + lastLayer := mfst.Layers[len(mfst.Layers)-1] + + layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] + require.True(t, ok) + + m, err = testutil.ReadTarToMap(layer.Data, true) require.NoError(t, err) - require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue") + + _, ok = m[".wh.d0"] + require.True(t, ok) + + _, ok = m[".wh.d1"] + require.True(t, ok) + + // check for a bug that added whiteout for subfile + _, ok = m["d1/.wh.bar"] + require.True(t, !ok) } -func testMergeOp(t *testing.T, sb integration.Sandbox) { +// #276 +func testWhiteoutParentDir(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - ctx := sb.Context() - registry, err := sb.NewRegistry() - if !errors.Is(err, integration.ErrRequirements) { - require.NoError(t, err) - } + busybox := llb.Image("busybox:latest") + st := llb.Scratch() - var imageTarget string - if os.Getenv("TEST_DOCKERD") == "1" { - // do image export but use a fake url as the image should just end up in moby's - // local store - imageTarget = "fake.invalid:33333/buildkit/testmergeop:latest" - } else if registry != "" { - imageTarget = registry + "/buildkit/testmergeop:latest" + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } - stateA := llb.Scratch(). - File(llb.Mkfile("/foo", 0755, []byte("A"))). - File(llb.Mkfile("/a", 0755, []byte("A"))). - File(llb.Mkdir("/bar", 0700)). - File(llb.Mkfile("/bar/A", 0755, []byte("A"))) - stateB := stateA. - File(llb.Rm("/foo")). - File(llb.Mkfile("/b", 0755, []byte("B"))). - File(llb.Mkfile("/bar/B", 0754, []byte("B"))) - stateC := llb.Scratch(). - File(llb.Mkfile("/foo", 0755, []byte("C"))). - File(llb.Mkfile("/c", 0755, []byte("C"))). - File(llb.Mkdir("/bar", 0755)). 
- File(llb.Mkfile("/bar/A", 0400, []byte("C"))) + run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`) + run(`rm foo/bar`) - mergeA := llb.Merge([]llb.State{stateA, stateC}) - requireContents(ctx, t, c, sb, mergeA, nil, nil, imageTarget, - fstest.CreateFile("foo", []byte("C"), 0755), - fstest.CreateFile("c", []byte("C"), 0755), - fstest.CreateDir("bar", 0755), - fstest.CreateFile("bar/A", []byte("C"), 0400), - fstest.CreateFile("a", []byte("A"), 0755), - ) + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) - mergeB := llb.Merge([]llb.State{stateC, stateB}) - requireContents(ctx, t, c, sb, mergeB, nil, nil, imageTarget, - fstest.CreateFile("a", []byte("A"), 0755), - fstest.CreateFile("b", []byte("B"), 0755), - fstest.CreateFile("c", []byte("C"), 0755), - fstest.CreateDir("bar", 0700), - fstest.CreateFile("bar/A", []byte("A"), 0755), - fstest.CreateFile("bar/B", []byte("B"), 0754), - ) + destDir := t.TempDir() - stateD := llb.Scratch().File(llb.Mkdir("/qaz", 0755)) - mergeC := llb.Merge([]llb.State{mergeA, mergeB, stateD}) - requireContents(ctx, t, c, sb, mergeC, nil, nil, imageTarget, - fstest.CreateFile("a", []byte("A"), 0755), - fstest.CreateFile("b", []byte("B"), 0755), - fstest.CreateFile("c", []byte("C"), 0755), - fstest.CreateDir("bar", 0700), - fstest.CreateFile("bar/A", []byte("A"), 0755), - fstest.CreateFile("bar/B", []byte("B"), 0754), - fstest.CreateDir("qaz", 0755), - ) + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterOCI, + Output: fixedWriteCloser(outW), + }, + }, + }, nil) + require.NoError(t, err) - runA := runShell(llb.Merge([]llb.State{llb.Image("alpine"), mergeC}), - // turn /a file into a dir, mv b and c into it - "rm /a", - "mkdir /a", - "mv /b /c /a/", - // remove+recreate /bar to make it opaque on overlay snapshotters - "rm -rf /bar", - "mkdir -m 0755 /bar", - "echo -n D > /bar/D", - // turn /qaz dir into a file - "rm -rf /qaz", - "touch /qaz", - ) - stateE := llb.Scratch(). - File(llb.Mkfile("/foo", 0755, []byte("E"))). - File(llb.Mkdir("/bar", 0755)). - File(llb.Mkfile("/bar/A", 0755, []byte("A"))). - File(llb.Mkfile("/bar/E", 0755, nil)) - mergeD := llb.Merge([]llb.State{stateE, runA}) - requireEqualContents(ctx, t, c, mergeD, llb.Image("alpine"). - File(llb.Mkdir("a", 0755)). - File(llb.Mkfile("a/b", 0755, []byte("B"))). - File(llb.Mkfile("a/c", 0755, []byte("C"))). - File(llb.Mkdir("bar", 0755)). - File(llb.Mkfile("bar/D", 0644, []byte("D"))). - File(llb.Mkfile("bar/E", 0755, nil)). 
- File(llb.Mkfile("qaz", 0644, nil)), - // /foo from stateE is not here because it is deleted in stateB, which is part of a submerge of mergeD - ) -} + dt, err := os.ReadFile(out) + require.NoError(t, err) -func testMergeOpCacheInline(t *testing.T, sb integration.Sandbox) { - testMergeOpCache(t, sb, "inline") -} + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) -func testMergeOpCacheMin(t *testing.T, sb integration.Sandbox) { - testMergeOpCache(t, sb, "min") -} + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) + require.NoError(t, err) -func testMergeOpCacheMax(t *testing.T, sb integration.Sandbox) { - testMergeOpCache(t, sb, "max") -} + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) + require.NoError(t, err) -func testMergeOpCache(t *testing.T, sb integration.Sandbox, mode string) { - t.Helper() - skipDockerd(t, sb) - requiresLinux(t) + lastLayer := mfst.Layers[len(mfst.Layers)-1] - cdAddress := sb.ContainerdAddress() - if cdAddress == "" { - t.Skip("test requires containerd worker") - } + layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] + require.True(t, ok) - client, err := newContainerd(cdAddress) + m, err = testutil.ReadTarToMap(layer.Data, true) require.NoError(t, err) - defer client.Close() - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + _, ok = m["foo/.wh.bar"] + require.True(t, ok) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } - require.NoError(t, err) + _, ok = m["foo/"] + require.True(t, ok) +} +// #2490 +func testMoveParentDir(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - // push the busybox image to the mutable registry - sourceImage := "busybox:latest" - def, err := llb.Image(sourceImage).Marshal(sb.Context()) + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) + } + + run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`) + run(`mv foo foo2`) + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - busyboxTargetNoTag := registry + "/buildkit/testlazyimage:" - busyboxTarget := busyboxTargetNoTag + "latest" + destDir := t.TempDir() + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": busyboxTarget, - "push": "true", - }, + Type: ExporterOCI, + Output: fixedWriteCloser(outW), }, }, }, nil) require.NoError(t, err) - imageService := client.ImageService() - contentStore := client.ContentStore() + dt, err := os.ReadFile(out) + require.NoError(t, err) - busyboxImg, err := imageService.Get(ctx, busyboxTarget) + m, err := testutil.ReadTarToMap(dt, false) require.NoError(t, err) - busyboxManifest, err := images.Manifest(ctx, contentStore, busyboxImg.Target, nil) + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) require.NoError(t, err) - for _, layer := range busyboxManifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - } + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst) + require.NoError(t, err) - // clear 
all local state out - err = imageService.Delete(ctx, busyboxImg.Name, images.SynchronousDelete()) + lastLayer := mfst.Layers[len(mfst.Layers)-1] + + layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()] + require.True(t, ok) + + m, err = testutil.ReadTarToMap(layer.Data, true) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) - for _, layer := range busyboxManifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) - } + _, ok = m[".wh.foo"] + require.True(t, ok) - // make a new merge that includes the lazy busybox as a base and exports inline cache - input1 := llb.Scratch(). - File(llb.Mkdir("/dir", 0777)). - File(llb.Mkfile("/dir/1", 0777, nil)) - input1Copy := llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true})) + _, ok = m["foo2/"] + require.True(t, ok) - // put random contents in the file to ensure it's not re-run later - input2 := runShell(llb.Image("alpine:latest"), - "mkdir /dir", - "cat /dev/urandom | head -c 100 | sha256sum > /dir/2") - input2Copy := llb.Scratch().File(llb.Copy(input2, "/dir/2", "/bar/2", &llb.CopyInfo{CreateDestPath: true})) + _, ok = m["foo2/bar"] + require.True(t, ok) +} - merge := llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy}) +// #296 +func testSchema1Image(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() - def, err = merge.Marshal(sb.Context()) + st := llb.Image("gcr.io/google_containers/pause:3.0@sha256:0d093c962a6c2dd8bb8727b661e2b5f13e9df884af9945b4cc7088d9350cd3ee") + + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - target := registry + "/buildkit/testmerge:latest" - cacheTarget := registry + "/buildkit/testmergecache:latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) - var cacheExports []CacheOptionsEntry - var cacheImports []CacheOptionsEntry - switch mode { - case "inline": - cacheExports = []CacheOptionsEntry{{ - Type: "inline", - }} - cacheImports = []CacheOptionsEntry{{ - Type: "registry", - Attrs: map[string]string{ - "ref": target, - }, - }} - case "min": - cacheExports = []CacheOptionsEntry{{ - Type: "registry", - Attrs: map[string]string{ - "ref": cacheTarget, - }, - }} - cacheImports = []CacheOptionsEntry{{ - Type: "registry", - Attrs: map[string]string{ - "ref": cacheTarget, + checkAllReleasable(t, c, sb, true) +} + +// #319 +func testMountWithNoSource(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + busybox := llb.Image("docker.io/library/busybox:latest") + st := llb.Scratch() + + var nilState llb.State + + // This should never actually be run, but we want to succeed + // if it was, because we expect an error below, or a daemon + // panic if the issue has regressed. 
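+	// The zero-value nilState below is a mount with no source attached,
+	// which is exactly the condition that used to panic the daemon (#319).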
+ run := busybox.Run( + llb.Args([]string{"/bin/true"}), + llb.AddMount("/nil", nilState, llb.SourcePath("/"), llb.Readonly)) + + st = run.AddMount("/mnt", st) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) + + checkAllReleasable(t, c, sb, true) +} + +// #324 +func testReadonlyRootFS(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + busybox := llb.Image("docker.io/library/busybox:latest") + st := llb.Scratch() + + // The path /foo should be unwriteable. + run := busybox.Run( + llb.ReadonlyRootFS(), + llb.Args([]string{"/bin/touch", "/foo"})) + st = run.AddMount("/mnt", st) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + // Would prefer to detect more specifically "Read-only file + // system" but that isn't exposed here (it is on the stdio + // which we don't see). + require.Contains(t, err.Error(), "process \"/bin/touch /foo\" did not complete successfully") + + checkAllReleasable(t, c, sb, true) +} + +func testSourceMap(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + sm1 := llb.NewSourceMap(nil, "foo", []byte("data1")) + sm2 := llb.NewSourceMap(nil, "bar", []byte("data2")) + + st := llb.Scratch().Run( + llb.Shlex("not-exist"), + sm1.Location([]*pb.Range{{Start: pb.Position{Line: 7}}}), + sm2.Location([]*pb.Range{{Start: pb.Position{Line: 8}}}), + sm1.Location([]*pb.Range{{Start: pb.Position{Line: 9}}}), + ) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + + srcs := errdefs.Sources(err) + require.Equal(t, 3, len(srcs)) + + // Source errors are wrapped in the order provided as llb.ConstraintOpts, so + // when they are unwrapped, the first unwrapped error is the last location + // provided. 
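+	// Locations were attached for lines 7, 8 and 9, so they unwrap in
+	// reverse: line 9 ("foo"), line 8 ("bar"), then line 7 ("foo").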
+ require.Equal(t, "foo", srcs[0].Info.Filename) + require.Equal(t, []byte("data1"), srcs[0].Info.Data) + require.Nil(t, srcs[0].Info.Definition) + + require.Equal(t, 1, len(srcs[0].Ranges)) + require.Equal(t, int32(9), srcs[0].Ranges[0].Start.Line) + require.Equal(t, int32(0), srcs[0].Ranges[0].Start.Character) + + require.Equal(t, "bar", srcs[1].Info.Filename) + require.Equal(t, []byte("data2"), srcs[1].Info.Data) + require.Nil(t, srcs[1].Info.Definition) + + require.Equal(t, 1, len(srcs[1].Ranges)) + require.Equal(t, int32(8), srcs[1].Ranges[0].Start.Line) + require.Equal(t, int32(0), srcs[1].Ranges[0].Start.Character) + + require.Equal(t, "foo", srcs[2].Info.Filename) + require.Equal(t, []byte("data1"), srcs[2].Info.Data) + require.Nil(t, srcs[2].Info.Definition) + + require.Equal(t, 1, len(srcs[2].Ranges)) + require.Equal(t, int32(7), srcs[2].Ranges[0].Start.Line) + require.Equal(t, int32(0), srcs[2].Ranges[0].Start.Character) +} + +func testSourceMapFromRef(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + srcState := llb.Scratch().File( + llb.Mkfile("foo", 0600, []byte("data"))) + sm := llb.NewSourceMap(&srcState, "bar", []byte("bardata")) + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Scratch().File( + llb.Mkdir("foo/bar", 0600), //fails because /foo doesn't exist + sm.Location([]*pb.Range{{Start: pb.Position{Line: 3, Character: 1}}}), + ) + + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + + res, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + st2, err := ref.ToState() + if err != nil { + return nil, err + } + + st = llb.Scratch().File( + llb.Copy(st2, "foo", "foo2"), + ) + + def, err = st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + } + + _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil) + require.Error(t, err) + + srcs := errdefs.Sources(err) + require.Equal(t, 1, len(srcs)) + + require.Equal(t, "bar", srcs[0].Info.Filename) + require.Equal(t, []byte("bardata"), srcs[0].Info.Data) + require.NotNil(t, srcs[0].Info.Definition) + + require.Equal(t, 1, len(srcs[0].Ranges)) + require.Equal(t, int32(3), srcs[0].Ranges[0].Start.Line) + require.Equal(t, int32(1), srcs[0].Ranges[0].Start.Character) +} + +func testRmSymlink(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + // Test that if FileOp.Rm is called on a symlink, then + // the symlink is removed rather than the target + mnt := llb.Image("alpine"). + Run(llb.Shlex("touch /mnt/target")). + AddMount("/mnt", llb.Scratch()) + + mnt = llb.Image("alpine"). + Run(llb.Shlex("ln -s target /mnt/link")). 
+ AddMount("/mnt", mnt) + + def, err := mnt.File(llb.Rm("link")).Marshal(sb.Context()) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.CreateFile("target", nil, 0644))) +} + +func testProxyEnv(t *testing.T, sb integration.Sandbox) { + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + base := llb.Image("docker.io/library/busybox:latest").Dir("/out") + cmd := `sh -c "echo -n $HTTP_PROXY-$HTTPS_PROXY-$NO_PROXY-$no_proxy-$ALL_PROXY-$all_proxy > env"` + + st := base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ + HTTPProxy: "httpvalue", + HTTPSProxy: "httpsvalue", + NoProxy: "noproxyvalue", + AllProxy: "allproxyvalue", + })) + out := st.AddMount("/out", llb.Scratch()) + + def, err := out.Marshal(sb.Context()) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "env")) + require.NoError(t, err) + require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue") + + // repeat to make sure proxy doesn't change cache + st = base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{ + HTTPSProxy: "httpsvalue2", + NoProxy: "noproxyvalue2", + })) + out = st.AddMount("/out", llb.Scratch()) + + def, err = out.Marshal(sb.Context()) + require.NoError(t, err) + + destDir = t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + dt, err = os.ReadFile(filepath.Join(destDir, "env")) + require.NoError(t, err) + require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue") +} + +func testMergeOp(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + ctx := sb.Context() + registry, err := sb.NewRegistry() + if !errors.Is(err, integration.ErrRequirements) { + require.NoError(t, err) + } + + var imageTarget string + if integration.IsTestDockerdMoby(sb) { + // do image export but use a fake url as the image should just end up in moby's + // local store + imageTarget = "fake.invalid:33333/buildkit/testmergeop:latest" + } else if registry != "" { + imageTarget = registry + "/buildkit/testmergeop:latest" + } + + stateA := llb.Scratch(). + File(llb.Mkfile("/foo", 0755, []byte("A"))). + File(llb.Mkfile("/a", 0755, []byte("A"))). + File(llb.Mkdir("/bar", 0700)). + File(llb.Mkfile("/bar/A", 0755, []byte("A"))) + stateB := stateA. + File(llb.Rm("/foo")). + File(llb.Mkfile("/b", 0755, []byte("B"))). + File(llb.Mkfile("/bar/B", 0754, []byte("B"))) + stateC := llb.Scratch(). + File(llb.Mkfile("/foo", 0755, []byte("C"))). + File(llb.Mkfile("/c", 0755, []byte("C"))). + File(llb.Mkdir("/bar", 0755)). 
+ File(llb.Mkfile("/bar/A", 0400, []byte("C"))) + + mergeA := llb.Merge([]llb.State{stateA, stateC}) + requireContents(ctx, t, c, sb, mergeA, nil, nil, imageTarget, + fstest.CreateFile("foo", []byte("C"), 0755), + fstest.CreateFile("c", []byte("C"), 0755), + fstest.CreateDir("bar", 0755), + fstest.CreateFile("bar/A", []byte("C"), 0400), + fstest.CreateFile("a", []byte("A"), 0755), + ) + + mergeB := llb.Merge([]llb.State{stateC, stateB}) + requireContents(ctx, t, c, sb, mergeB, nil, nil, imageTarget, + fstest.CreateFile("a", []byte("A"), 0755), + fstest.CreateFile("b", []byte("B"), 0755), + fstest.CreateFile("c", []byte("C"), 0755), + fstest.CreateDir("bar", 0700), + fstest.CreateFile("bar/A", []byte("A"), 0755), + fstest.CreateFile("bar/B", []byte("B"), 0754), + ) + + stateD := llb.Scratch().File(llb.Mkdir("/qaz", 0755)) + mergeC := llb.Merge([]llb.State{mergeA, mergeB, stateD}) + requireContents(ctx, t, c, sb, mergeC, nil, nil, imageTarget, + fstest.CreateFile("a", []byte("A"), 0755), + fstest.CreateFile("b", []byte("B"), 0755), + fstest.CreateFile("c", []byte("C"), 0755), + fstest.CreateDir("bar", 0700), + fstest.CreateFile("bar/A", []byte("A"), 0755), + fstest.CreateFile("bar/B", []byte("B"), 0754), + fstest.CreateDir("qaz", 0755), + ) + + runA := runShell(llb.Merge([]llb.State{llb.Image("alpine"), mergeC}), + // turn /a file into a dir, mv b and c into it + "rm /a", + "mkdir /a", + "mv /b /c /a/", + // remove+recreate /bar to make it opaque on overlay snapshotters + "rm -rf /bar", + "mkdir -m 0755 /bar", + "echo -n D > /bar/D", + // turn /qaz dir into a file + "rm -rf /qaz", + "touch /qaz", + ) + stateE := llb.Scratch(). + File(llb.Mkfile("/foo", 0755, []byte("E"))). + File(llb.Mkdir("/bar", 0755)). + File(llb.Mkfile("/bar/A", 0755, []byte("A"))). + File(llb.Mkfile("/bar/E", 0755, nil)) + mergeD := llb.Merge([]llb.State{stateE, runA}) + requireEqualContents(ctx, t, c, mergeD, llb.Image("alpine"). + File(llb.Mkdir("a", 0755)). + File(llb.Mkfile("a/b", 0755, []byte("B"))). + File(llb.Mkfile("a/c", 0755, []byte("C"))). + File(llb.Mkdir("bar", 0755)). + File(llb.Mkfile("bar/D", 0644, []byte("D"))). + File(llb.Mkfile("bar/E", 0755, nil)). 
+ File(llb.Mkfile("qaz", 0644, nil)), + // /foo from stateE is not here because it is deleted in stateB, which is part of a submerge of mergeD + ) +} + +func testMergeOpCacheInline(t *testing.T, sb integration.Sandbox) { + testMergeOpCache(t, sb, "inline") +} + +func testMergeOpCacheMin(t *testing.T, sb integration.Sandbox) { + testMergeOpCache(t, sb, "min") +} + +func testMergeOpCacheMax(t *testing.T, sb integration.Sandbox) { + testMergeOpCache(t, sb, "max") +} + +func testMergeOpCache(t *testing.T, sb integration.Sandbox, mode string) { + t.Helper() + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + t.Skip("test requires containerd worker") + } + + client, err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + // push the busybox image to the mutable registry + sourceImage := "busybox:latest" + def, err := llb.Image(sourceImage).Marshal(sb.Context()) + require.NoError(t, err) + + busyboxTargetNoTag := registry + "/buildkit/testlazyimage:" + busyboxTarget := busyboxTargetNoTag + "latest" + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": busyboxTarget, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + imageService := client.ImageService() + contentStore := client.ContentStore() + + busyboxImg, err := imageService.Get(ctx, busyboxTarget) + require.NoError(t, err) + + busyboxManifest, err := images.Manifest(ctx, contentStore, busyboxImg.Target, nil) + require.NoError(t, err) + + for _, layer := range busyboxManifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) + } + + // clear all local state out + err = imageService.Delete(ctx, busyboxImg.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + for _, layer := range busyboxManifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + + // make a new merge that includes the lazy busybox as a base and exports inline cache + input1 := llb.Scratch(). + File(llb.Mkdir("/dir", 0777)). 
+ File(llb.Mkfile("/dir/1", 0777, nil)) + input1Copy := llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true})) + + // put random contents in the file to ensure it's not re-run later + input2 := runShell(llb.Image("alpine:latest"), + "mkdir /dir", + "cat /dev/urandom | head -c 100 | sha256sum > /dir/2") + input2Copy := llb.Scratch().File(llb.Copy(input2, "/dir/2", "/bar/2", &llb.CopyInfo{CreateDestPath: true})) + + merge := llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy}) + + def, err = merge.Marshal(sb.Context()) + require.NoError(t, err) + + target := registry + "/buildkit/testmerge:latest" + cacheTarget := registry + "/buildkit/testmergecache:latest" + + var cacheExports []CacheOptionsEntry + var cacheImports []CacheOptionsEntry + switch mode { + case "inline": + cacheExports = []CacheOptionsEntry{{ + Type: "inline", + }} + cacheImports = []CacheOptionsEntry{{ + Type: "registry", + Attrs: map[string]string{ + "ref": target, + }, + }} + case "min": + cacheExports = []CacheOptionsEntry{{ + Type: "registry", + Attrs: map[string]string{ + "ref": cacheTarget, + }, + }} + cacheImports = []CacheOptionsEntry{{ + Type: "registry", + Attrs: map[string]string{ + "ref": cacheTarget, + }, + }} + case "max": + cacheExports = []CacheOptionsEntry{{ + Type: "registry", + Attrs: map[string]string{ + "ref": cacheTarget, + "mode": "max", + }, + }} + cacheImports = []CacheOptionsEntry{{ + Type: "registry", + Attrs: map[string]string{ + "ref": cacheTarget, + }, + }} + default: + require.Fail(t, "unknown cache mode: %s", mode) + } + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }, + }, + CacheExports: cacheExports, + }, nil) + require.NoError(t, err) + + // verify that the busybox image stayed lazy + for _, layer := range busyboxManifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + + // get the random value at /bar/2 + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + bar2Contents, err := os.ReadFile(filepath.Join(destDir, "bar", "2")) + require.NoError(t, err) + + // clear all local state out + img, err := imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + + // re-run the same build with cache imports and verify everything stays lazy + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }}, + CacheImports: cacheImports, + CacheExports: cacheExports, + }, nil) + require.NoError(t, err) + + // verify everything from before stayed lazy + img, err = imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err = 
images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + for i, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) + } + + // re-run the build with a change only to input1 using the remote cache + input1 = llb.Scratch(). + File(llb.Mkdir("/dir", 0777)). + File(llb.Mkfile("/dir/1", 0444, nil)) + input1Copy = llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true})) + + merge = llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy}) + + def, err = merge.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }}, + CacheExports: cacheExports, + CacheImports: cacheImports, + }, nil) + require.NoError(t, err) + + // verify everything from before stayed lazy except the middle layer for input1Copy + img, err = imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + for i, layer := range manifest.Layers { + switch i { + case 0, 2: + // bottom and top layer should stay lazy as they didn't change + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) + case 1: + // middle layer had to be rebuilt, should exist locally + _, err = contentStore.Info(ctx, layer.Digest) + require.NoError(t, err) + default: + require.Fail(t, "unexpected layer index %d", i) + } + } + + // check the random value at /bar/2 didn't change + destDir = t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + CacheImports: cacheImports, + }, nil) + require.NoError(t, err) + + newBar2Contents, err := os.ReadFile(filepath.Join(destDir, "bar", "2")) + require.NoError(t, err) + + require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed") + + // Now test the case with a layer on top of a merge. 
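+	// Clear local state again so the rebuild below has to rely entirely
+	// on the imported cache, keeping the unchanged layers lazy.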
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + + mergePlusLayer := merge.File(llb.Mkfile("/3", 0444, nil)) + + def, err = mergePlusLayer.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }}, + CacheExports: cacheExports, + CacheImports: cacheImports, + }, nil) + require.NoError(t, err) + + // check the random value at /bar/2 didn't change + destDir = t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + CacheImports: cacheImports, + }, nil) + require.NoError(t, err) + + newBar2Contents, err = os.ReadFile(filepath.Join(destDir, "bar", "2")) + require.NoError(t, err) + + require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed") + + // clear local state, repeat the build, verify everything stays lazy + err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + require.NoError(t, err) + checkAllReleasable(t, c, sb, true) + + for _, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + } + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "store": "true", + "unsafe-internal-store-allow-incomplete": "true", + }, + }}, + CacheImports: cacheImports, + CacheExports: cacheExports, + }, nil) + require.NoError(t, err) + + img, err = imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + for i, layer := range manifest.Layers { + _, err = contentStore.Info(ctx, layer.Digest) + require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) + } +} + +func requireContents(ctx context.Context, t *testing.T, c *Client, sb integration.Sandbox, state llb.State, cacheImports, cacheExports []CacheOptionsEntry, imageTarget string, files ...fstest.Applier) { + t.Helper() + + def, err := state.Marshal(ctx) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(ctx, def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + CacheImports: cacheImports, + CacheExports: cacheExports, + }, nil) + require.NoError(t, err) + + require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.Apply(files...))) + + if imageTarget != "" { + var exports []ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": imageTarget, + }, + }} + } else { + exports = []ExportEntry{{ + Type: ExporterImage, + Attrs: map[string]string{ + "name": imageTarget, + "push": "true", + }, + }} + } + + _, err = c.Solve(ctx, def, SolveOpt{Exports: exports, CacheImports: cacheImports, CacheExports: cacheExports}, nil) + require.NoError(t, err) + resetState(t, c, sb) + requireContents(ctx, t, c, sb, llb.Image(imageTarget, 
llb.ResolveModePreferLocal), cacheImports, nil, "", files...)
+	}
+}
+
+func requireEqualContents(ctx context.Context, t *testing.T, c *Client, stateA, stateB llb.State) {
+	t.Helper()
+
+	defA, err := stateA.Marshal(ctx)
+	require.NoError(t, err)
+
+	destDirA := t.TempDir()
+
+	_, err = c.Solve(ctx, defA, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:      ExporterLocal,
+				OutputDir: destDirA,
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	defB, err := stateB.Marshal(ctx)
+	require.NoError(t, err)
+
+	destDirB := t.TempDir()
+
+	_, err = c.Solve(ctx, defB, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:      ExporterLocal,
+				OutputDir: destDirB,
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	require.NoError(t, fstest.CheckDirectoryEqual(destDirA, destDirB))
+}
+
+func runShellExecState(base llb.State, cmds ...string) llb.ExecState {
+	return base.Run(llb.Args([]string{"sh", "-c", strings.Join(cmds, " && ")}))
+}
+
+func runShell(base llb.State, cmds ...string) llb.State {
+	return runShellExecState(base, cmds...).Root()
+}
+
+func chainRunShells(base llb.State, cmdss ...[]string) llb.State {
+	for _, cmds := range cmdss {
+		base = runShell(base, cmds...)
+	}
+	return base
+}
+
+func requiresLinux(t *testing.T) {
+	if runtime.GOOS != "linux" {
+		t.Skipf("unsupported GOOS: %s", runtime.GOOS)
+	}
+}
+
+// ensurePruneAll tries to ensure that Prune completes fully, with retries.
+// The current cache implementation defers release-related logic to a goroutine, so
+// there can be a situation where a build has finished but the following prune doesn't
+// clean up the cache because some records still haven't been released.
+// This function tries to ensure the prune happens by retrying it.
+func ensurePruneAll(t *testing.T, c *Client, sb integration.Sandbox) {
+	for i := 0; i < 2; i++ {
+		require.NoError(t, c.Prune(sb.Context(), nil, PruneAll))
+		for j := 0; j < 20; j++ {
+			du, err := c.DiskUsage(sb.Context())
+			require.NoError(t, err)
+			if len(du) == 0 {
+				return
+			}
+			time.Sleep(500 * time.Millisecond)
+		}
+		t.Logf("retrying prune(%d)", i)
+	}
+	t.Fatalf("failed to ensure prune")
+}
+
+func checkAllReleasable(t *testing.T, c *Client, sb integration.Sandbox, checkContent bool) {
+	cl, err := c.ControlClient().ListenBuildHistory(sb.Context(), &controlapi.BuildHistoryRequest{
+		EarlyExit: true,
+	})
+	require.NoError(t, err)
+
+	for {
+		resp, err := cl.Recv()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+		_, err = c.ControlClient().UpdateBuildHistory(sb.Context(), &controlapi.UpdateBuildHistoryRequest{
+			Ref:    resp.Record.Ref,
+			Delete: true,
+		})
+		require.NoError(t, err)
+	}
+
+	retries := 0
+loop0:
+	for {
+		require.True(t, 20 > retries)
+		retries++
+		du, err := c.DiskUsage(sb.Context())
+		require.NoError(t, err)
+		for _, d := range du {
+			if d.InUse {
+				time.Sleep(500 * time.Millisecond)
+				continue loop0
+			}
+		}
+		break
+	}
+
+	err = c.Prune(sb.Context(), nil, PruneAll)
+	require.NoError(t, err)
+
+	du, err := c.DiskUsage(sb.Context())
+	require.NoError(t, err)
+	require.Equal(t, 0, len(du))
+
+	// examine contents of exported tars (requires containerd)
+	cdAddress := sb.ContainerdAddress()
+	if cdAddress == "" {
+		t.Logf("checkAllReleasable: skipping check for exported tars in non-containerd test")
+		return
+	}
+
+	// TODO: make public pull helper function so this can be checked for standalone as well
+
+	client, err := newContainerd(cdAddress)
+	require.NoError(t, err)
+	defer client.Close()
+
+	ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+	snapshotService := client.SnapshotService("overlayfs")
+
+	// Snapshot removal is asynchronous, so poll the snapshotter until it
+	// reports no snapshots, allowing up to 20 retries at 500ms intervals.
+	retries = 0
+	for {
+		count := 0
+		err = snapshotService.Walk(ctx, func(context.Context, snapshots.Info) error {
+			count++
+			return nil
+		})
+		require.NoError(t, err)
+		if count == 0 {
+			break
+		}
+		require.True(t, 20 > retries)
+		retries++
+		time.Sleep(500 * time.Millisecond)
+	}
+
+	if !checkContent {
+		return
+	}
+
+	retries = 0
+	for {
+		count := 0
+		var infos []content.Info
+		err = client.ContentStore().Walk(ctx, func(info content.Info) error {
+			count++
+			infos = append(infos, info)
+			return nil
+		})
+		require.NoError(t, err)
+		if count == 0 {
+			break
+		}
+		if retries >= 50 {
+			require.FailNowf(t, "content still exists", "%+v", infos)
+		}
+		retries++
+		time.Sleep(500 * time.Millisecond)
+	}
+}
+
+func testInvalidExporter(t *testing.T, sb integration.Sandbox) {
+	requiresLinux(t)
+	c, err := New(sb.Context(), sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	def, err := llb.Image("busybox:latest").Marshal(sb.Context())
+	require.NoError(t, err)
+
+	destDir := t.TempDir()
+
+	target := "example.com/buildkit/testoci:latest"
+	attrs := map[string]string{
+		"name": target,
+	}
+	for _, exp := range []string{ExporterOCI, ExporterDocker} {
+		_, err = c.Solve(sb.Context(), def, SolveOpt{
+			Exports: []ExportEntry{
+				{
+					Type:  exp,
+					Attrs: attrs,
+				},
+			},
+		}, nil)
+		// output file writer is required
+		require.Error(t, err)
+		_, err = c.Solve(sb.Context(), def, SolveOpt{
+			Exports: []ExportEntry{
+				{
+					Type:      exp,
+					Attrs:     attrs,
+					OutputDir: destDir,
+				},
+			},
+		}, nil)
+		// output directory is not supported
+		require.Error(t, err)
+	}
+
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:  ExporterLocal,
+				Attrs: attrs,
+			},
+		},
+	}, nil)
+	// output directory is required
+	require.Error(t, err)
+
+	f, err := os.Create(filepath.Join(destDir, "a"))
+	require.NoError(t, err)
+	defer f.Close()
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterLocal,
+				Attrs:  attrs,
+				Output: fixedWriteCloser(f),
+			},
+		},
+	}, nil)
+	// output file writer is not supported
+	require.Error(t, err)
+
+	checkAllReleasable(t, c, sb, true)
+}
+
+// moby/buildkit#492
+func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) {
+	ctx, cancel := context.WithCancel(sb.Context())
+	defer cancel()
+
+	c, err := New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	eg, ctx := errgroup.WithContext(ctx)
+
+	for i := 0; i < 3; i++ {
+		func(i int) {
+			eg.Go(func() error {
+				fn := fmt.Sprintf("test%d", i)
+				srcDir, err := integration.Tmpdir(
+					t,
+					fstest.CreateFile(fn, []byte("contents"), 0600),
+				)
+				require.NoError(t, err)
+
+				def, err := llb.Local("source").Marshal(sb.Context())
+				require.NoError(t, err)
+
+				destDir := t.TempDir()
+
+				_, err = c.Solve(ctx, def, SolveOpt{
+					Exports: []ExportEntry{
+						{
+							Type:      ExporterLocal,
+							OutputDir: destDir,
+						},
+					},
+					LocalDirs: map[string]string{
+						"source": srcDir,
+					},
+				}, nil)
+				require.NoError(t, err)
+
+				act, err := os.ReadFile(filepath.Join(destDir, fn))
+				require.NoError(t, err)
+
+				require.Equal(t, "contents", string(act))
+				return nil
+			})
+		}(i)
+	}
+
+	err = eg.Wait()
+	require.NoError(t, err)
+}
+
+// testRelativeMountpoint verifies that relative paths for mountpoints don't
+// fail when runc is upgraded to at least rc95, which introduces an error when
+// mountpoints are not absolute. Relative paths should be transformed to
+// absolute paths based on the llb.State's current working directory.
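+// For example, a mount at "relpath" under llb.Dir("/root") is expected to
+// resolve to /root/relpath inside the container.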
+func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + id := identity.NewID() + + st := llb.Image("busybox:latest").Dir("/root").Run( + llb.Shlexf("sh -c 'echo -n %s > /root/relpath/data'", id), + ).AddMount("relpath", llb.Scratch()) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "data")) + require.NoError(t, err) + require.Equal(t, dt, []byte(id)) +} + +// moby/buildkit#2476 +func testBuildInfoExporter(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Image("busybox:latest").Run( + llb.Args([]string{"/bin/sh", "-c", `echo hello`}), + ) + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + } + + var exports []ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []ExportEntry{{ + Type: ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } + + res, err := c.Build(sb.Context(), SolveOpt{ + Exports: exports, + }, "", frontend, nil) + require.NoError(t, err) + + require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) + decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) + require.NoError(t, err) + + var exbi binfotypes.BuildInfo + err = json.Unmarshal(decbi, &exbi) + require.NoError(t, err) + + require.Equal(t, len(exbi.Sources), 1) + require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage) + require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") +} + +// moby/buildkit#2476 +func testBuildInfoInline(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Image("busybox:latest").Run( + llb.Args([]string{"/bin/sh", "-c", `echo hello`}), + ) + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + t.Skip("rest of test requires containerd worker") + } + + client, err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + target := registry + "/buildkit/test-buildinfo:latest" + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, - }} - case "max": - cacheExports = []CacheOptionsEntry{{ - Type: "registry", - Attrs: map[string]string{ - "ref": cacheTarget, - "mode": "max", + }, + }, nil) + require.NoError(t, err) + + img, err := 
client.GetImage(ctx, target) + require.NoError(t, err) + + desc, err := img.Config(ctx) + require.NoError(t, err) + + dt, err := content.ReadBlob(ctx, img.ContentStore(), desc) + require.NoError(t, err) + + var config binfotypes.ImageConfig + require.NoError(t, json.Unmarshal(dt, &config)) + + dec, err := base64.StdEncoding.DecodeString(config.BuildInfo) + require.NoError(t, err) + + var bi binfotypes.BuildInfo + require.NoError(t, json.Unmarshal(dec, &bi)) + + require.Equal(t, len(bi.Sources), 1) + require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage) + require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest") +} + +func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Image("busybox:latest").Run( + llb.Args([]string{"/bin/sh", "-c", `echo hello`}), + ) + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + } + + res, err := c.Build(sb.Context(), SolveOpt{}, "", frontend, nil) + require.NoError(t, err) + + require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) + decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) + require.NoError(t, err) + + var exbi binfotypes.BuildInfo + err = json.Unmarshal(decbi, &exbi) + require.NoError(t, err) + + require.Equal(t, len(exbi.Sources), 1) + require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage) + require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") +} + +func testPullWithLayerLimit(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Scratch(). + File(llb.Mkfile("/first", 0644, []byte("first"))). + File(llb.Mkfile("/second", 0644, []byte("second"))). + File(llb.Mkfile("/third", 0644, []byte("third"))) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/testlayers:latest" + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + // pull the first 2 layers + st = llb.Image(target, llb.WithLayerLimit(2)).
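The assertions that follow rely on two LLB constructs: `llb.WithLayerLimit`, which caps how many layers of an image are materialized, and `llb.Diff`, which yields only what sits above a lower state. A compact sketch assuming the moby/buildkit `client/llb` package; `topLayerOnly` is an illustrative helper, not test code:

```go
package sketch

import (
	"context"

	"github.com/moby/buildkit/client/llb"
)

// topLayerOnly marshals a state containing only the layers of ref above
// the first two: the capped image is subtracted from the full one.
func topLayerOnly(ctx context.Context, ref string) (*llb.Definition, error) {
	lower := llb.Image(ref, llb.WithLayerLimit(2))
	upper := llb.Image(ref)
	return llb.Diff(lower, upper).Marshal(ctx)
}
```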
+ File(llb.Mkfile("/forth", 0644, []byte("forth"))) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterLocal, + OutputDir: destDir, + }}, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "first")) + require.NoError(t, err) + require.Equal(t, string(dt), "first") + + dt, err = os.ReadFile(filepath.Join(destDir, "second")) + require.NoError(t, err) + require.Equal(t, string(dt), "second") + + _, err = os.ReadFile(filepath.Join(destDir, "third")) + require.Error(t, err) + require.True(t, errors.Is(err, os.ErrNotExist)) + + dt, err = os.ReadFile(filepath.Join(destDir, "forth")) + require.NoError(t, err) + require.Equal(t, string(dt), "forth") + + // pull 3rd layer only + st = llb.Diff( + llb.Image(target, llb.WithLayerLimit(2)), + llb.Image(target)). + File(llb.Mkfile("/forth", 0644, []byte("forth"))) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + destDir = t.TempDir() + + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{{ + Type: ExporterLocal, + OutputDir: destDir, + }}, + }, nil) + require.NoError(t, err) + + _, err = os.ReadFile(filepath.Join(destDir, "first")) + require.Error(t, err) + require.True(t, errors.Is(err, os.ErrNotExist)) + + _, err = os.ReadFile(filepath.Join(destDir, "second")) + require.Error(t, err) + require.True(t, errors.Is(err, os.ErrNotExist)) + + dt, err = os.ReadFile(filepath.Join(destDir, "third")) + require.NoError(t, err) + require.Equal(t, string(dt), "third") + + dt, err = os.ReadFile(filepath.Join(destDir, "forth")) + require.NoError(t, err) + require.Equal(t, string(dt), "forth") + + // zero limit errors cleanly + st = llb.Image(target, llb.WithLayerLimit(0)) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid layer limit") +} + +func testCallInfo(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureInfo) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + _, err = c.Info(sb.Context()) + require.NoError(t, err) +} + +func testValidateDigestOrigin(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Image("busybox:latest").Run(llb.Shlex("touch foo"), llb.Dir("/wd")).AddMount("/wd", llb.Scratch()) + + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/testdigest:latest" + + resp, err := c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey] + require.True(t, ok) + + err = c.Prune(sb.Context(), nil, PruneAll) + require.NoError(t, err) + + st = llb.Image(target + "@" + dgst) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.NoError(t, err) + + // accessing the digest from 
invalid names should fail + st = llb.Image("example.invalid/nosuchrepo@" + dgst) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) + + // also check a repo that exists but does not contain the digest + st = llb.Image("docker.io/library/ubuntu@" + dgst) + + def, err = st.Marshal(sb.Context()) + require.NoError(t, err) + + _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil) + require.Error(t, err) +} + +func testExportAnnotations(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + amd64 := platforms.MustParse("linux/amd64") + arm64 := platforms.MustParse("linux/arm64") + ps := []ocispecs.Platform{amd64, arm64} + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(ps)), + } + for i, p := range ps { + st := llb.Scratch().File( + llb.Mkfile("platform", 0600, []byte(platforms.Format(p))), + ) + + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + + _, err = ref.ToState() + if err != nil { + return nil, err + } + + k := platforms.Format(p) + res.AddRef(k, ref) + + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: k, + Platform: p, + } + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + res.AddMeta(exptypes.AnnotationIndexKey("gi"), []byte("generic index")) + res.AddMeta(exptypes.AnnotationIndexDescriptorKey("gid"), []byte("generic index descriptor")) + res.AddMeta(exptypes.AnnotationManifestKey(nil, "gm"), []byte("generic manifest")) + res.AddMeta(exptypes.AnnotationManifestDescriptorKey(nil, "gmd"), []byte("generic manifest descriptor")) + res.AddMeta(exptypes.AnnotationManifestKey(&amd64, "m"), []byte("amd64 manifest")) + res.AddMeta(exptypes.AnnotationManifestKey(&arm64, "m"), []byte("arm64 manifest")) + res.AddMeta(exptypes.AnnotationManifestDescriptorKey(&amd64, "md"), []byte("amd64 manifest descriptor")) + res.AddMeta(exptypes.AnnotationManifestDescriptorKey(&arm64, "md"), []byte("arm64 manifest descriptor")) + res.AddMeta(exptypes.AnnotationKey{Key: "gd"}.String(), []byte("generic default")) + + return res, nil + } + + // testing for image exporter + + target := registry + "/buildkit/testannotations:latest" + + const created = "2022-01-23T12:34:56Z" + + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "annotation-index.gio": "generic index opt", + "annotation-index."
+ ocispecs.AnnotationCreated: created, + "annotation-manifest.gmo": "generic manifest opt", + "annotation-manifest-descriptor.gmdo": "generic manifest descriptor opt", + "annotation-manifest[linux/amd64].mo": "amd64 manifest opt", + "annotation-manifest-descriptor[linux/amd64].mdo": "amd64 manifest descriptor opt", + "annotation-manifest[linux/arm64].mo": "arm64 manifest opt", + "annotation-manifest-descriptor[linux/arm64].mdo": "arm64 manifest descriptor opt", + }, }, - }} - cacheImports = []CacheOptionsEntry{{ - Type: "registry", - Attrs: map[string]string{ - "ref": cacheTarget, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + require.Equal(t, "generic index", imgs.Index.Annotations["gi"]) + require.Equal(t, "generic index opt", imgs.Index.Annotations["gio"]) + require.Equal(t, created, imgs.Index.Annotations[ocispecs.AnnotationCreated]) + for _, desc := range imgs.Index.Manifests { + require.Equal(t, "generic manifest descriptor", desc.Annotations["gmd"]) + require.Equal(t, "generic manifest descriptor opt", desc.Annotations["gmdo"]) + switch { + case platforms.Only(amd64).Match(*desc.Platform): + require.Equal(t, "amd64 manifest descriptor", desc.Annotations["md"]) + require.Equal(t, "amd64 manifest descriptor opt", desc.Annotations["mdo"]) + case platforms.Only(arm64).Match(*desc.Platform): + require.Equal(t, "arm64 manifest descriptor", desc.Annotations["md"]) + require.Equal(t, "arm64 manifest descriptor opt", desc.Annotations["mdo"]) + default: + require.Fail(t, "unrecognized platform") + } + } + + amdImage := imgs.Find(platforms.Format(amd64)) + require.Equal(t, "generic default", amdImage.Manifest.Annotations["gd"]) + require.Equal(t, "generic manifest", amdImage.Manifest.Annotations["gm"]) + require.Equal(t, "generic manifest opt", amdImage.Manifest.Annotations["gmo"]) + require.Equal(t, "amd64 manifest", amdImage.Manifest.Annotations["m"]) + require.Equal(t, "amd64 manifest opt", amdImage.Manifest.Annotations["mo"]) + + armImage := imgs.Find(platforms.Format(arm64)) + require.Equal(t, "generic default", armImage.Manifest.Annotations["gd"]) + require.Equal(t, "generic manifest", armImage.Manifest.Annotations["gm"]) + require.Equal(t, "generic manifest opt", armImage.Manifest.Annotations["gmo"]) + require.Equal(t, "arm64 manifest", armImage.Manifest.Annotations["m"]) + require.Equal(t, "arm64 manifest opt", armImage.Manifest.Annotations["mo"]) + + // testing for oci exporter + + destDir := t.TempDir() + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterOCI, + Output: fixedWriteCloser(outW), + Attrs: map[string]string{ + "annotation-index.gio": "generic index opt", + "annotation-index-descriptor.gido": "generic index descriptor opt", + "annotation-index-descriptor." 
+ ocispecs.AnnotationCreated: created, + "annotation-manifest.gmo": "generic manifest opt", + "annotation-manifest-descriptor.gmdo": "generic manifest descriptor opt", + "annotation-manifest[linux/amd64].mo": "amd64 manifest opt", + "annotation-manifest-descriptor[linux/amd64].mdo": "amd64 manifest descriptor opt", + "annotation-manifest[linux/arm64].mo": "arm64 manifest opt", + "annotation-manifest-descriptor[linux/arm64].mdo": "arm64 manifest descriptor opt", + }, }, - }} - default: - require.Fail(t, "unknown cache mode: %s", mode) + }, + }, "", frontend, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(out) + require.NoError(t, err) + + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) + + var layout ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &layout) + require.Equal(t, "generic index descriptor", layout.Manifests[0].Annotations["gid"]) + require.Equal(t, "generic index descriptor opt", layout.Manifests[0].Annotations["gido"]) + require.Equal(t, created, layout.Manifests[0].Annotations[ocispecs.AnnotationCreated]) + require.NoError(t, err) + + var index ocispecs.Index + err = json.Unmarshal(m["blobs/sha256/"+layout.Manifests[0].Digest.Hex()].Data, &index) + require.Equal(t, "generic index", index.Annotations["gi"]) + require.Equal(t, "generic index opt", index.Annotations["gio"]) + require.NoError(t, err) + + for _, desc := range index.Manifests { + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+desc.Digest.Hex()].Data, &mfst) + require.NoError(t, err) + + require.Equal(t, "generic default", mfst.Annotations["gd"]) + require.Equal(t, "generic manifest", mfst.Annotations["gm"]) + require.Equal(t, "generic manifest descriptor", desc.Annotations["gmd"]) + require.Equal(t, "generic manifest opt", mfst.Annotations["gmo"]) + require.Equal(t, "generic manifest descriptor opt", desc.Annotations["gmdo"]) + + switch { + case platforms.Only(amd64).Match(*desc.Platform): + require.Equal(t, "amd64 manifest", mfst.Annotations["m"]) + require.Equal(t, "amd64 manifest descriptor", desc.Annotations["md"]) + require.Equal(t, "amd64 manifest opt", mfst.Annotations["mo"]) + require.Equal(t, "amd64 manifest descriptor opt", desc.Annotations["mdo"]) + case platforms.Only(arm64).Match(*desc.Platform): + require.Equal(t, "arm64 manifest", mfst.Annotations["m"]) + require.Equal(t, "arm64 manifest descriptor", desc.Annotations["md"]) + require.Equal(t, "arm64 manifest opt", mfst.Annotations["mo"]) + require.Equal(t, "arm64 manifest descriptor opt", desc.Annotations["mdo"]) + default: + require.Fail(t, "unrecognized platform") + } + } +} + +func testExportAnnotationsMediaTypes(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) } + require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{ + p := platforms.DefaultSpec() + ps := []ocispecs.Platform{p} + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(ps)), + } + for i, p := range ps { + st := llb.Scratch().File( + llb.Mkfile("platform", 0600, []byte(platforms.Format(p))), + ) + + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + + r, err := 
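The exporter attribute keys exercised above follow a small grammar: a level (`annotation-index`, `annotation-index-descriptor`, `annotation-manifest`, `annotation-manifest-descriptor`), an optional `[os/arch]` platform scope, and the annotation name. A sketch with the key shapes from these tests and purely illustrative values:

```go
package main

import "fmt"

func main() {
	attrs := map[string]string{
		"annotation-index.gio":                            "set on the image index",
		"annotation-index-descriptor.gido":                "set on the index descriptor",
		"annotation-manifest.gmo":                         "set on every manifest",
		"annotation-manifest[linux/amd64].mo":             "set on the amd64 manifest only",
		"annotation-manifest-descriptor[linux/arm64].mdo": "set on the arm64 manifest descriptor",
	}
	for k, v := range attrs {
		fmt.Printf("%-50s %s\n", k, v)
	}
}
```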
c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + + _, err = ref.ToState() + if err != nil { + return nil, err + } + + k := platforms.Format(p) + res.AddRef(k, ref) + + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: k, + Platform: p, + } + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil + } + + target := registry + "/buildkit/testannotationsmedia:1" + _, err = c.Build(sb.Context(), SolveOpt{ Exports: []ExportEntry{ { Type: ExporterImage, Attrs: map[string]string{ - "name": target, - "push": "true", + "name": target, + "push": "true", + "annotation-manifest.a": "b", + }, + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 1, len(imgs.Images)) + + target2 := registry + "/buildkit/testannotationsmedia:2" + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target2, + "push": "true", + "annotation-index.c": "d", + }, + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err = contentutil.ProviderFromRef(target2) + require.NoError(t, err) + imgs2, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 1, len(imgs2.Images)) + + require.Equal(t, "b", imgs.Images[0].Manifest.Annotations["a"]) + require.Equal(t, "d", imgs2.Index.Annotations["c"]) + + require.Equal(t, images.MediaTypeDockerSchema2ManifestList, imgs.Index.MediaType) + require.Equal(t, ocispecs.MediaTypeImageIndex, imgs2.Index.MediaType) +} + +func testExportAttestations(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + ps := []ocispecs.Platform{ + platforms.MustParse("linux/amd64"), + platforms.MustParse("linux/arm64"), + } + + success := []byte(`{"success": true}`) + successDigest := digest.SHA256.FromBytes(success) + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{} + + for _, p := range ps { + pk := platforms.Format(p) + expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p}) + + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) + + // build attestations + st = llb.Scratch(). + File(llb.Mkfile("/attestation.json", 0600, success)). 
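Attaching an attestation to a platform result is a single call on the gateway result, as the tests below do. A minimal sketch, assuming the usual buildkit import paths aliased as in this file; the predicate URL is the test's own example value:

```go
package sketch

import (
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
	"github.com/moby/buildkit/solver/result"
)

// attachInToto registers the in-toto attestation carried in refAttest at
// /attestation.json against the platform key pk.
func attachInToto(res *gateway.Result, pk string, refAttest gateway.Reference) {
	res.AddAttestation(pk, gateway.Attestation{
		Kind: gatewaypb.AttestationKindInToto,
		Ref:  refAttest,
		Path: "/attestation.json",
		InToto: result.InTotoAttestation{
			PredicateType: "https://example.com/attestations/v1.0",
			Subjects: []result.InTotoSubject{{
				Kind: gatewaypb.InTotoSubjectKindSelf, // subject defaults to the built image
			}},
		},
	})
}
```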
+ File(llb.Mkfile("/attestation2.json", 0600, []byte{})) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Ref: refAttest, + Path: "/attestation.json", + InToto: result.InTotoAttestation{ + PredicateType: "https://example.com/attestations/v1.0", + Subjects: []result.InTotoSubject{{ + Kind: gatewaypb.InTotoSubjectKindSelf, + }}, + }, + }) + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Ref: refAttest, + Path: "/attestation2.json", + InToto: result.InTotoAttestation{ + PredicateType: "https://example.com/attestations2/v1.0", + Subjects: []result.InTotoSubject{{ + Kind: gatewaypb.InTotoSubjectKindRaw, + Name: "/attestation.json", + Digest: []digest.Digest{successDigest}, + }}, + }, + }) + } + + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil + } + + t.Run("image", func(t *testing.T) { + targets := []string{ + registry + "/buildkit/testattestationsfoo:latest", + registry + "/buildkit/testattestationsbar:latest", + } + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": strings.Join(targets, ","), + "push": "true", + }, + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(targets[0]) + require.NoError(t, err) + + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, len(ps)*2, len(imgs.Images)) + + var bases []*testutil.ImageInfo + for _, p := range ps { + pk := platforms.Format(p) + img := imgs.Find(pk) + require.NotNil(t, img) + require.Equal(t, pk, platforms.Format(*img.Desc.Platform)) + require.Equal(t, 1, len(img.Layers)) + require.Equal(t, []byte(fmt.Sprintf("hello %s!", pk)), img.Layers[0]["greeting"].Data) + bases = append(bases, img) + } + + atts := imgs.Filter("unknown/unknown") + require.Equal(t, len(ps), len(atts.Images)) + for i, att := range atts.Images { + require.Equal(t, ocispecs.MediaTypeImageManifest, att.Desc.MediaType) + require.Equal(t, "unknown/unknown", platforms.Format(*att.Desc.Platform)) + require.Equal(t, "unknown/unknown", att.Img.OS+"/"+att.Img.Architecture) + require.Equal(t, attestation.DockerAnnotationReferenceTypeDefault, att.Desc.Annotations[attestation.DockerAnnotationReferenceType]) + require.Equal(t, bases[i].Desc.Digest.String(), att.Desc.Annotations[attestation.DockerAnnotationReferenceDigest]) + require.Equal(t, 2, len(att.Layers)) + require.Equal(t, len(att.Layers), len(att.Img.RootFS.DiffIDs)) + require.Equal(t, len(att.Img.History), 0) + + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + + purls := map[string]string{} + for _, k := range targets { + p, _ := purl.RefToPURL(k, &ps[i]) + purls[k] = p + } + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) + subjects := []intoto.Subject{ + { + Name: purls[targets[0]], + Digest: map[string]string{ + 
"sha256": bases[i].Desc.Digest.Encoded(), + }, + }, + { + Name: purls[targets[1]], + Digest: map[string]string{ + "sha256": bases[i].Desc.Digest.Encoded(), + }, + }, + } + require.Equal(t, subjects, attest.Subject) + + var attest2 intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[1], &attest2)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) + require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) + require.Nil(t, attest2.Predicate) + subjects = []intoto.Subject{{ + Name: "/attestation.json", + Digest: map[string]string{ + "sha256": successDigest.Encoded(), + }, + }} + require.Equal(t, subjects, attest2.Subject) + } + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + return + } + client, err := containerd.New(cdAddress) + require.NoError(t, err) + defer client.Close() + ctx := namespaces.WithNamespace(sb.Context(), "buildkit") + + for _, target := range targets { + err = client.ImageService().Delete(ctx, target, images.SynchronousDelete()) + require.NoError(t, err) + } + checkAllReleasable(t, c, sb, true) + }) + + t.Run("local", func(t *testing.T) { + dir := t.TempDir() + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: dir, + Attrs: map[string]string{ + "attestation-prefix": "test.", + }, + }, + }, + }, "", frontend, nil) + require.NoError(t, err) + + for _, p := range ps { + var attest intoto.Statement + dt, err := os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json")) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(dt, &attest)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) + + require.Equal(t, []intoto.Subject{{ + Name: "greeting", + Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")), + }}, attest.Subject) + + var attest2 intoto.Statement + dt, err = os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json")) + require.NoError(t, err) + require.NoError(t, json.Unmarshal(dt, &attest2)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) + require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) + require.Nil(t, attest2.Predicate) + subjects := []intoto.Subject{{ + Name: "/attestation.json", + Digest: map[string]string{ + "sha256": successDigest.Encoded(), + }, + }} + require.Equal(t, subjects, attest2.Subject) + } + }) + + t.Run("tar", func(t *testing.T) { + dir := t.TempDir() + out := filepath.Join(dir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterTar, + Output: fixedWriteCloser(outW), + Attrs: map[string]string{ + "attestation-prefix": "test.", + }, }, }, - }, - CacheExports: cacheExports, - }, nil) - require.NoError(t, err) + }, "", frontend, nil) + require.NoError(t, err) - // verify that the busybox image stayed lazy - for _, layer := range busyboxManifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) - } + dt, err := os.ReadFile(out) + require.NoError(t, err) - // get the random value at /bar/2 - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - 
defer os.RemoveAll(destDir) + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - }, nil) - require.NoError(t, err) + for _, p := range ps { + var attest intoto.Statement + dt := m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json")].Data + require.NoError(t, json.Unmarshal(dt, &attest)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) + + require.Equal(t, []intoto.Subject{{ + Name: "greeting", + Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")), + }}, attest.Subject) + + var attest2 intoto.Statement + dt = m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json")].Data + require.NoError(t, json.Unmarshal(dt, &attest2)) + + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type) + require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType) + require.Nil(t, attest2.Predicate) + subjects := []intoto.Subject{{ + Name: "/attestation.json", + Digest: map[string]string{ + "sha256": successDigest.Encoded(), + }, + }} + require.Equal(t, subjects, attest2.Subject) + } + }) +} - bar2Contents, err := ioutil.ReadFile(filepath.Join(destDir, "bar", "2")) +func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - // clear all local state out - img, err := imageService.Get(ctx, target) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) - require.NoError(t, err) + ps := []ocispecs.Platform{ + platforms.MustParse("linux/amd64"), + } - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) - require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + success := []byte(`{"success": true}`) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{} + + for _, p := range ps { + pk := platforms.Format(p) + expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p}) + + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) + + // build attestations + st = llb.Scratch().File(llb.Mkfile("/attestation.json", 0600, success)) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return 
nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Ref: refAttest, + Path: "/attestation.json", + InToto: result.InTotoAttestation{ + PredicateType: "https://example.com/attestations/v1.0", + }, + }) + } + + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil } - // re-run the same build with cache imports and verify everything stays lazy - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{{ - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", + target := registry + "/buildkit/testattestationsemptysubject:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, - }}, - CacheImports: cacheImports, - CacheExports: cacheExports, - }, nil) + }, + }, "", frontend, nil) require.NoError(t, err) - // verify everything from before stayed lazy - img, err = imageService.Get(ctx, target) + desc, provider, err := contentutil.ProviderFromRef(target) require.NoError(t, err) - manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) require.NoError(t, err) + require.Equal(t, len(ps)*2, len(imgs.Images)) - for i, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) + var bases []*testutil.ImageInfo + for _, p := range ps { + pk := platforms.Format(p) + bases = append(bases, imgs.Find(pk)) } - // re-run the build with a change only to input1 using the remote cache - input1 = llb.Scratch(). - File(llb.Mkdir("/dir", 0777)). 
- File(llb.Mkfile("/dir/1", 0444, nil)) - input1Copy = llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true})) + atts := imgs.Filter("unknown/unknown") + require.Equal(t, len(ps), len(atts.Images)) + for i, att := range atts.Images { + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - merge = llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy}) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate) - def, err = merge.Marshal(sb.Context()) - require.NoError(t, err) + name, _ := purl.RefToPURL(target, &ps[0]) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{{ - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", + subjects := []intoto.Subject{{ + Name: name, + Digest: map[string]string{ + "sha256": bases[i].Desc.Digest.Encoded(), }, - }}, - CacheExports: cacheExports, - CacheImports: cacheImports, - }, nil) - require.NoError(t, err) + }} + require.Equal(t, subjects, attest.Subject) + } +} - // verify everything from before stayed lazy except the middle layer for input1Copy - img, err = imageService.Get(ctx, target) +func testAttestationBundle(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) + defer c.Close() - manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - for i, layer := range manifest.Layers { - switch i { - case 0, 2: - // bottom and top layer should stay lazy as they didn't change - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) - case 1: - // middle layer had to be rebuilt, should exist locally - _, err = contentStore.Info(ctx, layer.Digest) - require.NoError(t, err) - default: - require.Fail(t, "unexpected layer index %d", i) - } + ps := []ocispecs.Platform{ + platforms.MustParse("linux/amd64"), } - // check the random value at /bar/2 didn't change - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + expPlatforms := &exptypes.Platforms{} + + for _, p := range ps { + pk := platforms.Format(p) + expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p}) + + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) - _, err = c.Solve(sb.Context(), def, SolveOpt{ + stmt := intoto.Statement{ + StatementHeader: intoto.StatementHeader{ + Type: intoto.StatementInTotoV01, + PredicateType: "https://example.com/attestations/v1.0", + }, + Predicate: map[string]interface{}{ + "foo": 
"1", + }, + } + buff := bytes.NewBuffer(nil) + enc := json.NewEncoder(buff) + require.NoError(t, enc.Encode(stmt)) + + // build attestations + st = llb.Scratch() + st = st.File( + llb.Mkdir("/bundle", 0700), + ) + st = st.File( + llb.Mkfile("/bundle/attestation.json", 0600, buff.Bytes()), + ) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindBundle, + Ref: refAttest, + Path: "/bundle", + }) + } + + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil + } + + target := registry + "/buildkit/testattestationsbundle:latest" + _, err = c.Build(sb.Context(), SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - CacheImports: cacheImports, - }, nil) + }, "", frontend, nil) require.NoError(t, err) - newBar2Contents, err := ioutil.ReadFile(filepath.Join(destDir, "bar", "2")) + desc, provider, err := contentutil.ProviderFromRef(target) require.NoError(t, err) - require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed") - - // Now test the case with a layer on top of a merge. - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + require.Equal(t, len(ps)*2, len(imgs.Images)) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + var bases []*testutil.ImageInfo + for _, p := range ps { + pk := platforms.Format(p) + bases = append(bases, imgs.Find(pk)) } - mergePlusLayer := merge.File(llb.Mkfile("/3", 0444, nil)) - - def, err = mergePlusLayer.Marshal(sb.Context()) - require.NoError(t, err) + atts := imgs.Filter("unknown/unknown") + require.Equal(t, len(ps)*1, len(atts.Images)) + for i, att := range atts.Images { + require.Equal(t, 1, len(att.LayersRaw)) + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{{ - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", + require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType) + require.Equal(t, map[string]interface{}{"foo": "1"}, attest.Predicate) + name, _ := purl.RefToPURL(target, &ps[i]) + subjects := []intoto.Subject{{ + Name: name, + Digest: map[string]string{ + "sha256": bases[i].Desc.Digest.Encoded(), }, - }}, - CacheExports: cacheExports, - CacheImports: cacheImports, - }, nil) - require.NoError(t, err) + }} + require.Equal(t, subjects, attest.Subject) + } +} - // check the random value at /bar/2 didn't change - destDir, err = ioutil.TempDir("", "buildkit") +func testSBOMScan(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) - defer os.RemoveAll(destDir) + defer c.Close() - _, err = 
c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - CacheImports: cacheImports, - }, nil) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - newBar2Contents, err = ioutil.ReadFile(filepath.Join(destDir, "bar", "2")) - require.NoError(t, err) + p := platforms.MustParse("linux/amd64") + pk := platforms.Format(p) - require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed") + scannerFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() - // clear local state, repeat the build, verify everything stays lazy - err = imageService.Delete(ctx, img.Name, images.SynchronousDelete()) - require.NoError(t, err) - checkAllReleasable(t, c, sb, true) + st := llb.Image("busybox") + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) + + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) + + expPlatforms := &exptypes.Platforms{ + Platforms: []exptypes.Platform{{ID: pk, Platform: p}}, + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + var img ocispecs.Image + cmd := ` +cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://spdx.dev/Document", + "predicate": {"name": "fallback"} +} +EOF +` + img.Config.Cmd = []string{"/bin/sh", "-c", cmd} + config, err := json.Marshal(img) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal image config") + } + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, pk), config) - for _, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err) + return res, nil } - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{{ - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", + scannerTarget := registry + "/buildkit/testsbomscanner:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": scannerTarget, + "push": "true", + }, + }, - }}, - CacheImports: cacheImports, - CacheExports: cacheExports, - }, nil) - require.NoError(t, err) - - img, err = imageService.Get(ctx, target) + }, + }, "", scannerFrontend, nil) require.NoError(t, err) - manifest, err = images.Manifest(ctx, contentStore, img.Target, nil) - require.NoError(t, err) + makeTargetFrontend := func(attest bool) func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + return func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() - for i, layer := range manifest.Layers { - _, err = contentStore.Info(ctx, layer.Digest) - require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i) - } -} + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte("hello world!")), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err +
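The scanner image above is wired in through two contracts: at scan time it writes SPDX statements into `$BUILDKIT_SCAN_DESTINATION`, and at build time it is selected with the `attest:sbom` frontend attribute (an empty value selects the default scanner). A sketch of the client-side options, assuming the buildkit `client` package; the names follow the test:

```go
package sketch

import "github.com/moby/buildkit/client"

// sbomSolveOpt requests an SBOM attestation, delegating scanning to a
// custom generator image for results the frontend did not scan itself.
func sbomSolveOpt(scannerTarget, target string) client.SolveOpt {
	return client.SolveOpt{
		FrontendAttrs: map[string]string{
			"attest:sbom": "generator=" + scannerTarget,
		},
		Exports: []client.ExportEntry{{
			Type:  client.ExporterImage,
			Attrs: map[string]string{"name": target, "push": "true"},
		}},
	}
}
```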
} + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) -func requireContents(ctx context.Context, t *testing.T, c *Client, sb integration.Sandbox, state llb.State, cacheImports, cacheExports []CacheOptionsEntry, imageTarget string, files ...fstest.Applier) { - t.Helper() + expPlatforms := &exptypes.Platforms{ + Platforms: []exptypes.Platform{{ID: pk, Platform: p}}, + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + // build attestations + if attest { + st = llb.Scratch(). + File(llb.Mkfile("/result.spdx", 0600, []byte(`{"name": "frontend"}`))) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } - def, err := state.Marshal(ctx) - require.NoError(t, err) + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Ref: refAttest, + Path: "/result.spdx", + InToto: result.InTotoAttestation{ + PredicateType: intoto.PredicateSPDX, + }, + }) + } - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + return res, nil + } + } - _, err = c.Solve(ctx, def, SolveOpt{ + // test the default fallback scanner + target := registry + "/buildkit/testsbom:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "", + }, Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDir, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - CacheImports: cacheImports, - CacheExports: cacheExports, - }, nil) + }, "", makeTargetFrontend(false), nil) require.NoError(t, err) - require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.Apply(files...))) + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) - if imageTarget != "" { - var exports []ExportEntry - if os.Getenv("TEST_DOCKERD") == "1" { - exports = []ExportEntry{{ - Type: "moby", - Attrs: map[string]string{ - "name": imageTarget, - }, - }} - } else { - exports = []ExportEntry{{ + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + // test the frontend builtin scanner + target = registry + "/buildkit/testsbom2:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "", + }, + Exports: []ExportEntry{ + { Type: ExporterImage, Attrs: map[string]string{ - "name": imageTarget, + "name": target, "push": "true", }, - }} - } - - _, err = c.Solve(ctx, def, SolveOpt{Exports: exports, CacheImports: cacheImports, CacheExports: cacheExports}, nil) - require.NoError(t, err) - resetState(t, c, sb) - requireContents(ctx, t, c, sb, llb.Image(imageTarget, llb.ResolveModePreferLocal), cacheImports, nil, "", files...) 
- } -} - -func requireEqualContents(ctx context.Context, t *testing.T, c *Client, stateA, stateB llb.State) { - t.Helper() + }, + }, + }, "", makeTargetFrontend(true), nil) + require.NoError(t, err) - defA, err := stateA.Marshal(ctx) + desc, provider, err = contentutil.ProviderFromRef(target) require.NoError(t, err) - destDirA, err := ioutil.TempDir("", "buildkit") + imgs, err = testutil.ReadImages(sb.Context(), provider, desc) require.NoError(t, err) - defer os.RemoveAll(destDirA) + require.Equal(t, 2, len(imgs.Images)) - _, err = c.Solve(ctx, defA, SolveOpt{ + att := imgs.Find("unknown/unknown") + attest := intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) + require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"}) + + // test the specified fallback scanner + target = registry + "/buildkit/testsbom3:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "generator=" + scannerTarget, + }, Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDirA, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - }, nil) + }, "", makeTargetFrontend(false), nil) require.NoError(t, err) - defB, err := stateB.Marshal(ctx) + desc, provider, err = contentutil.ProviderFromRef(target) require.NoError(t, err) - destDirB, err := ioutil.TempDir("", "buildkit") + imgs, err = testutil.ReadImages(sb.Context(), provider, desc) require.NoError(t, err) - defer os.RemoveAll(destDirB) + require.Equal(t, 2, len(imgs.Images)) - _, err = c.Solve(ctx, defB, SolveOpt{ + att = imgs.Find("unknown/unknown") + attest = intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) + require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"}) + + // test the builtin frontend scanner and the specified fallback scanner together + target = registry + "/buildkit/testsbom3:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "generator=" + scannerTarget, + }, Exports: []ExportEntry{ { - Type: ExporterLocal, - OutputDir: destDirB, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - }, nil) + }, "", makeTargetFrontend(true), nil) require.NoError(t, err) - require.NoError(t, fstest.CheckDirectoryEqual(destDirA, destDirB)) -} + desc, provider, err = contentutil.ProviderFromRef(target) + require.NoError(t, err) -func runShellExecState(base llb.State, cmds ...string) llb.ExecState { - return base.Run(llb.Args([]string{"sh", "-c", strings.Join(cmds, " && ")})) -} + imgs, err = testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) -func runShell(base llb.State, cmds ...string) llb.State { - return runShellExecState(base, cmds...).Root() + att = imgs.Find("unknown/unknown") + attest = intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) + require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"}) } -func chainRunShells(base llb.State, cmdss 
...[]string) llb.State { - for _, cmds := range cmdss { - base = runShell(base, cmds...) - } - return base -} +func testSBOMScanSingleRef(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM) + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() -func requiresLinux(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("unsupported GOOS: %s", runtime.GOOS) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) } -} + require.NoError(t, err) -// ensurePruneAll tries to ensure Prune completes with retries. -// Current cache implementation defers release-related logic using goroutine so -// there can be situation where a build has finished but the following prune doesn't -// cleanup cache because some records still haven't been released. -// This function tries to ensure prune by retrying it. -func ensurePruneAll(t *testing.T, c *Client, sb integration.Sandbox) { - for i := 0; i < 2; i++ { - require.NoError(t, c.Prune(sb.Context(), nil, PruneAll)) - for j := 0; j < 20; j++ { - du, err := c.DiskUsage(sb.Context()) - require.NoError(t, err) - if len(du) == 0 { - return - } - time.Sleep(500 * time.Millisecond) - } - t.Logf("retrying prune(%d)", i) - } - t.Fatalf("failed to ensure prune") -} + p := platforms.DefaultSpec() + pk := platforms.Format(p) -func checkAllReleasable(t *testing.T, c *Client, sb integration.Sandbox, checkContent bool) { - retries := 0 -loop0: - for { - require.True(t, 20 > retries) - retries++ - du, err := c.DiskUsage(sb.Context()) + scannerFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + + st := llb.Image("busybox") + def, err := st.Marshal(sb.Context()) require.NoError(t, err) - for _, d := range du { - if d.InUse { - time.Sleep(500 * time.Millisecond) - continue loop0 - } - } - break - } - err := c.Prune(sb.Context(), nil, PruneAll) - require.NoError(t, err) + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) - du, err := c.DiskUsage(sb.Context()) - require.NoError(t, err) - require.Equal(t, 0, len(du)) + expPlatforms := &exptypes.Platforms{ + Platforms: []exptypes.Platform{{ID: pk, Platform: p}}, + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + var img ocispecs.Image + cmd := ` +cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://spdx.dev/Document", + "predicate": {"name": "fallback"} +} +EOF +` + img.Config.Cmd = []string{"/bin/sh", "-c", cmd} + config, err := json.Marshal(img) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal image config") + } + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, pk), config) - // examine contents of exported tars (requires containerd) - cdAddress := sb.ContainerdAddress() - if cdAddress == "" { - t.Logf("checkAllReleasable: skipping check for exported tars in non-containerd test") - return + return res, nil } - // TODO: make public pull helper function so this can be checked for standalone as well - - client, err := newContainerd(cdAddress) + scannerTarget := registry +
"/buildkit/testsbomscanner:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": scannerTarget, + "push": "true", + }, + }, + }, + }, "", scannerFrontend, nil) require.NoError(t, err) - defer client.Close() - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - snapshotService := client.SnapshotService("overlayfs") + targetFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() - retries = 0 - for { - count := 0 - err = snapshotService.Walk(ctx, func(context.Context, snapshots.Info) error { - count++ - return nil + // build image + st := llb.Scratch().File( + llb.Mkfile("/greeting", 0600, []byte("hello world!")), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), }) - require.NoError(t, err) - if count == 0 { - break + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err } - require.True(t, 20 > retries) - retries++ - time.Sleep(500 * time.Millisecond) - } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.SetRef(ref) - if !checkContent { - return - } + var img ocispecs.Image + img.Config.Cmd = []string{"/bin/sh", "-c", "cat /greeting"} + config, err := json.Marshal(img) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal image config") + } + res.AddMeta(exptypes.ExporterImageConfigKey, config) - retries = 0 - for { - count := 0 - var infos []content.Info - err = client.ContentStore().Walk(ctx, func(info content.Info) error { - count++ - infos = append(infos, info) - return nil - }) - require.NoError(t, err) - if count == 0 { - break + expPlatforms := &exptypes.Platforms{ + Platforms: []exptypes.Platform{{ID: pk, Platform: p}}, } - if retries >= 20 { - require.FailNowf(t, "content still exists", "%+v", infos) + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err } - retries++ - time.Sleep(500 * time.Millisecond) + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return res, nil } + + target := registry + "/buildkit/testsbomsingle:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "generator=" + scannerTarget, + }, + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, "", targetFrontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + img := imgs.Find(pk) + require.NotNil(t, img) + require.Equal(t, []string{"/bin/sh", "-c", "cat /greeting"}, img.Img.Config.Cmd) + + att := imgs.Find("unknown/unknown") + require.NotNil(t, att) + attest := intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) + require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"}) } -func testInvalidExporter(t *testing.T, sb integration.Sandbox) { +func testSBOMSupplements(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM) requiresLinux(t) c, err := New(sb.Context(), 
sb.Address()) require.NoError(t, err) defer c.Close() - def, err := llb.Image("busybox:latest").Marshal(sb.Context()) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + p := platforms.MustParse("linux/amd64") + pk := platforms.Format(p) - target := "example.com/buildkit/testoci:latest" - attrs := map[string]string{ - "name": target, - } - for _, exp := range []string{ExporterOCI, ExporterDocker} { - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res := gateway.NewResult() + + // build image + st := llb.Scratch().File( + llb.Mkfile("/foo", 0600, []byte{}), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + res.AddRef(pk, ref) + + expPlatforms := &exptypes.Platforms{ + Platforms: []exptypes.Platform{{ID: pk, Platform: p}}, + } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + + // build attestations + doc := spdx.Document{ + SPDXIdentifier: "DOCUMENT", + Files: []*spdx.File{ { - Type: exp, - Attrs: attrs, + // foo exists... + FileSPDXIdentifier: "SPDXRef-File-foo", + FileName: "/foo", }, - }, - }, nil) - // output file writer is required - require.Error(t, err) - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ { - Type: exp, - Attrs: attrs, - OutputDir: destDir, + // ...but bar doesn't + FileSPDXIdentifier: "SPDXRef-File-bar", + FileName: "/bar", }, }, - }, nil) - // output directory is not supported - require.Error(t, err) + } + docBytes, err := json.Marshal(doc) + if err != nil { + return nil, err + } + st = llb.Scratch(). 
+ File(llb.Mkfile("/result.spdx", 0600, docBytes)) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + refAttest, err := r.SingleRef() + if err != nil { + return nil, err + } + _, err = ref.ToState() + if err != nil { + return nil, err + } + + res.AddAttestation(pk, gateway.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Ref: refAttest, + Path: "/result.spdx", + InToto: result.InTotoAttestation{ + PredicateType: intoto.PredicateSPDX, + }, + Metadata: map[string][]byte{ + result.AttestationSBOMCore: []byte("result"), + }, + }) + + return res, nil } - _, err = c.Solve(sb.Context(), def, SolveOpt{ + // test the default fallback scanner + target := registry + "/buildkit/testsbom:latest" + _, err = c.Build(sb.Context(), SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:sbom": "", + }, Exports: []ExportEntry{ { - Type: ExporterLocal, - Attrs: attrs, + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - }, nil) - // output directory is required - require.Error(t, err) + }, "", frontend, nil) + require.NoError(t, err) - f, err := os.Create(filepath.Join(destDir, "a")) + desc, provider, err := contentutil.ProviderFromRef(target) require.NoError(t, err) - defer f.Close() - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - Attrs: attrs, - Output: fixedWriteCloser(f), - }, - }, - }, nil) - // output file writer is not supported - require.Error(t, err) - checkAllReleasable(t, c, sb, true) -} + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) -// moby/buildkit#492 -func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) { - ctx, cancel := context.WithCancel(sb.Context()) - defer cancel() + att := imgs.Find("unknown/unknown") + attest := struct { + intoto.StatementHeader + Predicate spdx.Document + }{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, intoto.PredicateSPDX, attest.PredicateType) + + require.Equal(t, "DOCUMENT", string(attest.Predicate.SPDXIdentifier)) + require.Len(t, attest.Predicate.Files, 2) + require.Equal(t, attest.Predicate.Files[0].FileName, "/foo") + require.Regexp(t, "^layerID: sha256:", attest.Predicate.Files[0].FileComment) + require.Equal(t, attest.Predicate.Files[1].FileName, "/bar") + require.Empty(t, attest.Predicate.Files[1].FileComment) +} - c, err := New(ctx, sb.Address()) +func testMultipleCacheExports(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureMultiCacheExport) + c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - eg, ctx := errgroup.WithContext(ctx) - - for i := 0; i < 3; i++ { - func(i int) { - eg.Go(func() error { - fn := fmt.Sprintf("test%d", i) - srcDir, err := tmpdir( - fstest.CreateFile(fn, []byte("contents"), 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(srcDir) - - def, err := llb.Local("source").Marshal(sb.Context()) - require.NoError(t, err) - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Solve(ctx, def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterLocal, - OutputDir: destDir, - }, - }, - LocalDirs: map[string]string{ - "source": srcDir, - }, - 
}, nil) - require.NoError(t, err) - - act, err := ioutil.ReadFile(filepath.Join(destDir, fn)) - require.NoError(t, err) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) - require.Equal(t, "contents", string(act)) - return nil - }) - }(i) + busybox := llb.Image("busybox:latest") + st := llb.Scratch() + run := func(cmd string) { + st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st) } + run(`sh -c "echo -n foobar > const"`) + run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`) - err = eg.Wait() + def, err := st.Marshal(sb.Context()) require.NoError(t, err) -} -// testRelativeMountpoint is a test that relative paths for mountpoints don't -// fail when runc is upgraded to at least rc95, which introduces an error when -// mountpoints are not absolute. Relative paths should be transformed to -// absolute points based on the llb.State's current working directory. -func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + target := path.Join(registry, "image:test") + target2 := path.Join(registry, "image-copy:test") + cacheRef := path.Join(registry, "cache:test") + cacheOutDir, cacheOutDir2 := t.TempDir(), t.TempDir() + + res, err := c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "local", + Attrs: map[string]string{ + "dest": cacheOutDir, + }, + }, + { + Type: "local", + Attrs: map[string]string{ + "dest": cacheOutDir2, + }, + }, + { + Type: "registry", + Attrs: map[string]string{ + "ref": cacheRef, + }, + }, + { + Type: "inline", + }, + }, + }, nil) require.NoError(t, err) - defer c.Close() - id := identity.NewID() + ensureFile(t, filepath.Join(cacheOutDir, "index.json")) + ensureFile(t, filepath.Join(cacheOutDir2, "index.json")) - st := llb.Image("busybox:latest").Dir("/root").Run( - llb.Shlexf("sh -c 'echo -n %s > /root/relpath/data'", id), - ).AddMount("relpath", llb.Scratch()) + dgst := res.ExporterResponse[exptypes.ExporterImageDigestKey] - def, err := st.Marshal(sb.Context()) + uniqueFile, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst, "/unique") require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") + res, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target2, + "push": "true", + }, + }, + }, + CacheExports: []CacheOptionsEntry{ + { + Type: "inline", + }, + }, + }, nil) require.NoError(t, err) - defer os.RemoveAll(destDir) + dgst2 := res.ExporterResponse[exptypes.ExporterImageDigestKey] + require.Equal(t, dgst, dgst2) + + destDir := t.TempDir() + ensurePruneAll(t, c, sb) _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { @@ -5538,235 +8463,233 @@ func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) { OutputDir: destDir, }, }, + CacheImports: []CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": cacheRef, + }, + }, + }, }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "data")) - require.NoError(t, err) - require.Equal(t, dt, []byte(id)) + ensureFileContents(t, filepath.Join(destDir, "const"), "foobar") + ensureFileContents(t, filepath.Join(destDir, "unique"), string(uniqueFile)) } -// moby/buildkit#2476 -func 
testBuildInfoExporter(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) +func testMountStubsDirectory(t *testing.T, sb integration.Sandbox) { c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - st := llb.Image("busybox:latest").Run( - llb.Args([]string{"/bin/sh", "-c", `echo hello`}), - ) - def, err := st.Marshal(sb.Context()) - if err != nil { - return nil, err - } - return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - FrontendOpt: map[string]string{"build-arg:foo": "bar"}, - }) - } + st := llb.Image("busybox:latest"). + File(llb.Mkdir("/test", 0700)). + File(llb.Mkdir("/test/qux/", 0700)). + Run( + llb.Args([]string{"touch", "/test/baz/keep"}), + // check that the stub directory is removed + llb.AddMount("/test/foo", llb.Scratch(), llb.Tmpfs()), + // check that stub directories are recursively removed + llb.AddMount("/test/bar/x/y", llb.Scratch(), llb.Tmpfs()), + // check that only empty stub directories are removed + llb.AddMount("/test/baz/x", llb.Scratch(), llb.Tmpfs()), + // check that previously existing directories are not removed + llb.AddMount("/test/qux", llb.Scratch(), llb.Tmpfs()), + ).Root() + st = llb.Scratch().File(llb.Copy(st, "/test", "/", &llb.CopyInfo{CopyDirContentsOnly: true})) + def, err := st.Marshal(sb.Context()) + require.NoError(t, err) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } + tmpDir := t.TempDir() + tarFile := filepath.Join(tmpDir, "out.tar") + tarFileW, err := os.Create(tarFile) require.NoError(t, err) + defer tarFileW.Close() - res, err := c.Build(sb.Context(), SolveOpt{ + _, err = c.Solve(sb.Context(), def, SolveOpt{ Exports: []ExportEntry{ { - Type: ExporterImage, - Attrs: map[string]string{ - "name": registry + "/buildkit/test-buildinfo:latest", - "push": "true", - }, + Type: ExporterTar, + Output: fixedWriteCloser(tarFileW), }, }, - }, "", frontend, nil) + }, nil) require.NoError(t, err) + tarFileW.Close() - require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) - decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) + dt, err := os.ReadFile(tarFile) require.NoError(t, err) - var exbi binfotypes.BuildInfo - err = json.Unmarshal(decbi, &exbi) + m, err := testutil.ReadTarToMap(dt, false) require.NoError(t, err) - attrval := "bar" - require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval}) - require.Equal(t, len(exbi.Sources), 1) - require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage) - require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + + require.ElementsMatch(t, []string{ + "baz/", + "baz/keep", + "qux/", + }, keys) } -// moby/buildkit#2476 -func testBuildInfoInline(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) +// https://github.com/moby/buildkit/issues/3148 +func testMountStubsTimestamp(t *testing.T, sb integration.Sandbox) { c, err := New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() + const sourceDateEpoch = int64(1234567890) // Fri Feb 13 11:31:30 PM UTC 2009 st := llb.Image("busybox:latest").Run( - llb.Args([]string{"/bin/sh", "-c", `echo hello`}), + llb.Args([]string{"/bin/touch", fmt.Sprintf("--date=@%d", sourceDateEpoch), + "/bin", + "/etc", + "/var", + "/var/foo", + "/tmp", + "/tmp/foo2", + "/tmp/foo2/bar", +
}), + llb.AddMount("/var/foo", llb.Scratch(), llb.Tmpfs()), + llb.AddMount("/tmp/foo2/bar", llb.Scratch(), llb.Tmpfs()), ) def, err := st.Marshal(sb.Context()) require.NoError(t, err) - registry, err := sb.NewRegistry() - if errors.Is(err, integration.ErrRequirements) { - t.Skip(err.Error()) - } + tmpDir := t.TempDir() + tarFile := filepath.Join(tmpDir, "out.tar") + tarFileW, err := os.Create(tarFile) require.NoError(t, err) + defer tarFileW.Close() - cdAddress := sb.ContainerdAddress() - if cdAddress == "" { - t.Skip("rest of test requires containerd worker") - } - - client, err := newContainerd(cdAddress) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterTar, + Output: fixedWriteCloser(tarFileW), + }, + }, + }, nil) require.NoError(t, err) - defer client.Close() + tarFileW.Close() - ctx := namespaces.WithNamespace(sb.Context(), "buildkit") - - for _, tt := range []struct { - name string - buildAttrs bool - }{{ - "attrsEnabled", - true, - }, { - "attrsDisabled", - false, - }} { - t.Run(tt.name, func(t *testing.T) { - target := registry + "/buildkit/test-buildinfo:latest" - - _, err = c.Solve(sb.Context(), def, SolveOpt{ - Exports: []ExportEntry{ - { - Type: ExporterImage, - Attrs: map[string]string{ - "name": target, - "push": "true", - "buildinfo-attrs": strconv.FormatBool(tt.buildAttrs), - }, - }, - }, - FrontendAttrs: map[string]string{ - "build-arg:foo": "bar", - }, - }, nil) - require.NoError(t, err) - - img, err := client.GetImage(ctx, target) - require.NoError(t, err) - - desc, err := img.Config(ctx) - require.NoError(t, err) - - dt, err := content.ReadBlob(ctx, img.ContentStore(), desc) - require.NoError(t, err) - - var config binfotypes.ImageConfig - require.NoError(t, json.Unmarshal(dt, &config)) - - dec, err := base64.StdEncoding.DecodeString(config.BuildInfo) - require.NoError(t, err) - - var bi binfotypes.BuildInfo - require.NoError(t, json.Unmarshal(dec, &bi)) - - if tt.buildAttrs { - attrval := "bar" - require.Contains(t, bi.Attrs, "build-arg:foo") - require.Equal(t, bi.Attrs["build-arg:foo"], &attrval) - } else { - require.NotContains(t, bi.Attrs, "build-arg:foo") - } - require.Equal(t, len(bi.Sources), 1) - require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage) - require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest") - }) - } -} - -func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) { - requiresLinux(t) - c, err := New(sb.Context(), sb.Address()) + tarFileR, err := os.Open(tarFile) require.NoError(t, err) - defer c.Close() - - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - st := llb.Image("busybox:latest").Run( - llb.Args([]string{"/bin/sh", "-c", `echo hello`}), - ) - def, err := st.Marshal(sb.Context()) - if err != nil { - return nil, err + defer tarFileR.Close() + tarR := tar.NewReader(tarFileR) + touched := map[string]*tar.Header{ + "bin/": nil, // Regular dir + "etc/": nil, // Parent of file mounts (etc/{resolv.conf, hosts}) + "var/": nil, // Parent of dir mount (var/foo/) + "tmp/": nil, // Grandparent of dir mount (tmp/foo2/bar/) + // No support for reproducing the timestamps of mount point directories such as var/foo/ and tmp/foo2/bar/, + // because the touched timestamp value is lost when the mount is unmounted.
+ } + for { + hd, err := tarR.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + if x, ok := touched[hd.Name]; ok && x == nil { + touched[hd.Name] = hd } - return c.Solve(ctx, gateway.SolveRequest{ - Definition: def.ToPB(), - FrontendOpt: map[string]string{"build-arg:foo": "bar"}, - }) } + for name, hd := range touched { + t.Logf("Verifying %q (%+v)", name, hd) + require.NotNil(t, hd, name) + require.Equal(t, sourceDateEpoch, hd.ModTime.Unix(), name) + } +} - res, err := c.Build(sb.Context(), SolveOpt{}, "", frontend, nil) - require.NoError(t, err) - - require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo) - decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo]) - require.NoError(t, err) +func ensureFile(t *testing.T, path string) { + st, err := os.Stat(path) + require.NoError(t, err, "expected file at %s", path) + require.True(t, st.Mode().IsRegular()) +} - var exbi binfotypes.BuildInfo - err = json.Unmarshal(decbi, &exbi) +func ensureFileContents(t *testing.T, path, expectedContents string) { + contents, err := os.ReadFile(path) require.NoError(t, err) - - attrval := "bar" - require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval}) - require.Equal(t, len(exbi.Sources), 1) - require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage) - require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest") + require.Equal(t, expectedContents, string(contents)) } -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-client") +func makeSSHAgentSock(t *testing.T, agent agent.Agent) (p string, err error) { + tmpDir, err := integration.Tmpdir(t) if err != nil { return "", err } - if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { + + sockPath := filepath.Join(tmpDir, "ssh_auth_sock") + + l, err := net.Listen("unix", sockPath) + if err != nil { return "", err } - return tmpdir, nil + t.Cleanup(func() { + require.NoError(t, l.Close()) + }) + + s := &server{l: l} + go s.run(agent) + + return sockPath, nil +} + +type imageTimestamps struct { + FromImage []string // from img.Created and img.[]History.Created + FromAnnotation string // from index.Manifests[0].Annotations["org.opencontainers.image.created"] } -func makeSSHAgentSock(agent agent.Agent) (p string, cleanup func() error, err error) { - tmpDir, err := ioutil.TempDir("", "buildkit") +func readImageTimestamps(dt []byte) (*imageTimestamps, error) { + m, err := testutil.ReadTarToMap(dt, false) if err != nil { - return "", nil, err + return nil, err } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - sockPath := filepath.Join(tmpDir, "ssh_auth_sock") + if _, ok := m["oci-layout"]; !ok { + return nil, errors.Errorf("no oci-layout") + } - l, err := net.Listen("unix", sockPath) - if err != nil { - return "", nil, err + var index ocispecs.Index + if err := json.Unmarshal(m["index.json"].Data, &index); err != nil { + return nil, err + } + if len(index.Manifests) != 1 { + return nil, errors.Errorf("invalid manifest count %d", len(index.Manifests)) } - s := &server{l: l} - go s.run(agent) + var res imageTimestamps + res.FromAnnotation = index.Manifests[0].Annotations[ocispecs.AnnotationCreated] + + var mfst ocispecs.Manifest + if err := json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst); err != nil { + return nil, err + } + // don't unmarshal to image type so we get the original string value + type history struct { + Created 
string `json:"created"` + } + + img := struct { + History []history `json:"history"` + Created string `json:"created"` + }{} + + if err := json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &img); err != nil { + return nil, err + } - return sockPath, func() error { - l.Close() - return os.RemoveAll(tmpDir) - }, nil + res.FromImage = []string{ + img.Created, + } + for _, h := range img.History { + res.FromImage = append(res.FromImage, h.Created) + } + return &res, nil } type server struct { @@ -5811,11 +8734,159 @@ func (*netModeDefault) UpdateConfigFile(in string) string { return in } +type netModeBridgeDNS struct{} + +func (*netModeBridgeDNS) UpdateConfigFile(in string) string { + return in + ` +# configure bridge networking +[worker.oci] +networkMode = "cni" +cniConfigPath = "/etc/buildkit/dns-cni.conflist" + +[worker.containerd] +networkMode = "cni" +cniConfigPath = "/etc/buildkit/dns-cni.conflist" + +[dns] +nameservers = ["10.11.0.1"] +` +} + var hostNetwork integration.ConfigUpdater = &netModeHost{} var defaultNetwork integration.ConfigUpdater = &netModeDefault{} +var bridgeDNSNetwork integration.ConfigUpdater = &netModeBridgeDNS{} func fixedWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) { return func(map[string]string) (io.WriteCloser, error) { return wc, nil } } + +func testSourcePolicy(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Image("busybox:1.34.1-uclibc").File( + llb.Copy(llb.HTTP("https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md"), + "README.md", "README.md")) + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + } + + type testCase struct { + srcPol *sourcepolicypb.Policy + expectedErr string + } + testCases := []testCase{ + { + // Valid + srcPol: &sourcepolicypb.Policy{ + Rules: []*sourcepolicypb.Rule{ + { + Action: sourcepolicypb.PolicyAction_CONVERT, + Selector: &sourcepolicypb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc", + }, + Updates: &sourcepolicypb.Update{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + { + Action: sourcepolicypb.PolicyAction_CONVERT, + Selector: &sourcepolicypb.Selector{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + }, + Updates: &sourcepolicypb.Update{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + Attrs: map[string]string{"http.checksum": "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53"}, + }, + }, + }, + }, + expectedErr: "", + }, + { + // Invalid docker-image source + srcPol: &sourcepolicypb.Policy{ + Rules: []*sourcepolicypb.Rule{ + { + Action: sourcepolicypb.PolicyAction_CONVERT, + Selector: &sourcepolicypb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc", + }, + Updates: &sourcepolicypb.Update{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // invalid + }, + }, + }, + }, + expectedErr: "docker.io/library/busybox:1.34.1-uclibc@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: not found", + }, 
+ { + // Invalid http source + srcPol: &sourcepolicypb.Policy{ + Rules: []*sourcepolicypb.Rule{ + { + Action: sourcepolicypb.PolicyAction_CONVERT, + Selector: &sourcepolicypb.Selector{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + }, + Updates: &sourcepolicypb.Update{ + Attrs: map[string]string{pb.AttrHTTPChecksum: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"}, // invalid + }, + }, + }, + }, + expectedErr: "digest mismatch sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53: sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + }, + } + for i, tc := range testCases { + tc := tc + t.Run(strconv.Itoa(i), func(t *testing.T) { + _, err = c.Build(sb.Context(), SolveOpt{SourcePolicy: tc.srcPol}, "", frontend, nil) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErr) + } + }) + } + + t.Run("Frontend policies", func(t *testing.T) { + denied := "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md" + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Image("busybox:1.34.1-uclibc").File( + llb.Copy(llb.HTTP(denied), + "README.md", "README.md")) + def, err := st.Marshal(sb.Context()) + if err != nil { + return nil, err + } + return c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + SourcePolicies: []*spb.Policy{{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: denied, + }, + }, + }, + }}, + }) + } + + _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil) + require.ErrorContains(t, err, sourcepolicy.ErrSourceDenied.Error()) + }) +} diff --git a/client/connhelper/ssh/ssh.go b/client/connhelper/ssh/ssh.go new file mode 100644 index 000000000000..e3666b572f2a --- /dev/null +++ b/client/connhelper/ssh/ssh.go @@ -0,0 +1,78 @@ +// Package ssh provides the connection helper for ssh:// URLs. +package ssh + +import ( + "context" + "net" + "net/url" + + "github.com/docker/cli/cli/connhelper/commandconn" + "github.com/moby/buildkit/client/connhelper" + "github.com/pkg/errors" +) + +func init() { + connhelper.Register("ssh", Helper) +} + +// Helper returns a helper for connecting through an SSH URL. +func Helper(u *url.URL) (*connhelper.ConnectionHelper, error) { + sp, err := SpecFromURL(u) + if err != nil { + return nil, err + } + return &connhelper.ConnectionHelper{ + ContextDialer: func(ctx context.Context, addr string) (net.Conn, error) { + args := []string{} + if sp.User != "" { + args = append(args, "-l", sp.User) + } + if sp.Port != "" { + args = append(args, "-p", sp.Port) + } + args = append(args, "--", sp.Host) + args = append(args, "buildctl") + if socket := sp.Socket; socket != "" { + args = append(args, "--addr", "unix://"+socket) + } + args = append(args, "dial-stdio") + // using a background context because the context remains active for the duration of the process, even after dial has completed + return commandconn.New(context.Background(), "ssh", args...) + }, + }, nil +} + +// Spec describes an SSH endpoint parsed from an ssh:// URL. +type Spec struct { + User string + Host string + Port string + Socket string +} + +// SpecFromURL creates Spec from URL. +// URL is like ssh://<user>@host:<port><path to socket>. +// Only <host> part is mandatory.
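With `Register("ssh", Helper)` in the `init` above, a BuildKit client that blank-imports this package can dial a daemon through SSH by passing an ssh:// address; the helper shells out to `ssh` and runs `buildctl dial-stdio` on the remote side. A minimal sketch of that usage, assuming the client resolves registered connhelper schemes the way buildctl does (the host, port, and socket path below are hypothetical, and the remote host needs `buildctl` on its PATH):

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client"
	// Blank import registers the ssh:// scheme with the connhelper registry.
	_ "github.com/moby/buildkit/client/connhelper/ssh"
)

func main() {
	ctx := context.Background()

	// Hypothetical endpoint: user "me", port 2222, rootless daemon socket.
	c, err := client.New(ctx, "ssh://me@build-host:2222/run/user/1000/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Any API call now tunnels over the SSH-backed gRPC connection.
	if _, err := c.ListWorkers(ctx); err != nil {
		log.Fatal(err)
	}
}
```

SpecFromURL, implemented next, supplies the parsed user/host/port/socket fields that Helper turns into the `ssh` argument list.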
+func SpecFromURL(u *url.URL) (*Spec, error) { + sp := Spec{ + Host: u.Hostname(), + Port: u.Port(), + Socket: u.Path, + } + if user := u.User; user != nil { + sp.User = user.Username() + if _, ok := user.Password(); ok { + return nil, errors.New("plain-text password is not supported") + } + } + if sp.Host == "" { + return nil, errors.Errorf("no host specified") + } + if u.RawQuery != "" { + return nil, errors.Errorf("extra query after the host: %q", u.RawQuery) + } + if u.Fragment != "" { + return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment) + } + return &sp, nil +} diff --git a/client/connhelper/ssh/ssh_test.go b/client/connhelper/ssh/ssh_test.go new file mode 100644 index 000000000000..e4afb31cf882 --- /dev/null +++ b/client/connhelper/ssh/ssh_test.go @@ -0,0 +1,39 @@ +package ssh + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSpecFromURL(t *testing.T) { + cases := map[string]*Spec{ + "ssh://foo": { + Host: "foo", + }, + "ssh://me@foo:10022/s/o/c/k/e/t.sock": { + User: "me", Host: "foo", Port: "10022", Socket: "/s/o/c/k/e/t.sock", + }, + "ssh://me:passw0rd@foo": nil, + "ssh://foo/bar": { + Host: "foo", Socket: "/bar", + }, + "ssh://foo?bar": nil, + "ssh://foo#bar": nil, + "ssh://": nil, + } + for s, expected := range cases { + u, err := url.Parse(s) + if err != nil { + t.Fatal(err) + } + got, err := SpecFromURL(u) + if expected != nil { + require.NoError(t, err) + require.EqualValues(t, expected, got, s) + } else { + require.Error(t, err, s) + } + } +} diff --git a/client/diskusage.go b/client/diskusage.go index 2a2373f9d36a..0918c7dcd40f 100644 --- a/client/diskusage.go +++ b/client/diskusage.go @@ -10,18 +10,18 @@ import ( ) type UsageInfo struct { - ID string - Mutable bool - InUse bool - Size int64 + ID string `json:"id"` + Mutable bool `json:"mutable"` + InUse bool `json:"inUse"` + Size int64 `json:"size"` - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int - Parents []string - Description string - RecordType UsageRecordType - Shared bool + CreatedAt time.Time `json:"createdAt"` + LastUsedAt *time.Time `json:"lastUsedAt"` + UsageCount int `json:"usageCount"` + Parents []string `json:"parents"` + Description string `json:"description"` + RecordType UsageRecordType `json:"recordType"` + Shared bool `json:"shared"` } func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { @@ -31,7 +31,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa } req := &controlapi.DiskUsageRequest{Filter: info.Filter} - resp, err := c.controlClient().DiskUsage(ctx, req) + resp, err := c.ControlClient().DiskUsage(ctx, req) if err != nil { return nil, errors.Wrap(err, "failed to call diskusage") } diff --git a/client/info.go b/client/info.go new file mode 100644 index 000000000000..d5bdbcec8968 --- /dev/null +++ b/client/info.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + controlapi "github.com/moby/buildkit/api/services/control" + apitypes "github.com/moby/buildkit/api/types" + "github.com/pkg/errors" +) + +type Info struct { + BuildkitVersion BuildkitVersion `json:"buildkitVersion"` +} + +type BuildkitVersion struct { + Package string `json:"package"` + Version string `json:"version"` + Revision string `json:"revision"` +} + +func (c *Client) Info(ctx context.Context) (*Info, error) { + res, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}) + if err != nil { + return nil, errors.Wrap(err, "failed to call info") + } + return 
&Info{ + BuildkitVersion: fromAPIBuildkitVersion(res.BuildkitVersion), + }, nil +} + +func fromAPIBuildkitVersion(in *apitypes.BuildkitVersion) BuildkitVersion { + if in == nil { + return BuildkitVersion{} + } + return BuildkitVersion{ + Package: in.Package, + Version: in.Version, + Revision: in.Revision, + } +} diff --git a/client/llb/definition.go b/client/llb/definition.go index 697c1f54c913..d6dda89bb14b 100644 --- a/client/llb/definition.go +++ b/client/llb/definition.go @@ -29,6 +29,10 @@ type DefinitionOp struct { // NewDefinitionOp returns a new operation from a marshalled definition. func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { + if def == nil { + return nil, errors.New("invalid nil input definition to definition op") + } + ops := make(map[digest.Digest]*pb.Op) defs := make(map[digest.Digest][]byte) platforms := make(map[digest.Digest]*ocispecs.Platform) diff --git a/client/llb/definition_test.go b/client/llb/definition_test.go index 8d7a4009aff9..4ef93e0ec357 100644 --- a/client/llb/definition_test.go +++ b/client/llb/definition_test.go @@ -118,3 +118,9 @@ func TestDefinitionInputCache(t *testing.T) { // 1 exec + 2x2 mounts from stA and stB + 1 src = 6 vertexes require.Equal(t, 6, len(vertexCache)) } + +func TestDefinitionNil(t *testing.T) { + // should be an error, not a panic + _, err := NewDefinitionOp(nil) + require.Error(t, err) +} diff --git a/client/llb/exec.go b/client/llb/exec.go index 994804a13992..2b1d9bd3f1ee 100644 --- a/client/llb/exec.go +++ b/client/llb/exec.go @@ -192,12 +192,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } meta := &pb.Meta{ - Args: args, - Env: env.ToArray(), - Cwd: cwd, - User: user, - Hostname: hostname, - CgroupParent: cgrpParent, + Args: args, + Env: env.ToArray(), + Cwd: cwd, + User: user, + Hostname: hostname, + CgroupParent: cgrpParent, + RemoveMountStubsRecursive: true, } extraHosts, err := getExtraHosts(e.base)(ctx, c) diff --git a/client/llb/llbtest/platform_test.go b/client/llb/llbtest/platform_test.go index 40c676927b88..f66da1b989ec 100644 --- a/client/llb/llbtest/platform_test.go +++ b/client/llb/llbtest/platform_test.go @@ -27,7 +27,7 @@ func TestCustomPlatform(t *testing.T) { def, err := s.Marshal(context.TODO()) require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) + e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.Equal(t, depth(e), 5) @@ -56,7 +56,7 @@ func TestDefaultPlatform(t *testing.T) { def, err := s.Marshal(context.TODO()) require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) + e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.Equal(t, depth(e), 2) @@ -80,7 +80,7 @@ func TestPlatformOnMarshal(t *testing.T) { def, err := s.Marshal(context.TODO(), llb.Windows) require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) + e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) expected := ocispecs.Platform{OS: "windows", Architecture: "amd64"} @@ -100,7 +100,7 @@ func TestPlatformMixed(t *testing.T) { def, err := s1.Marshal(context.TODO(), llb.LinuxAmd64) require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) + e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.Equal(t, depth(e), 4) @@ -129,7 +129,7 @@ func TestFallbackPath(t *testing.T) { // the cap. 
def, err := llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64) require.NoError(t, err) - e, err := llbsolver.Load(def.ToPB()) + e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) _, ok := getenv(e, "PATH") @@ -141,7 +141,7 @@ func TestFallbackPath(t *testing.T) { require.Error(t, cs.Supports(pb.CapExecMetaSetsDefaultPath)) def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64, llb.WithCaps(cs)) require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) + e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) v, ok := getenv(e, "PATH") @@ -155,7 +155,7 @@ func TestFallbackPath(t *testing.T) { require.NoError(t, cs.Supports(pb.CapExecMetaSetsDefaultPath)) def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64, llb.WithCaps(cs)) require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) + e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) require.True(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath]) _, ok = getenv(e, "PATH") @@ -171,7 +171,7 @@ func TestFallbackPath(t *testing.T) { } { def, err = llb.Scratch().AddEnv("PATH", "foo").Run(llb.Shlex("cmd")).Marshal(context.TODO(), append(cos, llb.LinuxAmd64)...) require.NoError(t, err) - e, err = llbsolver.Load(def.ToPB()) + e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil) require.NoError(t, err) // pb.CapExecMetaSetsDefaultPath setting is irrelevant (and variable). v, ok = getenv(e, "PATH") diff --git a/client/llb/marshal.go b/client/llb/marshal.go index e59e560ee95c..3b02299e431d 100644 --- a/client/llb/marshal.go +++ b/client/llb/marshal.go @@ -2,7 +2,6 @@ package llb import ( "io" - "io/ioutil" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/solver/pb" @@ -67,7 +66,7 @@ func WriteTo(def *Definition, w io.Writer) error { } func ReadFrom(r io.Reader) (*Definition, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return nil, err } @@ -88,10 +87,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { c.Platform = p } - for _, wc := range override.WorkerConstraints { - c.WorkerConstraints = append(c.WorkerConstraints, wc) - } - + c.WorkerConstraints = append(c.WorkerConstraints, override.WorkerConstraints...) 
c.Metadata = mergeMetadata(c.Metadata, override.Metadata) if c.Platform == nil { diff --git a/client/llb/resolver.go b/client/llb/resolver.go index af1edc10715e..b3b9cdf751c7 100644 --- a/client/llb/resolver.go +++ b/client/llb/resolver.go @@ -23,13 +23,35 @@ func ResolveDigest(v bool) ImageOption { }) } +func WithLayerLimit(l int) ImageOption { + return imageOptionFunc(func(ii *ImageInfo) { + ii.layerLimit = &l + }) +} + // ImageMetaResolver can resolve image config metadata from a reference type ImageMetaResolver interface { ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) } +type ResolverType int + +const ( + ResolverTypeRegistry ResolverType = iota + ResolverTypeOCILayout +) + type ResolveImageConfigOpt struct { + ResolverType + Platform *ocispecs.Platform ResolveMode string LogName string + + Store ResolveImageConfigOptStore +} + +type ResolveImageConfigOptStore struct { + SessionID string + StoreID string } diff --git a/client/llb/source.go b/client/llb/source.go index c1be90b70405..27c8c1b617f2 100644 --- a/client/llb/source.go +++ b/client/llb/source.go @@ -116,6 +116,11 @@ func Image(ref string, opts ...ImageOption) State { attrs[pb.AttrImageRecordType] = info.RecordType } + if ll := info.layerLimit; ll != nil { + attrs[pb.AttrImageLayerLimit] = strconv.FormatInt(int64(*ll), 10) + addCap(&info.Constraints, pb.CapSourceImageLayerLimit) + } + src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial if err != nil { src.err = err @@ -127,8 +132,9 @@ func Image(ref string, opts ...ImageOption) State { p = c.Platform } _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ - Platform: p, - ResolveMode: info.resolveMode.String(), + Platform: p, + ResolveMode: info.resolveMode.String(), + ResolverType: ResolverTypeRegistry, }) if err != nil { return State{}, err @@ -142,8 +148,9 @@ func Image(ref string, opts ...ImageOption) State { p = c.Platform } dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ - Platform: p, - ResolveMode: info.resolveMode.String(), + Platform: p, + ResolveMode: info.resolveMode.String(), + ResolverType: ResolverTypeRegistry, }) if err != nil { return State{}, err @@ -204,6 +211,7 @@ type ImageInfo struct { metaResolver ImageMetaResolver resolveDigest bool resolveMode ResolveMode + layerLimit *int RecordType string } @@ -446,6 +454,59 @@ func Differ(t DiffType, required bool) LocalOption { }) } +func OCILayout(ref string, opts ...OCILayoutOption) State { + gi := &OCILayoutInfo{} + + for _, o := range opts { + o.SetOCILayoutOption(gi) + } + attrs := map[string]string{} + if gi.sessionID != "" { + attrs[pb.AttrOCILayoutSessionID] = gi.sessionID + } + if gi.storeID != "" { + attrs[pb.AttrOCILayoutStoreID] = gi.storeID + } + if gi.layerLimit != nil { + attrs[pb.AttrOCILayoutLayerLimit] = strconv.FormatInt(int64(*gi.layerLimit), 10) + } + + addCap(&gi.Constraints, pb.CapSourceOCILayout) + + source := NewSource("oci-layout://"+ref, attrs, gi.Constraints) + return NewState(source.Output()) +} + +type OCILayoutOption interface { + SetOCILayoutOption(*OCILayoutInfo) +} + +type ociLayoutOptionFunc func(*OCILayoutInfo) + +func (fn ociLayoutOptionFunc) SetOCILayoutOption(li *OCILayoutInfo) { + fn(li) +} + +func OCIStore(sessionID string, storeID string) OCILayoutOption { + return ociLayoutOptionFunc(func(oi *OCILayoutInfo) { + oi.sessionID = sessionID + oi.storeID = storeID + }) +} + +func OCILayerLimit(limit int) 
OCILayoutOption { + return ociLayoutOptionFunc(func(oi *OCILayoutInfo) { + oi.layerLimit = &limit + }) +} + +type OCILayoutInfo struct { + constraintsWrapper + sessionID string + storeID string + layerLimit *int +} + type DiffType string const ( @@ -549,7 +610,7 @@ func Chown(uid, gid int) HTTPOption { } func platformSpecificSource(id string) bool { - return strings.HasPrefix(id, "docker-image://") + return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://") } func addCap(c *Constraints, id apicaps.CapID) { diff --git a/client/llb/sourcemap.go b/client/llb/sourcemap.go index 149355d92e5a..17cc1de6f538 100644 --- a/client/llb/sourcemap.go +++ b/client/llb/sourcemap.go @@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) { } smc.index[l.SourceMap] = idx } - smc.locations[dgst] = ls + smc.locations[dgst] = append(smc.locations[dgst], ls...) } func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) { diff --git a/client/llb/state.go b/client/llb/state.go index 0295f635ccfa..7d35f3be5968 100644 --- a/client/llb/state.go +++ b/client/llb/state.go @@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect if opMeta != nil { def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta) } + s.Add(dgst, sls) if _, ok := cache[dgst]; ok { return def, nil } - s.Add(dgst, sls) def.Def = append(def.Def, dt) cache[dgst] = struct{}{} return def, nil @@ -230,13 +230,7 @@ func (s State) WithOutput(o Output) State { } func (s State) WithImageConfig(c []byte) (State, error) { - var img struct { - Config struct { - Env []string `json:"Env,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - User string `json:"User,omitempty"` - } `json:"config,omitempty"` - } + var img ocispecs.Image if err := json.Unmarshal(c, &img); err != nil { return State{}, err } @@ -251,6 +245,13 @@ func (s State) WithImageConfig(c []byte) (State, error) { } } s = s.Dir(img.Config.WorkingDir) + if img.Architecture != "" && img.OS != "" { + s = s.Platform(ocispecs.Platform{ + OS: img.OS, + Architecture: img.Architecture, + Variant: img.Variant, + }) + } return s, nil } @@ -454,6 +455,7 @@ type ConstraintsOpt interface { HTTPOption ImageOption GitOption + OCILayoutOption } type constraintsOptFunc func(m *Constraints) @@ -470,6 +472,10 @@ func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) { li.applyConstraints(fn) } +func (fn constraintsOptFunc) SetOCILayoutOption(oi *OCILayoutInfo) { + oi.applyConstraints(fn) +} + func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) { hi.applyConstraints(fn) } @@ -611,6 +617,7 @@ var ( LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"}) LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"}) + LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"}) LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"}) Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"}) Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"}) @@ -618,9 +625,7 @@ var ( func Require(filters ...string) ConstraintsOpt { return constraintsOptFunc(func(c *Constraints) { - for _, f := range filters { - c.WorkerConstraints = append(c.WorkerConstraints, f) - } + c.WorkerConstraints = append(c.WorkerConstraints, filters...) 
}) } diff --git a/client/llb/state_test.go b/client/llb/state_test.go index a29af4dc5fbb..35a901a1d3fd 100644 --- a/client/llb/state_test.go +++ b/client/llb/state_test.go @@ -99,6 +99,44 @@ func TestStateSourceMapMarshal(t *testing.T) { require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[2].SourceIndex) require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[2].Ranges)) require.Equal(t, int32(9), def.Source.Locations[dgst.String()].Locations[2].Ranges[0].Start.Line) + + s = Merge([]State{s, Image("myimage", + sm1.Location([]*pb.Range{{Start: pb.Position{Line: 10}}}), + )}) + def, err = s.Marshal(context.TODO()) + require.NoError(t, err) + require.Equal(t, 3, len(def.Def)) + dgst = digest.FromBytes(def.Def[0]) + + require.Equal(t, 2, len(def.Source.Infos)) + require.Equal(t, 2, len(def.Source.Locations)) + + require.Equal(t, "foo", def.Source.Infos[0].Filename) + require.Equal(t, []byte("data1"), def.Source.Infos[0].Data) + require.Nil(t, def.Source.Infos[0].Definition) + + require.Equal(t, "bar", def.Source.Infos[1].Filename) + require.Equal(t, []byte("data2"), def.Source.Infos[1].Data) + require.Nil(t, def.Source.Infos[1].Definition) + + require.NotNil(t, def.Source.Locations[dgst.String()]) + require.Equal(t, 4, len(def.Source.Locations[dgst.String()].Locations)) + + require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[0].SourceIndex) + require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[0].Ranges)) + require.Equal(t, int32(7), def.Source.Locations[dgst.String()].Locations[0].Ranges[0].Start.Line) + + require.Equal(t, int32(1), def.Source.Locations[dgst.String()].Locations[1].SourceIndex) + require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[1].Ranges)) + require.Equal(t, int32(8), def.Source.Locations[dgst.String()].Locations[1].Ranges[0].Start.Line) + + require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[2].SourceIndex) + require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[2].Ranges)) + require.Equal(t, int32(9), def.Source.Locations[dgst.String()].Locations[2].Ranges[0].Start.Line) + + require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[3].SourceIndex) + require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[3].Ranges)) + require.Equal(t, int32(10), def.Source.Locations[dgst.String()].Locations[3].Ranges[0].Start.Line) } func TestPlatformFromImage(t *testing.T) { diff --git a/client/mergediff_test.go b/client/mergediff_test.go index da1d12d42f55..61fdc9b5062c 100644 --- a/client/mergediff_test.go +++ b/client/mergediff_test.go @@ -3,7 +3,6 @@ package client import ( "context" "fmt" - "os" "strings" "testing" @@ -1192,6 +1191,13 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) { t.Skip("rootless") } + switch tc.name { + case "TestDiffUpperScratch": + if integration.IsTestDockerdMoby(sb) { + t.Skip("failed to handle changes: lstat ... 
no such file or directory: https://github.com/moby/buildkit/pull/2726#issuecomment-1070978499") + } + } + requiresLinux(t) cdAddress := sb.ContainerdAddress() @@ -1218,7 +1224,7 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) { var exportInlineCacheOpts []CacheOptionsEntry var importRegistryCacheOpts []CacheOptionsEntry var exportRegistryCacheOpts []CacheOptionsEntry - if os.Getenv("TEST_DOCKERD") != "1" { + if !integration.IsTestDockerd() { importInlineCacheOpts = []CacheOptionsEntry{{ Type: "registry", Attrs: map[string]string{ @@ -1245,7 +1251,7 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) { resetState(t, c, sb) requireContents(ctx, t, c, sb, tc.state, nil, exportInlineCacheOpts, imageTarget, tc.contents(sb)) - if os.Getenv("TEST_DOCKERD") == "1" { + if integration.IsTestDockerd() { return } @@ -1266,8 +1272,9 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) { { Type: ExporterImage, Attrs: map[string]string{ - "name": imageTarget, - "push": "true", + "name": imageTarget, + "push": "true", + "unsafe-internal-store-allow-incomplete": "true", }, }, }, @@ -1310,7 +1317,6 @@ func (tc verifyBlobReuse) Name() string { } func (tc verifyBlobReuse) Run(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) requiresLinux(t) cdAddress := sb.ContainerdAddress() diff --git a/client/ociindex/ociindex.go b/client/ociindex/ociindex.go index a9c100a95bcd..3731ff36bb23 100644 --- a/client/ociindex/ociindex.go +++ b/client/ociindex/ociindex.go @@ -2,8 +2,9 @@ package ociindex import ( "encoding/json" - "io/ioutil" + "io" "os" + "path" "github.com/gofrs/flock" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -11,69 +12,86 @@ import ( ) const ( - // IndexJSONLockFileSuffix is the suffix of the lock file - IndexJSONLockFileSuffix = ".lock" + // indexFile is the name of the index file + indexFile = "index.json" + + // lockFileSuffix is the suffix of the lock file + lockFileSuffix = ".lock" ) -// PutDescToIndex puts desc to index with tag. -// Existing manifests with the same tag will be removed from the index. 
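The removed PutDescToIndex/ReadIndexJSONFileLocked helpers below are folded into the new StoreIndex type, which owns both the index.json path and its .lock file. A minimal sketch of the new call pattern (the layout path and descriptor are hypothetical stand-ins):

```go
package main

import (
	"fmt"
	"log"

	"github.com/moby/buildkit/client/ociindex"
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical OCI layout directory containing (or about to contain) index.json.
	idx := ociindex.NewStoreIndex("/tmp/oci-layout")

	// Put records a manifest descriptor under a tag, replacing any manifest
	// already annotated with the same tag; writes are guarded by the .lock flock.
	desc := ocispecs.Descriptor{
		MediaType: ocispecs.MediaTypeImageManifest,
		Digest:    digest.FromString("placeholder"), // stand-in digest
	}
	if err := idx.Put("latest", desc); err != nil {
		log.Fatal(err)
	}

	// Get resolves a tag back to its descriptor (nil if absent);
	// GetSingle returns the sole manifest of a single-image index.
	d, err := idx.Get("latest")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Digest)
}
```

Put keeps the semantics of the old PutDescToIndex: a manifest already carrying the same ref-name annotation is replaced rather than duplicated.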
-func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error { - if index == nil { - index = &ocispecs.Index{} +type StoreIndex struct { + indexPath string + lockPath string +} + +func NewStoreIndex(storePath string) StoreIndex { + indexPath := path.Join(storePath, indexFile) + return StoreIndex{ + indexPath: indexPath, + lockPath: indexPath + lockFileSuffix, } - if index.SchemaVersion == 0 { - index.SchemaVersion = 2 +} + +func (s StoreIndex) Read() (*ocispecs.Index, error) { + lock := flock.New(s.lockPath) + locked, err := lock.TryRLock() + if err != nil { + return nil, errors.Wrapf(err, "could not lock %s", s.lockPath) } - if tag != "" { - if desc.Annotations == nil { - desc.Annotations = make(map[string]string) - } - desc.Annotations[ocispecs.AnnotationRefName] = tag - // remove existing manifests with the same tag - var manifests []ocispecs.Descriptor - for _, m := range index.Manifests { - if m.Annotations[ocispecs.AnnotationRefName] != tag { - manifests = append(manifests, m) - } - } - index.Manifests = manifests + if !locked { + return nil, errors.Errorf("could not lock %s", s.lockPath) } - index.Manifests = append(index.Manifests, desc) - return nil + defer func() { + lock.Unlock() + os.RemoveAll(s.lockPath) + }() + + b, err := os.ReadFile(s.indexPath) + if err != nil { + return nil, errors.Wrapf(err, "could not read %s", s.indexPath) + } + var idx ocispecs.Index + if err := json.Unmarshal(b, &idx); err != nil { + return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) + } + return &idx, nil } -func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor, tag string) error { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) +func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { + lock := flock.New(s.lockPath) locked, err := lock.TryLock() if err != nil { - return errors.Wrapf(err, "could not lock %s", lockPath) + return errors.Wrapf(err, "could not lock %s", s.lockPath) } if !locked { - return errors.Errorf("could not lock %s", lockPath) + return errors.Errorf("could not lock %s", s.lockPath) } defer func() { lock.Unlock() - os.RemoveAll(lockPath) + os.RemoveAll(s.lockPath) }() - f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644) + + f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) if err != nil { - return errors.Wrapf(err, "could not open %s", indexJSONPath) + return errors.Wrapf(err, "could not open %s", s.indexPath) } defer f.Close() + var idx ocispecs.Index - b, err := ioutil.ReadAll(f) + b, err := io.ReadAll(f) if err != nil { - return errors.Wrapf(err, "could not read %s", indexJSONPath) + return errors.Wrapf(err, "could not read %s", s.indexPath) } if len(b) > 0 { if err := json.Unmarshal(b, &idx); err != nil { - return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) + return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) } } - if err = PutDescToIndex(&idx, desc, tag); err != nil { + + if err = insertDesc(&idx, desc, tag); err != nil { return err } + b, err = json.Marshal(idx) if err != nil { return err @@ -87,27 +105,56 @@ func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor return nil } -func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) - locked, err := lock.TryRLock() +func (s StoreIndex) Get(tag string) (*ocispecs.Descriptor, 
error) { + idx, err := s.Read() if err != nil { - return nil, errors.Wrapf(err, "could not lock %s", lockPath) + return nil, err } - if !locked { - return nil, errors.Errorf("could not lock %s", lockPath) + + for _, m := range idx.Manifests { + if t, ok := m.Annotations[ocispecs.AnnotationRefName]; ok && t == tag { + return &m, nil + } } - defer func() { - lock.Unlock() - os.RemoveAll(lockPath) - }() - b, err := ioutil.ReadFile(indexJSONPath) + return nil, nil +} + +func (s StoreIndex) GetSingle() (*ocispecs.Descriptor, error) { + idx, err := s.Read() if err != nil { - return nil, errors.Wrapf(err, "could not read %s", indexJSONPath) + return nil, err } - var idx ocispecs.Index - if err := json.Unmarshal(b, &idx); err != nil { - return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) + + if len(idx.Manifests) == 1 { + return &idx.Manifests[0], nil } - return &idx, nil + return nil, nil +} + +// insertDesc puts desc to index with tag. +// Existing manifests with the same tag will be removed from the index. +func insertDesc(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error { + if index == nil { + return nil + } + + if index.SchemaVersion == 0 { + index.SchemaVersion = 2 + } + if tag != "" { + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + desc.Annotations[ocispecs.AnnotationRefName] = tag + // remove existing manifests with the same tag + var manifests []ocispecs.Descriptor + for _, m := range index.Manifests { + if m.Annotations[ocispecs.AnnotationRefName] != tag { + manifests = append(manifests, m) + } + } + index.Manifests = manifests + } + index.Manifests = append(index.Manifests, desc) + return nil } diff --git a/client/prune.go b/client/prune.go index ed4815cb5aac..af8491385558 100644 --- a/client/prune.go +++ b/client/prune.go @@ -23,7 +23,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti if info.All { req.All = true } - cl, err := c.controlClient().Prune(ctx, req) + cl, err := c.ControlClient().Prune(ctx, req) if err != nil { return errors.Wrap(err, "failed to call prune") } diff --git a/client/solve.go b/client/solve.go index f14d9c410d79..65183d61cd88 100644 --- a/client/solve.go +++ b/client/solve.go @@ -2,6 +2,7 @@ package client import ( "context" + "encoding/base64" "encoding/json" "io" "os" @@ -14,16 +15,19 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/ociindex" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" sessioncontent "github.com/moby/buildkit/session/content" "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/entitlements" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" @@ -32,6 +36,7 @@ import ( type SolveOpt struct { Exports []ExportEntry LocalDirs map[string]string + OCIStores map[string]content.Store SharedKey string Frontend string FrontendAttrs map[string]string @@ -42,6 +47,9 @@ type SolveOpt struct { AllowedEntitlements []entitlements.Entitlement SharedSession *session.Session // TODO: refactor to better 
session syncing SessionPreInitialized bool // TODO: refactor to better session syncing + Internal bool + SourcePolicy *spb.Policy + Ref string } type ExportEntry struct { @@ -88,6 +96,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } ref := identity.NewID() + if opt.Ref != "" { + ref = opt.Ref + } eg, ctx := errgroup.WithContext(ctx) statusContext, cancelStatus := context.WithCancel(context.Background()) @@ -122,6 +133,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG ex = opt.Exports[0] } + storesToUpdate := []string{} + if !opt.SessionPreInitialized { if len(syncedDirs) > 0 { s.Allow(filesync.NewFSSyncProvider(syncedDirs)) @@ -131,50 +144,85 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG s.Allow(a) } + contentStores := map[string]content.Store{} + for key, store := range cacheOpt.contentStores { + contentStores[key] = store + } + for key, store := range opt.OCIStores { + key2 := "oci:" + key + if _, ok := contentStores[key2]; ok { + return nil, errors.Errorf("oci store key %q already exists", key) + } + contentStores[key2] = store + } + + var supportFile bool + var supportDir bool switch ex.Type { case ExporterLocal: - if ex.Output != nil { - return nil, errors.New("output file writer is not supported by local exporter") - } - if ex.OutputDir == "" { - return nil, errors.New("output directory is required for local exporter") - } - s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) - case ExporterOCI, ExporterDocker, ExporterTar: - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) - } + supportDir = true + case ExporterTar: + supportFile = true + case ExporterOCI, ExporterDocker: + supportDir = ex.OutputDir != "" + supportFile = ex.Output != nil + } + + if supportFile && supportDir { + return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type) + } + if !supportFile && ex.Output != nil { + return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) + } + if !supportDir && ex.OutputDir != "" { + return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type) + } + + if supportFile { if ex.Output == nil { return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type) } s.Allow(filesync.NewFSSyncTarget(ex.Output)) - default: - if ex.Output != nil { - return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) + } + if supportDir { + if ex.OutputDir == "" { + return nil, errors.Errorf("output directory is required for %s exporter", ex.Type) } - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) + switch ex.Type { + case ExporterOCI, ExporterDocker: + if err := os.MkdirAll(ex.OutputDir, 0755); err != nil { + return nil, err + } + cs, err := contentlocal.NewStore(ex.OutputDir) + if err != nil { + return nil, err + } + contentStores["export"] = cs + storesToUpdate = append(storesToUpdate, ex.OutputDir) + default: + s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) } } - if len(cacheOpt.contentStores) > 0 { - s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores)) + if len(contentStores) > 0 { + s.Allow(sessioncontent.NewAttachable(contentStores)) } eg.Go(func() error { sd := c.sessionDialer if sd == nil { - sd = grpchijack.Dialer(c.controlClient()) + sd = 
grpchijack.Dialer(c.ControlClient()) } return s.Run(statusContext, sd) }) } + frontendAttrs := map[string]string{} + for k, v := range opt.FrontendAttrs { + frontendAttrs[k] = v + } for k, v := range cacheOpt.frontendAttrs { - if opt.FrontendAttrs == nil { - opt.FrontendAttrs = map[string]string{} - } - opt.FrontendAttrs[k] = v + frontendAttrs[k] = v } solveCtx, cancelSolve := context.WithCancel(ctx) @@ -188,8 +236,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG <-time.After(3 * time.Second) cancelStatus() }() - bklog.G(ctx).Debugf("stopping session") - s.Close() + if !opt.SessionPreInitialized { + bklog.G(ctx).Debugf("stopping session") + s.Close() + } }() var pbd *pb.Definition if def != nil { @@ -205,17 +255,19 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG frontendInputs[key] = def.ToPB() } - resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ + resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{ Ref: ref, Definition: pbd, Exporter: ex.Type, ExporterAttrs: ex.Attrs, Session: s.ID(), Frontend: opt.Frontend, - FrontendAttrs: opt.FrontendAttrs, + FrontendAttrs: frontendAttrs, FrontendInputs: frontendInputs, Cache: cacheOpt.options, Entitlements: opt.AllowedEntitlements, + Internal: opt.Internal, + SourcePolicy: opt.SourcePolicy, }) if err != nil { return errors.Wrap(err, "failed to solve") @@ -228,7 +280,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if runGateway != nil { eg.Go(func() error { - err := runGateway(ref, s, opt.FrontendAttrs) + err := runGateway(ref, s, frontendAttrs) if err == nil { return nil } @@ -249,7 +301,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } eg.Go(func() error { - stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ + stream, err := c.ControlClient().Status(statusContext, &controlapi.StatusRequest{ Ref: ref, }) if err != nil { @@ -263,52 +315,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } return errors.Wrap(err, "failed to receive status") } - s := SolveStatus{} - for _, v := range resp.Vertexes { - s.Vertexes = append(s.Vertexes, &Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - ProgressGroup: v.ProgressGroup, - }) - } - for _, v := range resp.Statuses { - s.Statuses = append(s.Statuses, &VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Total: v.Total, - Current: v.Current, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range resp.Logs { - s.Logs = append(s.Logs, &VertexLog{ - Vertex: v.Vertex, - Stream: int(v.Stream), - Data: v.Msg, - Timestamp: v.Timestamp, - }) - } - for _, v := range resp.Warnings { - s.Warnings = append(s.Warnings, &VertexWarning{ - Vertex: v.Vertex, - Level: int(v.Level), - Short: v.Short, - Detail: v.Detail, - URL: v.Url, - SourceInfo: v.Info, - Range: v.Ranges, - }) - } if statusChan != nil { - statusChan <- &s + statusChan <- NewSolveStatus(resp) } } }) @@ -323,8 +331,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil { return nil, err } - for indexJSONPath, tag := range cacheOpt.indicesToUpdate { - if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil { + for 
storePath, tag := range cacheOpt.storesToUpdate { + idx := ociindex.NewStoreIndex(storePath) + if err := idx.Put(tag, manifestDesc); err != nil { + return nil, err + } + } + } + if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" { + manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt) + if err != nil { + return nil, err + } + var manifestDesc ocispecs.Descriptor + if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil { + return nil, err + } + for _, storePath := range storesToUpdate { + tag := "latest" + if t, ok := res.ExporterResponse["image.name"]; ok { + tag = t + } + idx := ociindex.NewStoreIndex(storePath) + if err := idx.Put(tag, manifestDesc); err != nil { return nil, err } } @@ -332,7 +361,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG return res, nil } -func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { +func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) { for _, d := range localDirs { fi, err := os.Stat(d) if err != nil { @@ -342,16 +371,16 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file return nil, errors.Errorf("%s not a directory", d) } } - resetUIDAndGID := func(p string, st *fstypes.Stat) bool { + resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult { st.Uid = 0 st.Gid = 0 - return true + return fsutil.MapResultKeep } - dirs := make([]filesync.SyncedDir, 0, len(localDirs)) + dirs := make(filesync.StaticDirSource, len(localDirs)) if def == nil { for name, d := range localDirs { - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) + dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} } } else { for _, dt := range def.Def { @@ -366,7 +395,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file if !ok { return nil, errors.Errorf("local directory %s not enabled", name) } - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) + dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} } } } @@ -383,24 +412,20 @@ func defaultSessionName() string { } type cacheOptions struct { - options controlapi.CacheOptions - contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) - indicesToUpdate map[string]string // key: index.JSON file name, value: tag - frontendAttrs map[string]string + options controlapi.CacheOptions + contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) + storesToUpdate map[string]string // key: path to content store, value: tag + frontendAttrs map[string]string } func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) { var ( cacheExports []*controlapi.CacheOptionsEntry cacheImports []*controlapi.CacheOptionsEntry - // legacy API is used for registry caches, because the daemon might not support the new API - legacyExportRef string - legacyImportRefs []string ) contentStores := make(map[string]content.Store) - indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag + storesToUpdate := make(map[string]string) frontendAttrs := make(map[string]string) - legacyExportAttrs := make(map[string]string) for _, ex := range opt.CacheExports { if ex.Type == "local" { csDir := ex.Attrs["dest"] @@ -415,26 +440,26 @@ func parseCacheOptions(ctx context.Context, isGateway bool, 
opt SolveOpt) (*cach return nil, err } contentStores["local:"+csDir] = cs + + tag := "latest" + if t, ok := ex.Attrs["tag"]; ok { + tag = t + } // TODO(AkihiroSuda): support custom index JSON path and tag - indexJSONPath := filepath.Join(csDir, "index.json") - indicesToUpdate[indexJSONPath] = "latest" - } - if ex.Type == "registry" && legacyExportRef == "" { - legacyExportRef = ex.Attrs["ref"] - for k, v := range ex.Attrs { - if k != "ref" { - legacyExportAttrs[k] = v - } + storesToUpdate[csDir] = tag + } + if ex.Type == "registry" { + regRef := ex.Attrs["ref"] + if regRef == "" { + return nil, errors.New("registry cache exporter requires ref") } - } else { - cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ - Type: ex.Type, - Attrs: ex.Attrs, - }) } + cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ + Type: ex.Type, + Attrs: ex.Attrs, + }) } for _, im := range opt.CacheImports { - attrs := im.Attrs if im.Type == "local" { csDir := im.Attrs["src"] if csDir == "" { @@ -445,41 +470,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } - // if digest is not specified, load from "latest" tag - if attrs["digest"] == "" { - idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json")) + // if digest is not specified, attempt to load from tag + if im.Attrs["digest"] == "" { + tag := "latest" + if t, ok := im.Attrs["tag"]; ok { + tag = t + } + + idx := ociindex.NewStoreIndex(csDir) + desc, err := idx.Get(tag) if err != nil { bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } - for _, m := range idx.Manifests { - if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) { - attrs["digest"] = string(m.Digest) - break - } - } - if attrs["digest"] == "" { - return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") + if desc != nil { + im.Attrs["digest"] = desc.Digest.String() } } + if im.Attrs["digest"] == "" { + return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") + } contentStores["local:"+csDir] = cs } if im.Type == "registry" { - legacyImportRef := attrs["ref"] - legacyImportRefs = append(legacyImportRefs, legacyImportRef) - } else { - cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ - Type: im.Type, - Attrs: attrs, - }) + regRef := im.Attrs["ref"] + if regRef == "" { + return nil, errors.New("registry cache importer requires ref") + } } + cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) } if opt.Frontend != "" || isGateway { - // use legacy API for registry importers, because the frontend might not support the new API - if len(legacyImportRefs) > 0 { - frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",") - } - // use new API for other importers if len(cacheImports) > 0 { s, err := json.Marshal(cacheImports) if err != nil { @@ -490,17 +514,12 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach } res := cacheOptions{ options: controlapi.CacheOptions{ - // old API (for registry caches, planned to be removed in early 2019) - ExportRefDeprecated: legacyExportRef, - ExportAttrsDeprecated: 
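Editor's note: the new `SolveOpt.Ref` and `SolveOpt.OCIStores` fields above can be driven directly from the client library. A minimal sketch, assuming a local buildkitd socket; the layout path and the `"mystore"` store id are hypothetical, and progress is simply discarded:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/identity"
)

func main() {
	ctx := context.Background()
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	def, err := llb.Image("docker.io/library/alpine:latest").Marshal(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Content store backed by an OCI layout directory (assumed path); it is
	// attached to the session under the key "oci:mystore".
	cs, err := local.NewStore("/tmp/my-oci-layout")
	if err != nil {
		log.Fatal(err)
	}
	status := make(chan *client.SolveStatus)
	go func() {
		for range status { // discard progress events in this sketch
		}
	}()
	_, err = c.Solve(ctx, def, client.SolveOpt{
		Ref:       identity.NewID(), // pin the build ref up front so callers can record it
		OCIStores: map[string]content.Store{"mystore": cs},
	}, status)
	if err != nil {
		log.Fatal(err)
	}
}
```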
diff --git a/client/status.go b/client/status.go
new file mode 100644
index 000000000000..d692094af3fb
--- /dev/null
+++ b/client/status.go
@@ -0,0 +1,125 @@
+package client
+
+import (
+	controlapi "github.com/moby/buildkit/api/services/control"
+)
+
+var emptyLogVertexSize int
+
+func init() {
+	emptyLogVertex := controlapi.VertexLog{}
+	emptyLogVertexSize = emptyLogVertex.Size()
+}
+
+func NewSolveStatus(resp *controlapi.StatusResponse) *SolveStatus {
+	s := &SolveStatus{}
+	for _, v := range resp.Vertexes {
+		s.Vertexes = append(s.Vertexes, &Vertex{
+			Digest:        v.Digest,
+			Inputs:        v.Inputs,
+			Name:          v.Name,
+			Started:       v.Started,
+			Completed:     v.Completed,
+			Error:         v.Error,
+			Cached:        v.Cached,
+			ProgressGroup: v.ProgressGroup,
+		})
+	}
+	for _, v := range resp.Statuses {
+		s.Statuses = append(s.Statuses, &VertexStatus{
+			ID:        v.ID,
+			Vertex:    v.Vertex,
+			Name:      v.Name,
+			Total:     v.Total,
+			Current:   v.Current,
+			Timestamp: v.Timestamp,
+			Started:   v.Started,
+			Completed: v.Completed,
+		})
+	}
+	for _, v := range resp.Logs {
+		s.Logs = append(s.Logs, &VertexLog{
+			Vertex:    v.Vertex,
+			Stream:    int(v.Stream),
+			Data:      v.Msg,
+			Timestamp: v.Timestamp,
+		})
+	}
+	for _, v := range resp.Warnings {
+		s.Warnings = append(s.Warnings, &VertexWarning{
+			Vertex:     v.Vertex,
+			Level:      int(v.Level),
+			Short:      v.Short,
+			Detail:     v.Detail,
+			URL:        v.Url,
+			SourceInfo: v.Info,
+			Range:      v.Ranges,
+		})
+	}
+	return s
+}
+
+func (ss *SolveStatus) Marshal() (out []*controlapi.StatusResponse) {
+	logSize := 0
+	for {
+		retry := false
+		sr := controlapi.StatusResponse{}
+		for _, v := range ss.Vertexes {
+			sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
+				Digest:        v.Digest,
+				Inputs:        v.Inputs,
+				Name:          v.Name,
+				Started:       v.Started,
+				Completed:     v.Completed,
+				Error:         v.Error,
+				Cached:        v.Cached,
+				ProgressGroup: v.ProgressGroup,
+			})
+		}
+		for _, v := range ss.Statuses {
+			sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
+				ID:        v.ID,
+				Vertex:    v.Vertex,
+				Name:      v.Name,
+				Current:   v.Current,
+				Total:     v.Total,
+				Timestamp: v.Timestamp,
+				Started:   v.Started,
+				Completed: v.Completed,
+			})
+		}
+		for i, v := range ss.Logs {
+			sr.Logs = append(sr.Logs, &controlapi.VertexLog{
+				Vertex:    v.Vertex,
+				Stream:    int64(v.Stream),
+				Msg:       v.Data,
+				Timestamp: v.Timestamp,
+			})
+			logSize += len(v.Data) + emptyLogVertexSize
+			// avoid logs growing too big; split them apart if they do
+			if logSize > 1024*1024 {
+				ss.Vertexes = nil
+				ss.Statuses = nil
+				ss.Logs = ss.Logs[i+1:]
+				retry = true
+				break
+			}
+		}
+		for _, v := range ss.Warnings {
+			sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{
+				Vertex: v.Vertex,
+				Level:  int64(v.Level),
+				Short:  v.Short,
+				Detail: v.Detail,
+				Info:   v.SourceInfo,
+				Ranges: v.Range,
+				Url:    v.URL,
+			})
+		}
+		out = append(out, &sr)
+		if !retry {
+			break
+		}
+	}
+	return
+}
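Editor's note: `NewSolveStatus` replaces the conversion loop that `solve()` used to open-code. A minimal sketch of a custom status consumer built on it, assuming `c` is a `*client.Client` and `ref` names a running build:

```go
// followStatus streams the build status for ref and prints vertex names,
// converting each wire response with the new client.NewSolveStatus helper.
func followStatus(ctx context.Context, c *client.Client, ref string) error {
	stream, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{Ref: ref})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil // stream ended cleanly
			}
			return err
		}
		ss := client.NewSolveStatus(resp) // one call instead of four conversion loops
		for _, v := range ss.Vertexes {
			fmt.Printf("%s cached=%v\n", v.Name, v.Cached)
		}
	}
}
```

`Marshal` is the inverse direction, and its 1MB cap means one large `SolveStatus` may fan out into several `StatusResponse` messages, so consumers should not assume a 1:1 mapping.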
`json:"gcPolicy"` + ID string `json:"id"` + Labels map[string]string `json:"labels"` + Platforms []ocispecs.Platform `json:"platforms"` + GCPolicy []PruneInfo `json:"gcPolicy"` + BuildkitVersion BuildkitVersion `json:"buildkitVersion"` } // ListWorkers lists all active workers @@ -27,7 +28,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([] } req := &controlapi.ListWorkersRequest{Filter: info.Filter} - resp, err := c.controlClient().ListWorkers(ctx, req) + resp, err := c.ControlClient().ListWorkers(ctx, req) if err != nil { return nil, errors.Wrap(err, "failed to list workers") } @@ -36,10 +37,11 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([] for _, w := range resp.Record { wi = append(wi, &WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: pb.ToSpecPlatforms(w.Platforms), - GCPolicy: fromAPIGCPolicy(w.GCPolicy), + ID: w.ID, + Labels: w.Labels, + Platforms: pb.ToSpecPlatforms(w.Platforms), + GCPolicy: fromAPIGCPolicy(w.GCPolicy), + BuildkitVersion: fromAPIBuildkitVersion(w.BuildkitVersion), }) } diff --git a/cmd/buildctl/build.go b/cmd/buildctl/build.go index f3d36015d0d3..e7cb1f770d3c 100644 --- a/cmd/buildctl/build.go +++ b/cmd/buildctl/build.go @@ -4,18 +4,24 @@ import ( "context" "encoding/base64" "encoding/json" + "fmt" "io" "os" + "strings" "github.com/containerd/continuity" + "github.com/docker/cli/cli/config" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/cmd/buildctl/build" bccommon "github.com/moby/buildkit/cmd/buildctl/common" + gateway "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/auth/authprovider" "github.com/moby/buildkit/session/sshforward/sshprovider" "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/progress/progresswriter" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -38,16 +44,6 @@ var buildCommand = cli.Command{ Name: "output,o", Usage: "Define exports for build result, e.g. --output type=image,name=docker.io/username/image,push=true", }, - cli.StringFlag{ - Name: "exporter", - Usage: "Define exporter for build result (DEPRECATED: use --export type=[,=]", - Hidden: true, - }, - cli.StringSliceFlag{ - Name: "exporter-opt", - Usage: "Define custom options for exporter (DEPRECATED: use --output type=[,=]", - Hidden: true, - }, cli.StringFlag{ Name: "progress", Usage: "Set type of progress (auto, plain, tty). Use plain to show container output", @@ -61,6 +57,10 @@ var buildCommand = cli.Command{ Name: "local", Usage: "Allow build access to the local directory", }, + cli.StringSliceFlag{ + Name: "oci-layout", + Usage: "Allow build access to the local OCI layout", + }, cli.StringFlag{ Name: "frontend", Usage: "Define frontend used for build", @@ -69,11 +69,6 @@ var buildCommand = cli.Command{ Name: "opt", Usage: "Define custom options for frontend, e.g. --opt target=foo --opt build-arg:foo=bar", }, - cli.StringSliceFlag{ - Name: "frontend-opt", - Usage: "Define custom options for frontend, e.g. --frontend-opt target=foo --frontend-opt build-arg:foo=bar (DEPRECATED: use --opt)", - Hidden: true, - }, cli.BoolFlag{ Name: "no-cache", Usage: "Disable cache for all the vertices", @@ -82,11 +77,6 @@ var buildCommand = cli.Command{ Name: "export-cache", Usage: "Export build cache, e.g. 
--export-cache type=registry,ref=example.com/foo/bar, or --export-cache type=local,dest=path/to/dir", }, - cli.StringSliceFlag{ - Name: "export-cache-opt", - Usage: "Define custom options for cache exporting (DEPRECATED: use --export-cache type=,=[,=]", - Hidden: true, - }, cli.StringSliceFlag{ Name: "import-cache", Usage: "Import build cache, e.g. --import-cache type=registry,ref=example.com/foo/bar, or --import-cache type=local,src=path/to/dir", @@ -107,6 +97,14 @@ var buildCommand = cli.Command{ Name: "metadata-file", Usage: "Output build metadata (e.g., image digest) to a file as JSON", }, + cli.StringFlag{ + Name: "source-policy-file", + Usage: "Read source policy file from a JSON file", + }, + cli.StringFlag{ + Name: "ref-file", + Usage: "Write build ref to a file", + }, }, } @@ -159,7 +157,8 @@ func buildAction(clicontext *cli.Context) error { logrus.Infof("tracing logs to %s", traceFile.Name()) } - attachable := []session.Attachable{authprovider.NewDockerAuthProvider(os.Stderr)} + dockerConfig := config.LoadDefaultConfigFile(os.Stderr) + attachable := []session.Attachable{authprovider.NewDockerAuthProvider(dockerConfig)} if ssh := clicontext.StringSlice("ssh"); len(ssh) > 0 { configs, err := build.ParseSSH(ssh) @@ -186,21 +185,12 @@ func buildAction(clicontext *cli.Context) error { return err } - var exports []client.ExportEntry - if legacyExporter := clicontext.String("exporter"); legacyExporter != "" { - logrus.Warnf("--exporter is deprecated. Please use --output type=[,=] instead.") - if len(clicontext.StringSlice("output")) > 0 { - return errors.New("--exporter cannot be used with --output") - } - exports, err = build.ParseLegacyExporter(clicontext.String("exporter"), clicontext.StringSlice("exporter-opt")) - } else { - exports, err = build.ParseOutput(clicontext.StringSlice("output")) - } + exports, err := build.ParseOutput(clicontext.StringSlice("output")) if err != nil { return err } - cacheExports, err := build.ParseExportCache(clicontext.StringSlice("export-cache"), clicontext.StringSlice("export-cache-opt")) + cacheExports, err := build.ParseExportCache(clicontext.StringSlice("export-cache")) if err != nil { return err } @@ -209,20 +199,38 @@ func buildAction(clicontext *cli.Context) error { return err } + var srcPol *spb.Policy + if srcPolFile := clicontext.String("source-policy-file"); srcPolFile != "" { + b, err := os.ReadFile(srcPolFile) + if err != nil { + return err + } + var srcPolStruct spb.Policy + if err := json.Unmarshal(b, &srcPolStruct); err != nil { + return errors.Wrapf(err, "failed to unmarshal source-policy-file %q", srcPolFile) + } + srcPol = &srcPolStruct + } + eg, ctx := errgroup.WithContext(bccommon.CommandContext(clicontext)) + ref := identity.NewID() + solveOpt := client.SolveOpt{ Exports: exports, // LocalDirs is set later Frontend: clicontext.String("frontend"), // FrontendAttrs is set later + // OCILayouts is set later CacheExports: cacheExports, CacheImports: cacheImports, Session: attachable, AllowedEntitlements: allowed, + SourcePolicy: srcPol, + Ref: ref, } - solveOpt.FrontendAttrs, err = build.ParseOpt(clicontext.StringSlice("opt"), clicontext.StringSlice("frontend-opt")) + solveOpt.FrontendAttrs, err = build.ParseOpt(clicontext.StringSlice("opt")) if err != nil { return errors.Wrap(err, "invalid opt") } @@ -232,6 +240,11 @@ func buildAction(clicontext *cli.Context) error { return errors.Wrap(err, "invalid local") } + solveOpt.OCIStores, err = build.ParseOCILayout(clicontext.StringSlice("oci-layout")) + if err != nil { + return 
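Editor's note: the new `--source-policy-file` flag above simply unmarshals a JSON file into an `spb.Policy` and passes it through `SolveOpt`. A minimal standalone sketch of that loading step, mirroring the CLI code (the file path is hypothetical):

```go
package main

import (
	"encoding/json"
	"os"

	spb "github.com/moby/buildkit/sourcepolicy/pb"
	"github.com/pkg/errors"
)

// loadSourcePolicy reads a JSON source policy the same way buildctl's
// --source-policy-file flag does, returning nil for an empty path.
func loadSourcePolicy(path string) (*spb.Policy, error) {
	if path == "" {
		return nil, nil
	}
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var pol spb.Policy
	if err := json.Unmarshal(b, &pol); err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal source-policy-file %q", path)
	}
	return &pol, nil
}
```

The returned policy would then be assigned to `client.SolveOpt{SourcePolicy: pol}` exactly as `buildAction` does.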
errors.Wrap(err, "invalid oci-layout") + } + var def *llb.Definition if clicontext.String("frontend") == "" { if fi, _ := os.Stdin.Stat(); (fi.Mode() & os.ModeCharDevice) != 0 { @@ -250,6 +263,13 @@ func buildAction(clicontext *cli.Context) error { } } + refFile := clicontext.String("ref-file") + if refFile != "" { + defer func() { + continuity.AtomicWriteFile(refFile, []byte(ref), 0666) + }() + } + // not using shared context to not disrupt display but let is finish reporting errors pw, err := progresswriter.NewPrinter(context.TODO(), os.Stderr, clicontext.String("progress")) if err != nil { @@ -283,13 +303,38 @@ func buildAction(clicontext *cli.Context) error { } } + var subMetadata map[string][]byte + eg.Go(func() error { defer func() { for _, w := range writers { close(w.Status()) } }() - resp, err := c.Solve(ctx, def, solveOpt, progresswriter.ResetTime(mw.WithPrefix("", false)).Status()) + + sreq := gateway.SolveRequest{ + Frontend: solveOpt.Frontend, + FrontendOpt: solveOpt.FrontendAttrs, + } + if def != nil { + sreq.Definition = def.ToPB() + } + resp, err := c.Build(ctx, solveOpt, "buildctl", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + _, isSubRequest := sreq.FrontendOpt["requestid"] + if isSubRequest { + if _, ok := sreq.FrontendOpt["frontend.caps"]; !ok { + sreq.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests" + } + } + res, err := c.Solve(ctx, sreq) + if err != nil { + return nil, err + } + if isSubRequest && res != nil { + subMetadata = res.Metadata + } + return res, err + }, progresswriter.ResetTime(mw.WithPrefix("", false)).Status()) if err != nil { return err } @@ -312,7 +357,20 @@ func buildAction(clicontext *cli.Context) error { return pw.Err() }) - return eg.Wait() + if err := eg.Wait(); err != nil { + return err + } + + if txt, ok := subMetadata["result.txt"]; ok { + fmt.Print(string(txt)) + } else { + for k, v := range subMetadata { + if strings.HasPrefix(k, "result.") { + fmt.Printf("%s\n%s\n", k, v) + } + } + } + return nil } func writeMetadataFile(filename string, exporterResponse map[string]string) error { diff --git a/cmd/buildctl/build/exportcache.go b/cmd/buildctl/build/exportcache.go index cf83897b8f34..ec8b6a24bc4e 100644 --- a/cmd/buildctl/build/exportcache.go +++ b/cmd/buildctl/build/exportcache.go @@ -20,12 +20,11 @@ func parseExportCacheCSV(s string) (client.CacheOptionsEntry, error) { return ex, err } for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { + key, value, ok := strings.Cut(field, "=") + if !ok { return ex, errors.Errorf("invalid value %s", field) } - key := strings.ToLower(parts[0]) - value := parts[1] + key = strings.ToLower(key) switch key { case "type": ex.Type = value @@ -39,37 +38,28 @@ func parseExportCacheCSV(s string) (client.CacheOptionsEntry, error) { if _, ok := ex.Attrs["mode"]; !ok { ex.Attrs["mode"] = "min" } + if ex.Type == "gha" { + return loadGithubEnv(ex) + } return ex, nil } -// ParseExportCache parses --export-cache (and legacy --export-cache-opt) -func ParseExportCache(exportCaches, legacyExportCacheOpts []string) ([]client.CacheOptionsEntry, error) { +// ParseExportCache parses --export-cache +func ParseExportCache(exportCaches []string) ([]client.CacheOptionsEntry, error) { var exports []client.CacheOptionsEntry - if len(legacyExportCacheOpts) > 0 { - if len(exportCaches) != 1 { - return nil, errors.New("--export-cache-opt requires exactly single --export-cache") - } - } for _, exportCache := range exportCaches { legacy := 
!strings.Contains(exportCache, "type=") if legacy { - logrus.Warnf("--export-cache --export-cache-opt = is deprecated. Please use --export-cache type=registry,ref=,=[,=] instead") - attrs, err := attrMap(legacyExportCacheOpts) - if err != nil { - return nil, err - } - if _, ok := attrs["mode"]; !ok { - attrs["mode"] = "min" - } - attrs["ref"] = exportCache + // Deprecated since BuildKit v0.4.0, but no plan to remove: https://github.com/moby/buildkit/pull/2783#issuecomment-1093449772 + logrus.Warnf("--export-cache is deprecated. Please use --export-cache type=registry,ref=,=[,=] instead") exports = append(exports, client.CacheOptionsEntry{ - Type: "registry", - Attrs: attrs, + Type: "registry", + Attrs: map[string]string{ + "mode": "min", + "ref": exportCache, + }, }) } else { - if len(legacyExportCacheOpts) > 0 { - return nil, errors.New("--export-cache-opt is not supported for the specified --export-cache. Please use --export-cache type=,=[,=] instead") - } ex, err := parseExportCacheCSV(exportCache) if err != nil { return nil, err diff --git a/cmd/buildctl/build/exportcache_test.go b/cmd/buildctl/build/exportcache_test.go index 77a3b8aa255c..ccaa321735d9 100644 --- a/cmd/buildctl/build/exportcache_test.go +++ b/cmd/buildctl/build/exportcache_test.go @@ -9,10 +9,9 @@ import ( func TestParseExportCache(t *testing.T) { type testCase struct { - exportCaches []string // --export-cache - legacyExportCacheOpts []string // --export-cache-opt (legacy) - expected []client.CacheOptionsEntry - expectedErr string + exportCaches []string // --export-cache + expected []client.CacheOptionsEntry + expectedErr string } testCases := []testCase{ { @@ -28,28 +27,22 @@ func TestParseExportCache(t *testing.T) { }, }, { - exportCaches: []string{"example.com/foo/bar"}, - legacyExportCacheOpts: []string{"mode=max"}, + exportCaches: []string{"example.com/foo/bar"}, expected: []client.CacheOptionsEntry{ { Type: "registry", Attrs: map[string]string{ "ref": "example.com/foo/bar", - "mode": "max", + "mode": "min", }, }, }, }, - { - exportCaches: []string{"type=registry,ref=example.com/foo/bar"}, - legacyExportCacheOpts: []string{"mode=max"}, - expectedErr: "--export-cache-opt is not supported for the specified --export-cache", - }, // TODO: test multiple exportCaches (valid for CLI but not supported by solver) } for _, tc := range testCases { - ex, err := ParseExportCache(tc.exportCaches, tc.legacyExportCacheOpts) + ex, err := ParseExportCache(tc.exportCaches) if tc.expectedErr == "" { require.EqualValues(t, tc.expected, ex) } else { diff --git a/cmd/buildctl/build/importcache.go b/cmd/buildctl/build/importcache.go index a300f327bbae..b91eb36490c9 100644 --- a/cmd/buildctl/build/importcache.go +++ b/cmd/buildctl/build/importcache.go @@ -20,12 +20,11 @@ func parseImportCacheCSV(s string) (client.CacheOptionsEntry, error) { return im, err } for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { + key, value, ok := strings.Cut(field, "=") + if !ok { return im, errors.Errorf("invalid value %s", field) } - key := strings.ToLower(parts[0]) - value := parts[1] + key = strings.ToLower(key) switch key { case "type": im.Type = value @@ -36,6 +35,9 @@ func parseImportCacheCSV(s string) (client.CacheOptionsEntry, error) { if im.Type == "" { return im, errors.New("--import-cache requires type=") } + if im.Type == "gha" { + return loadGithubEnv(im) + } return im, nil } @@ -45,6 +47,7 @@ func ParseImportCache(importCaches []string) ([]client.CacheOptionsEntry, error) for _, importCache := 
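Editor's note: for `type=gha`, `url` and `token` now fall back to the GitHub Actions runtime environment (via `loadGithubEnv`, added in `cmd/buildctl/build/util.go` further down). A test-style sketch of that fallback, with assumed env values:

```go
package build

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestGHAEnvFallback shows that `type=gha` needs no explicit url/token
// attributes once the Actions env vars are present. Values are illustrative.
func TestGHAEnvFallback(t *testing.T) {
	t.Setenv("ACTIONS_CACHE_URL", "https://example.com/cache")
	t.Setenv("ACTIONS_RUNTIME_TOKEN", "secret")

	im, err := ParseImportCache([]string{"type=gha"})
	require.NoError(t, err)
	require.Equal(t, "https://example.com/cache", im[0].Attrs["url"])
	require.Equal(t, "secret", im[0].Attrs["token"])
}
```

Without either the attribute or the environment variable, parsing fails with "cache with type gha requires url parameter or $ACTIONS_CACHE_URL" (and likewise for the token).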
diff --git a/cmd/buildctl/build/ocilayout.go b/cmd/buildctl/build/ocilayout.go
new file mode 100644
index 000000000000..e611f5799691
--- /dev/null
+++ b/cmd/buildctl/build/ocilayout.go
@@ -0,0 +1,27 @@
+package build
+
+import (
+	"strings"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/content/local"
+	"github.com/pkg/errors"
+)
+
+// ParseOCILayout parses --oci-layout
+func ParseOCILayout(layouts []string) (map[string]content.Store, error) {
+	contentStores := make(map[string]content.Store)
+	for _, idAndDir := range layouts {
+		parts := strings.SplitN(idAndDir, "=", 2)
+		if len(parts) != 2 {
+			return nil, errors.Errorf("oci-layout option must be 'id=path/to/layout', instead had invalid %s", idAndDir)
+		}
+		cs, err := local.NewStore(parts[1])
+		if err != nil {
+			return nil, errors.Wrapf(err, "oci-layout context at %s failed to initialize", parts[1])
+		}
+		contentStores[parts[0]] = cs
+	}
+
+	return contentStores, nil
+}
diff --git a/cmd/buildctl/build/opt.go b/cmd/buildctl/build/opt.go
index 93acc7dd23ac..3731b130b30d 100644
--- a/cmd/buildctl/build/opt.go
+++ b/cmd/buildctl/build/opt.go
@@ -1,27 +1,5 @@
 package build
 
-import (
-	"github.com/sirupsen/logrus"
-)
-
-func ParseOpt(opts, legacyFrontendOpts []string) (map[string]string, error) {
-	m := make(map[string]string)
-	if len(legacyFrontendOpts) > 0 {
-		logrus.Warn("--frontend-opt <opt>=<optval> is deprecated. Please use --opt <opt>=<optval> instead.")
-		legacy, err := attrMap(legacyFrontendOpts)
-		if err != nil {
-			return nil, err
-		}
-		for k, v := range legacy {
-			m[k] = v
-		}
-	}
-	modern, err := attrMap(opts)
-	if err != nil {
-		return nil, err
-	}
-	for k, v := range modern {
-		m[k] = v
-	}
-	return m, nil
+func ParseOpt(opts []string) (map[string]string, error) {
+	return attrMap(opts)
 }
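Editor's note: `ParseOCILayout` feeds `SolveOpt.OCIStores` directly, so the store id chosen on the command line is the same id an LLB `oci-layout` source refers to. A minimal sketch, with a hypothetical `mystore` id and layout path:

```go
// Sketch: wiring a `--oci-layout mystore=/tmp/layout` equivalent into a solve.
// Assumes solveOpt is a client.SolveOpt being prepared by the caller.
stores, err := build.ParseOCILayout([]string{"mystore=/tmp/layout"})
if err != nil {
	return err
}
solveOpt.OCIStores = stores // exposed to the build as the "oci:mystore" store
```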
diff --git a/cmd/buildctl/build/output.go b/cmd/buildctl/build/output.go
index 185266e1e1d5..abdd508b833f 100644
--- a/cmd/buildctl/build/output.go
+++ b/cmd/buildctl/build/output.go
@@ -4,6 +4,7 @@ import (
 	"encoding/csv"
 	"io"
 	"os"
+	"strconv"
 	"strings"
 
 	"github.com/containerd/console"
@@ -23,12 +24,11 @@ func parseOutputCSV(s string) (client.ExportEntry, error) {
 		return ex, err
 	}
 	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		if len(parts) != 2 {
+		key, value, ok := strings.Cut(field, "=")
+		if !ok {
 			return ex, errors.Errorf("invalid value %s", field)
 		}
-		key := strings.ToLower(parts[0])
-		value := parts[1]
+		key = strings.ToLower(key)
 		switch key {
 		case "type":
 			ex.Type = value
@@ -42,7 +42,7 @@ func parseOutputCSV(s string) (client.ExportEntry, error) {
 	if v, ok := ex.Attrs["output"]; ok {
 		return ex, errors.Errorf("output=%s not supported for --output, you meant dest=%s?", v, v)
 	}
-	ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["dest"])
+	ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["dest"], ex.Attrs)
 	if err != nil {
 		return ex, errors.Wrap(err, "invalid output option: output")
 	}
@@ -65,42 +65,36 @@ func ParseOutput(exports []string) ([]client.ExportEntry, error) {
 	return entries, nil
 }
 
-// ParseLegacyExporter parses legacy --exporter <exporter> --exporter-opt <opt>=<optval>
-func ParseLegacyExporter(legacyExporter string, legacyExporterOpts []string) ([]client.ExportEntry, error) {
-	var ex client.ExportEntry
-	ex.Type = legacyExporter
-	var err error
-	ex.Attrs, err = attrMap(legacyExporterOpts)
-	if err != nil {
-		return nil, errors.Wrap(err, "invalid exporter-opt")
-	}
-	if v, ok := ex.Attrs["dest"]; ok {
-		return nil, errors.Errorf("dest=%s not supported for --exporter-opt, you meant output=%s?", v, v)
-	}
-	ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["output"])
-	if err != nil {
-		return nil, errors.Wrap(err, "invalid exporter option: output")
-	}
-	if ex.Output != nil || ex.OutputDir != "" {
-		delete(ex.Attrs, "output")
-	}
-	return []client.ExportEntry{ex}, nil
-}
-
 // resolveExporterDest returns at most either one of io.WriteCloser (single file) or a string (directory path).
-func resolveExporterDest(exporter, dest string) (func(map[string]string) (io.WriteCloser, error), string, error) {
+func resolveExporterDest(exporter, dest string, attrs map[string]string) (func(map[string]string) (io.WriteCloser, error), string, error) {
 	wrapWriter := func(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
 		return func(m map[string]string) (io.WriteCloser, error) {
 			return wc, nil
 		}
 	}
+
+	var supportFile bool
+	var supportDir bool
 	switch exporter {
 	case client.ExporterLocal:
+		supportDir = true
+	case client.ExporterTar:
+		supportFile = true
+	case client.ExporterOCI, client.ExporterDocker:
+		tar, err := strconv.ParseBool(attrs["tar"])
+		if err != nil {
+			tar = true
+		}
+		supportFile = tar
+		supportDir = !tar
+	}
+
+	if supportDir {
 		if dest == "" {
-			return nil, "", errors.New("output directory is required for local exporter")
+			return nil, "", errors.Errorf("output directory is required for %s exporter", exporter)
 		}
 		return nil, dest, nil
-	case client.ExporterOCI, client.ExporterDocker, client.ExporterTar:
+	} else if supportFile {
 		if dest != "" && dest != "-" {
 			fi, err := os.Stat(dest)
 			if err != nil && !errors.Is(err, os.ErrNotExist) {
@@ -117,7 +111,8 @@ func resolveExporterDest(exporter, dest string) (func(map[string]string) (io.Wri
 			return nil, "", errors.Errorf("output file is required for %s exporter. refusing to write to console", exporter)
 		}
 		return wrapWriter(os.Stdout), "", nil
-	default: // e.g. client.ExporterImage
+	} else {
+		// e.g. client.ExporterImage
 		if dest != "" {
 			return nil, "", errors.Errorf("output %s is not supported by %s exporter", dest, exporter)
 		}
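Editor's note: the new `tar` attribute decides whether the OCI/Docker exporters write a tarball (the default) or a directory. A small sketch of the resulting behavior through the exported `ParseOutput`, using hypothetical destinations:

```go
// With tar unset (or invalid), dest must be a file or "-":
exFile, err := build.ParseOutput([]string{"type=oci,dest=out.tar"})
if err != nil {
	return err
}
_ = exFile[0].Output // non-nil: a file writer is used

// With tar=false, dest is treated as a directory and backed by a local
// content store updated via the OCI index, as solve.go now does:
exDir, err := build.ParseOutput([]string{"type=oci,tar=false,dest=out-dir"})
if err != nil {
	return err
}
_ = exDir[0].OutputDir // "out-dir"
```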
diff --git a/cmd/buildctl/build/secret.go b/cmd/buildctl/build/secret.go
index 7d91a439073d..a41892b10e30 100644
--- a/cmd/buildctl/build/secret.go
+++ b/cmd/buildctl/build/secret.go
@@ -26,8 +26,8 @@ func ParseSecret(sl []string) (session.Attachable, error) {
 	return secretsprovider.NewSecretProvider(store), nil
 }
 
-func parseSecret(value string) (*secretsprovider.Source, error) {
-	csvReader := csv.NewReader(strings.NewReader(value))
+func parseSecret(val string) (*secretsprovider.Source, error) {
+	csvReader := csv.NewReader(strings.NewReader(val))
 	fields, err := csvReader.Read()
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to parse csv secret")
@@ -37,14 +37,11 @@ func parseSecret(value string) (*secretsprovider.Source, error) {
 	var typ string
 	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		key := strings.ToLower(parts[0])
-
-		if len(parts) != 2 {
+		key, value, ok := strings.Cut(field, "=")
+		if !ok {
 			return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
 		}
-
-		value := parts[1]
+		key = strings.ToLower(key)
 		switch key {
 		case "type":
 			if value != "file" && value != "env" {
diff --git a/cmd/buildctl/build/util.go b/cmd/buildctl/build/util.go
new file mode 100644
index 000000000000..4dfe63289791
--- /dev/null
+++ b/cmd/buildctl/build/util.go
@@ -0,0 +1,33 @@
+package build
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+
+	"github.com/moby/buildkit/client"
+)
+
+// loadGithubEnv verifies that the url and token attributes exist in the
+// cache entry.
+// If not, it searches for the $ACTIONS_RUNTIME_TOKEN and $ACTIONS_CACHE_URL
+// environment variables and adds them to the cache options.
+// It works for both import and export.
+func loadGithubEnv(cache client.CacheOptionsEntry) (client.CacheOptionsEntry, error) {
+	if _, ok := cache.Attrs["url"]; !ok {
+		url, ok := os.LookupEnv("ACTIONS_CACHE_URL")
+		if !ok {
+			return cache, errors.New("cache with type gha requires url parameter or $ACTIONS_CACHE_URL")
+		}
+		cache.Attrs["url"] = url
+	}
+
+	if _, ok := cache.Attrs["token"]; !ok {
+		token, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN")
+		if !ok {
+			return cache, errors.New("cache with type gha requires token parameter or $ACTIONS_RUNTIME_TOKEN")
+		}
+		cache.Attrs["token"] = token
+	}
+	return cache, nil
+}
diff --git a/cmd/buildctl/build_test.go b/cmd/buildctl/build_test.go
index bde880c9095c..8fde945219e8 100644
--- a/cmd/buildctl/build_test.go
+++ b/cmd/buildctl/build_test.go
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -25,10 +24,10 @@ import (
 
 func testBuildWithLocalFiles(t *testing.T, sb integration.Sandbox) {
 	dir, err := tmpdir(
+		t,
 		fstest.CreateFile("foo", []byte("bar"), 0600),
 	)
 	require.NoError(t, err)
-	defer os.RemoveAll(dir)
 
 	st := llb.Image("busybox").
 		Run(llb.Shlex("sh -c 'echo -n bar > foo2'")).
@@ -55,17 +54,15 @@ func testBuildLocalExporter(t *testing.T, sb integration.Sandbox) {
 	rdr, err := marshal(sb.Context(), out)
 	require.NoError(t, err)
 
-	tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
-	require.NoError(t, err)
-	defer os.RemoveAll(tmpdir)
+	tmpdir := t.TempDir()
 
-	cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --exporter=local --exporter-opt output=%s", tmpdir))
+	cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --output type=local,dest=%s", tmpdir))
 	cmd.Stdin = rdr
 	err = cmd.Run()
 	require.NoError(t, err)
 
-	dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
+	dt, err := os.ReadFile(filepath.Join(tmpdir, "foo"))
 	require.NoError(t, err)
 	require.Equal(t, string(dt), "bar")
 }
@@ -86,8 +83,7 @@ func testBuildContainerdExporter(t *testing.T, sb integration.Sandbox) {
 	buildCmd := []string{
 		"build", "--progress=plain",
-		"--exporter=image", "--exporter-opt", "unpack=true",
-		"--exporter-opt", "name=" + imageName,
+		"--output", "type=image,unpack=true,name=" + imageName,
 	}
 
 	cmd := sb.Cmd(strings.Join(buildCmd, " "))
@@ -121,9 +117,7 @@ func testBuildMetadataFile(t *testing.T, sb integration.Sandbox) {
 	rdr, err := marshal(sb.Context(), st.Root())
 	require.NoError(t, err)
 
-	tmpDir, err := ioutil.TempDir("", "buildkit-buildctl")
-	require.NoError(t, err)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := t.TempDir()
 
 	imageName := "example.com/moby/metadata:test"
 	metadataFile := filepath.Join(tmpDir, "metadata.json")
@@ -140,7 +134,7 @@ func testBuildMetadataFile(t *testing.T, sb integration.Sandbox) {
 	require.NoError(t, err)
 
 	require.FileExists(t, metadataFile)
-	metadataBytes, err := ioutil.ReadFile(metadataFile)
+	metadataBytes, err := os.ReadFile(metadataFile)
 	require.NoError(t, err)
 
 	var metadata map[string]interface{}
@@ -192,11 +186,8 @@ func marshal(ctx context.Context, st llb.State) (io.Reader, error) {
 	return bytes.NewBuffer(dt), nil
 }
 
-func tmpdir(appliers ...fstest.Applier) (string, error) {
-	tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
-	if err != nil {
-		return "", err
-	}
+func tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) {
+	tmpdir := t.TempDir()
 	if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
 		return "", err
 	}
diff --git a/cmd/buildctl/buildctl_test.go b/cmd/buildctl/buildctl_test.go
index 66fef818f919..feda648090c3 100644
--- a/cmd/buildctl/buildctl_test.go
+++ b/cmd/buildctl/buildctl_test.go
@@ -2,7 +2,6 @@ package main
 
 import (
 	"encoding/json"
-	"io/ioutil"
 	"os"
 	"path"
 	"testing"
@@ -37,9 +36,7 @@ func testUsage(t *testing.T, sb integration.Sandbox) {
 }
 
 func TestWriteMetadataFile(t *testing.T) {
-	tmpdir, err := os.MkdirTemp("", "buildkit")
-	require.NoError(t, err)
-	defer os.RemoveAll(tmpdir)
+	tmpdir := t.TempDir()
 
 	cases := []struct {
 		name             string
@@ -120,7 +117,7 @@ func TestWriteMetadataFile(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			fname := path.Join(tmpdir, "metadata_"+tt.name)
 			require.NoError(t, writeMetadataFile(fname, tt.exporterResponse))
-			current, err := ioutil.ReadFile(fname)
+			current, err := os.ReadFile(fname)
 			require.NoError(t, err)
 			var raw map[string]interface{}
 			require.NoError(t, json.Unmarshal(current, &raw))
diff --git a/cmd/buildctl/common/common.go b/cmd/buildctl/common/common.go
index e1013160a41e..3a691dc0bac7 100644
--- a/cmd/buildctl/common/common.go
+++ b/cmd/buildctl/common/common.go
@@ -1,10 +1,14 @@
 package common
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"net/url"
 	"os"
 	"path/filepath"
+	"strings"
+	"text/template"
 	"time"
 
 	"github.com/moby/buildkit/client"
@@ -88,3 +92,25 @@ func ResolveClient(c *cli.Context) (*client.Client, error) {
 
 	return client.New(ctx, c.GlobalString("addr"), opts...)
 }
+
+func ParseTemplate(format string) (*template.Template, error) {
+	// aliases is from https://github.com/containerd/nerdctl/blob/v0.17.1/cmd/nerdctl/fmtutil.go#L116-L126 (Apache License 2.0)
+	aliases := map[string]string{
+		"json": "{{json .}}",
+	}
+	if alias, ok := aliases[format]; ok {
+		format = alias
+	}
+	// funcs is from https://github.com/docker/cli/blob/v20.10.12/templates/templates.go#L12-L20 (Apache License 2.0)
+	funcs := template.FuncMap{
+		"json": func(v interface{}) string {
+			buf := &bytes.Buffer{}
+			enc := json.NewEncoder(buf)
+			enc.SetEscapeHTML(false)
+			enc.Encode(v)
+			// Remove the trailing new line added by the encoder
+			return strings.TrimSpace(buf.String())
+		},
+	}
+	return template.New("").Funcs(funcs).Parse(format)
+}
diff --git a/cmd/buildctl/debug.go b/cmd/buildctl/debug.go
index a6a33b6f8882..d7d735ef72d5 100644
--- a/cmd/buildctl/debug.go
+++ b/cmd/buildctl/debug.go
@@ -12,5 +12,10 @@ var debugCommand = cli.Command{
 		debug.DumpLLBCommand,
 		debug.DumpMetadataCommand,
 		debug.WorkersCommand,
+		debug.InfoCommand,
+		debug.MonitorCommand,
+		debug.LogsCommand,
+		debug.CtlCommand,
+		debug.GetCommand,
 	},
 }
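Editor's note: `ParseTemplate` was promoted from `debug/workers.go` into the shared `common` package so every subcommand can offer `--format`. A minimal usage sketch (assuming any JSON-serializable value such as the result of `ListWorkers`):

```go
// Sketch: render a value with the shared template helper; "json" is an
// alias for "{{json .}}", so both forms below are equivalent.
tmpl, err := bccommon.ParseTemplate("json")
if err != nil {
	return err
}
if err := tmpl.Execute(os.Stdout, workers); err != nil { // workers: any value
	return err
}
fmt.Fprintln(os.Stdout)
```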
specified") + } + ref := args[0] + + c, err := bccommon.ResolveClient(clicontext) + if err != nil { + return err + } + + ctx := appcontext.Context() + + pin := clicontext.Bool("pin") + unpin := clicontext.Bool("unpin") + del := clicontext.Bool("delete") + + if !pin && !unpin && !del { + return errors.Errorf("must specify one of --pin, --unpin, --delete") + } + + if pin && unpin { + return errors.Errorf("cannot specify both --pin and --unpin") + } + + if del && (pin || unpin) { + return errors.Errorf("cannot specify --delete with --pin or --unpin") + } + + _, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{ + Ref: ref, + Pinned: pin, + Delete: del, + }) + return err +} diff --git a/cmd/buildctl/debug/dumpmetadata.go b/cmd/buildctl/debug/dumpmetadata.go index 3b7e948b3990..b53fc25fd578 100644 --- a/cmd/buildctl/debug/dumpmetadata.go +++ b/cmd/buildctl/debug/dumpmetadata.go @@ -2,7 +2,6 @@ package debug import ( "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -41,7 +40,7 @@ var DumpMetadataCommand = cli.Command{ } func findMetadataDBFiles(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) + dirs, err := os.ReadDir(root) if err != nil { return nil, err } diff --git a/cmd/buildctl/debug/get.go b/cmd/buildctl/debug/get.go new file mode 100644 index 000000000000..e19ee275f346 --- /dev/null +++ b/cmd/buildctl/debug/get.go @@ -0,0 +1,54 @@ +package debug + +import ( + "io" + "os" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/proxy" + bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/moby/buildkit/util/appcontext" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var GetCommand = cli.Command{ + Name: "get", + Usage: "retrieve a blob from contentstore", + Action: get, +} + +func get(clicontext *cli.Context) error { + args := clicontext.Args() + if len(args) == 0 { + return errors.Errorf("blob digest must be specified") + } + + dgst, err := digest.Parse(args[0]) + if err != nil { + return err + } + + c, err := bccommon.ResolveClient(clicontext) + if err != nil { + return err + } + + ctx := appcontext.Context() + + store := proxy.NewContentStore(c.ContentClient()) + ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: dgst, + }) + if err != nil { + return err + } + defer ra.Close() + + // use 1MB buffer like we do for ingesting + buf := make([]byte, 1<<20) + _, err = io.CopyBuffer(os.Stdout, content.NewReader(ra), buf) + return err +} diff --git a/cmd/buildctl/debug/info.go b/cmd/buildctl/debug/info.go new file mode 100644 index 000000000000..3f702ff3aa0b --- /dev/null +++ b/cmd/buildctl/debug/info.go @@ -0,0 +1,48 @@ +package debug + +import ( + "fmt" + "os" + "text/tabwriter" + + bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/urfave/cli" +) + +var InfoCommand = cli.Command{ + Name: "info", + Usage: "display internal information", + Action: info, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "format", + Usage: "Format the output using the given Go template, e.g, '{{json .}}'", + }, + }, +} + +func info(clicontext *cli.Context) error { + c, err := bccommon.ResolveClient(clicontext) + if err != nil { + return err + } + res, err := c.Info(bccommon.CommandContext(clicontext)) + if err != nil { + return err + } + if format := clicontext.String("format"); format != "" { + tmpl, err := bccommon.ParseTemplate(format) + if err != nil { + 
return err + } + if err := tmpl.Execute(clicontext.App.Writer, res); err != nil { + return err + } + _, err = fmt.Fprintf(clicontext.App.Writer, "\n") + return err + } + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + _, _ = fmt.Fprintf(w, "BuildKit:\t%s %s %s\n", res.BuildkitVersion.Package, res.BuildkitVersion.Version, res.BuildkitVersion.Revision) + return w.Flush() +} diff --git a/cmd/buildctl/debug/logs.go b/cmd/buildctl/debug/logs.go new file mode 100644 index 000000000000..61693f4df141 --- /dev/null +++ b/cmd/buildctl/debug/logs.go @@ -0,0 +1,111 @@ +package debug + +import ( + "io" + "os" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/proxy" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" + bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/moby/buildkit/util/appcontext" + "github.com/moby/buildkit/util/progress/progresswriter" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var LogsCommand = cli.Command{ + Name: "logs", + Usage: "display build logs", + Action: logs, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "progress", + Usage: "progress output type", + Value: "auto", + }, + cli.BoolFlag{ + Name: "trace", + Usage: "show opentelemetry trace", + }, + }, +} + +func logs(clicontext *cli.Context) error { + args := clicontext.Args() + if len(args) == 0 { + return errors.Errorf("build ref must be specified") + } + ref := args[0] + + c, err := bccommon.ResolveClient(clicontext) + if err != nil { + return err + } + + ctx := appcontext.Context() + + if clicontext.Bool("trace") { + cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{ + Ref: ref, + }) + if err != nil { + return err + } + he, err := cl.Recv() + if err != nil { + if err == io.EOF { + return errors.Errorf("ref %s not found", ref) + } + return err + } + if he.Record.Trace == nil { + return errors.Errorf("ref %s does not have trace", ref) + } + store := proxy.NewContentStore(c.ContentClient()) + ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: he.Record.Trace.Digest, + Size: he.Record.Trace.Size_, + MediaType: he.Record.Trace.MediaType, + }) + if err != nil { + return err + } + defer ra.Close() + + // use 1MB buffer like we do for ingesting + buf := make([]byte, 1<<20) + _, err = io.CopyBuffer(os.Stdout, content.NewReader(ra), buf) + return err + } + + cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return err + } + + pw, err := progresswriter.NewPrinter(ctx, os.Stdout, clicontext.String("progress")) + if err != nil { + return err + } + + defer func() { + <-pw.Done() + }() + + for { + resp, err := cl.Recv() + if err != nil { + close(pw.Status()) + if errors.Is(err, io.EOF) { + return nil + } + return err + } + pw.Status() <- client.NewSolveStatus(resp) + } +} diff --git a/cmd/buildctl/debug/monitor.go b/cmd/buildctl/debug/monitor.go new file mode 100644 index 000000000000..2cf83e9326bf --- /dev/null +++ b/cmd/buildctl/debug/monitor.go @@ -0,0 +1,78 @@ +package debug + +import ( + "fmt" + + controlapi "github.com/moby/buildkit/api/services/control" + bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/moby/buildkit/util/appcontext" + "github.com/urfave/cli" +) + +var MonitorCommand = cli.Command{ + Name: "monitor", + Usage: "display build events", + Action: monitor, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: 
"completed", + Usage: "show completed builds", + }, + cli.StringFlag{ + Name: "ref", + Usage: "show events for a specific build", + }, + }, +} + +func monitor(clicontext *cli.Context) error { + c, err := bccommon.ResolveClient(clicontext) + if err != nil { + return err + } + completed := clicontext.Bool("completed") + + ctx := appcontext.Context() + + cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{ + ActiveOnly: !completed, + Ref: clicontext.String("ref"), + }) + if err != nil { + return err + } + + for { + ev, err := cl.Recv() + if err != nil { + return err + } + fmt.Printf("event: %s ref:%s\n", ev.Type.String(), ev.Record.Ref) + if ev.Record.NumTotalSteps != 0 { + fmt.Printf(" cache: %d/%d\n", ev.Record.NumCachedSteps, ev.Record.NumTotalSteps) + } + if ev.Record.Logs != nil { + fmt.Printf(" logs: %s\n", ev.Record.Logs) + } + if ev.Record.Trace != nil { + fmt.Printf(" trace: %s\n", ev.Record.Trace) + } + + if ev.Record.Result != nil { + if ev.Record.Result.Result != nil { + fmt.Printf(" descriptor: %s\n", ev.Record.Result.Result) + } + for _, att := range ev.Record.Result.Attestations { + fmt.Printf(" attestation: %s\n", att) + } + } + for k, res := range ev.Record.Results { + if res.Result != nil { + fmt.Printf(" [%s] descriptor: %s\n", k, res.Result) + } + for _, att := range res.Attestations { + fmt.Printf(" [%s] attestation: %s\n", k, att) + } + } + } +} diff --git a/cmd/buildctl/debug/workers.go b/cmd/buildctl/debug/workers.go index feacf4beefbe..9a68d34d532b 100644 --- a/cmd/buildctl/debug/workers.go +++ b/cmd/buildctl/debug/workers.go @@ -1,15 +1,12 @@ package debug import ( - "bytes" "context" - "encoding/json" "fmt" "os" "sort" "strings" "text/tabwriter" - "text/template" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/client" @@ -54,7 +51,7 @@ func listWorkers(clicontext *cli.Context) error { if clicontext.Bool("verbose") { logrus.Debug("Ignoring --verbose") } - tmpl, err := parseTemplate(format) + tmpl, err := bccommon.ParseTemplate(format) if err != nil { return err } @@ -79,6 +76,7 @@ func printWorkersVerbose(tw *tabwriter.Writer, winfo []*client.WorkerInfo) { for _, wi := range winfo { fmt.Fprintf(tw, "ID:\t%s\n", wi.ID) fmt.Fprintf(tw, "Platforms:\t%s\n", joinPlatforms(wi.Platforms)) + fmt.Fprintf(tw, "BuildKit:\t%s %s %s\n", wi.BuildkitVersion.Package, wi.BuildkitVersion.Version, wi.BuildkitVersion.Revision) fmt.Fprintf(tw, "Labels:\n") for _, k := range sortedKeys(wi.Labels) { v := wi.Labels[k] @@ -136,25 +134,3 @@ func joinPlatforms(p []ocispecs.Platform) string { } return strings.Join(str, ",") } - -func parseTemplate(format string) (*template.Template, error) { - // aliases is from https://github.com/containerd/nerdctl/blob/v0.17.1/cmd/nerdctl/fmtutil.go#L116-L126 (Apache License 2.0) - aliases := map[string]string{ - "json": "{{json .}}", - } - if alias, ok := aliases[format]; ok { - format = alias - } - // funcs is from https://github.com/docker/cli/blob/v20.10.12/templates/templates.go#L12-L20 (Apache License 2.0) - funcs := template.FuncMap{ - "json": func(v interface{}) string { - buf := &bytes.Buffer{} - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - enc.Encode(v) - // Remove the trailing new line added by the encoder - return strings.TrimSpace(buf.String()) - }, - } - return template.New("").Funcs(funcs).Parse(format) -} diff --git a/cmd/buildctl/diskusage.go b/cmd/buildctl/diskusage.go index 48f64cfa4cf4..8c00fcfe4e77 100644 --- a/cmd/buildctl/diskusage.go +++ b/cmd/buildctl/diskusage.go @@ 
-9,6 +9,7 @@ import ( "github.com/moby/buildkit/client" bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/sirupsen/logrus" "github.com/tonistiigi/units" "github.com/urfave/cli" ) @@ -26,6 +27,10 @@ var diskUsageCommand = cli.Command{ Name: "verbose, v", Usage: "Verbose output", }, + cli.StringFlag{ + Name: "format", + Usage: "Format the output using the given Go template, e.g, '{{json .}}'", + }, }, } @@ -40,6 +45,21 @@ func diskUsage(clicontext *cli.Context) error { return err } + if format := clicontext.String("format"); format != "" { + if clicontext.Bool("verbose") { + logrus.Debug("Ignoring --verbose") + } + tmpl, err := bccommon.ParseTemplate(format) + if err != nil { + return err + } + if err := tmpl.Execute(clicontext.App.Writer, du); err != nil { + return err + } + _, err = fmt.Fprintf(clicontext.App.Writer, "\n") + return err + } + tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) if clicontext.Bool("verbose") { diff --git a/cmd/buildctl/main.go b/cmd/buildctl/main.go index a4780517ae67..8203fc9d725a 100644 --- a/cmd/buildctl/main.go +++ b/cmd/buildctl/main.go @@ -7,6 +7,7 @@ import ( _ "github.com/moby/buildkit/client/connhelper/dockercontainer" _ "github.com/moby/buildkit/client/connhelper/kubepod" _ "github.com/moby/buildkit/client/connhelper/podmancontainer" + _ "github.com/moby/buildkit/client/connhelper/ssh" bccommon "github.com/moby/buildkit/cmd/buildctl/common" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/util/apicaps" diff --git a/cmd/buildctl/prune.go b/cmd/buildctl/prune.go index fc3adeaf0e4a..31c917262514 100644 --- a/cmd/buildctl/prune.go +++ b/cmd/buildctl/prune.go @@ -7,6 +7,7 @@ import ( "github.com/moby/buildkit/client" bccommon "github.com/moby/buildkit/cmd/buildctl/common" + "github.com/sirupsen/logrus" "github.com/tonistiigi/units" "github.com/urfave/cli" ) @@ -36,6 +37,10 @@ var pruneCommand = cli.Command{ Name: "verbose, v", Usage: "Verbose output", }, + cli.StringFlag{ + Name: "format", + Usage: "Format the output using the given Go template, e.g, '{{json .}}'", + }, }, } @@ -47,27 +52,7 @@ func prune(clicontext *cli.Context) error { ch := make(chan client.UsageInfo) printed := make(chan struct{}) - - tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - first := true - total := int64(0) - - go func() { - defer close(printed) - for du := range ch { - total += du.Size - if clicontext.Bool("verbose") { - printVerbose(tw, []*client.UsageInfo{&du}) - } else { - if first { - printTableHeader(tw) - first = false - } - printTableRow(tw, &du) - tw.Flush() - } - } - }() + var summarizer func() opts := []client.PruneOption{ client.WithFilter(clicontext.StringSlice("filter")), @@ -78,16 +63,61 @@ func prune(clicontext *cli.Context) error { opts = append(opts, client.PruneAll) } + if format := clicontext.String("format"); format != "" { + if clicontext.Bool("verbose") { + logrus.Debug("Ignoring --verbose") + } + tmpl, err := bccommon.ParseTemplate(format) + if err != nil { + return err + } + go func() { + defer close(printed) + for du := range ch { + // Unlike `buildctl du`, the template is applied to a UsageInfo, not to a slice of UsageInfo + if err := tmpl.Execute(clicontext.App.Writer, du); err != nil { + panic(err) + } + if _, err = fmt.Fprintf(clicontext.App.Writer, "\n"); err != nil { + panic(err) + } + } + }() + } else { + tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) + first := true + total := int64(0) + go func() { + defer close(printed) + for du := range ch { + total += du.Size + if 
clicontext.Bool("verbose") { + printVerbose(tw, []*client.UsageInfo{&du}) + } else { + if first { + printTableHeader(tw) + first = false + } + printTableRow(tw, &du) + tw.Flush() + } + } + }() + summarizer = func() { + tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) + fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total)) + tw.Flush() + } + } + err = c.Prune(bccommon.CommandContext(clicontext), ch, opts...) close(ch) <-printed if err != nil { return err } - - tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0) - fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total)) - tw.Flush() - + if summarizer != nil { + summarizer() + } return nil } diff --git a/cmd/buildkitd/config/config.go b/cmd/buildkitd/config/config.go index 7ee7b577d582..1734d5e1567e 100644 --- a/cmd/buildkitd/config/config.go +++ b/cmd/buildkitd/config/config.go @@ -24,6 +24,8 @@ type Config struct { Registries map[string]resolverconfig.RegistryConfig `toml:"registry"` DNS *DNSConfig `toml:"dns"` + + History *HistoryConfig `toml:"history"` } type GRPCConfig struct { @@ -53,6 +55,7 @@ type NetworkConfig struct { Mode string `toml:"networkMode"` CNIConfigPath string `toml:"cniConfigPath"` CNIBinaryPath string `toml:"cniBinaryPath"` + CNIPoolSize int `toml:"cniPoolSize"` } type OCIConfig struct { @@ -81,6 +84,9 @@ type OCIConfig struct { // The profile should already be loaded (by a higher level system) before creating a worker. ApparmorProfile string `toml:"apparmor-profile"` + // SELinux enables applying SELinux labels. + SELinux bool `toml:"selinux"` + // MaxParallelism is the maximum number of parallel build steps that can be run at the same time. MaxParallelism int `toml:"max-parallelism"` } @@ -99,6 +105,9 @@ type ContainerdConfig struct { // The profile should already be loaded (by a higher level system) before creating a worker. ApparmorProfile string `toml:"apparmor-profile"` + // SELinux enables applying SELinux labels. 
diff --git a/cmd/buildkitd/config/config.go b/cmd/buildkitd/config/config.go
index 7ee7b577d582..1734d5e1567e 100644
--- a/cmd/buildkitd/config/config.go
+++ b/cmd/buildkitd/config/config.go
@@ -24,6 +24,8 @@ type Config struct {
 	Registries map[string]resolverconfig.RegistryConfig `toml:"registry"`
 
 	DNS *DNSConfig `toml:"dns"`
+
+	History *HistoryConfig `toml:"history"`
 }
 
 type GRPCConfig struct {
@@ -53,6 +55,7 @@ type NetworkConfig struct {
 	Mode          string `toml:"networkMode"`
 	CNIConfigPath string `toml:"cniConfigPath"`
 	CNIBinaryPath string `toml:"cniBinaryPath"`
+	CNIPoolSize   int    `toml:"cniPoolSize"`
 }
 
 type OCIConfig struct {
@@ -81,6 +84,9 @@ type OCIConfig struct {
 	// The profile should already be loaded (by a higher level system) before creating a worker.
 	ApparmorProfile string `toml:"apparmor-profile"`
 
+	// SELinux enables applying SELinux labels.
+	SELinux bool `toml:"selinux"`
+
 	// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
 	MaxParallelism int `toml:"max-parallelism"`
 }
@@ -99,6 +105,9 @@ type ContainerdConfig struct {
 	// The profile should already be loaded (by a higher level system) before creating a worker.
 	ApparmorProfile string `toml:"apparmor-profile"`
 
	// SELinux enables applying SELinux labels.
+	SELinux bool `toml:"selinux"`
+
 	MaxParallelism int `toml:"max-parallelism"`
 
 	Rootless bool `toml:"rootless"`
@@ -116,3 +125,8 @@ type DNSConfig struct {
 	Options       []string `toml:"options"`
 	SearchDomains []string `toml:"searchDomains"`
 }
+
+type HistoryConfig struct {
+	MaxAge     int64 `toml:"maxAge"`
+	MaxEntries int64 `toml:"maxEntries"`
+}
diff --git a/cmd/buildkitd/constants_unix.go b/cmd/buildkitd/constants_unix.go
new file mode 100644
index 000000000000..8463a07745e0
--- /dev/null
+++ b/cmd/buildkitd/constants_unix.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package main
+
+const (
+	defaultContainerdAddress = "/run/containerd/containerd.sock"
+)
diff --git a/cmd/buildkitd/constants_windows.go b/cmd/buildkitd/constants_windows.go
new file mode 100644
index 000000000000..eb411d3f807c
--- /dev/null
+++ b/cmd/buildkitd/constants_windows.go
@@ -0,0 +1,5 @@
+package main
+
+const (
+	defaultContainerdAddress = "//./pipe/containerd-containerd"
+)
diff --git a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go
index 126ba0dbe2c8..ca411066b30f 100644
--- a/cmd/buildkitd/main.go
+++ b/cmd/buildkitd/main.go
@@ -5,7 +5,6 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"os"
 	"os/user"
@@ -25,10 +24,12 @@ import (
 	"github.com/gofrs/flock"
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/cache/remotecache/azblob"
 	"github.com/moby/buildkit/cache/remotecache/gha"
 	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
 	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
 	registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
+	s3remotecache "github.com/moby/buildkit/cache/remotecache/s3"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/moby/buildkit/control"
@@ -58,6 +59,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
+	"go.etcd.io/bbolt"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"go.opentelemetry.io/otel/propagation"
 	sdktrace "go.opentelemetry.io/otel/sdk/trace"
@@ -75,6 +77,9 @@ func init() {
 	if reexec.Init() {
 		os.Exit(0)
 	}
+
+	// enable in memory recording for buildkitd traces
+	detect.Recorder = detect.NewTraceRecorder()
 }
 
 var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
@@ -264,6 +269,7 @@ func main() {
 		if err != nil {
 			return err
 		}
+		defer controller.Close()
 
 		controller.Register(server)
 
@@ -277,7 +283,7 @@ func main() {
 				case "network.host":
 					cfg.Entitlements = append(cfg.Entitlements, e)
 				default:
-					return fmt.Errorf("invalid entitlement : %v", e)
+					return errors.Errorf("invalid entitlement : %s", e)
 				}
 			}
 		}
@@ -383,10 +389,10 @@ func setDefaultNetworkConfig(nc config.NetworkConfig) config.NetworkConfig {
 		nc.Mode = "auto"
 	}
 	if nc.CNIConfigPath == "" {
-		nc.CNIConfigPath = "/etc/buildkit/cni.json"
+		nc.CNIConfigPath = appdefaults.DefaultCNIConfigPath
 	}
 	if nc.CNIBinaryPath == "" {
-		nc.CNIBinaryPath = "/opt/cni/bin"
+		nc.CNIBinaryPath = appdefaults.DefaultCNIBinDir
 	}
 	return nc
 }
@@ -565,7 +571,10 @@ func unaryInterceptor(globalCtx context.Context, tp trace.TracerProvider) grpc.U
 
 		resp, err = withTrace(ctx, req, info, handler)
 		if err != nil {
-			logrus.Errorf("%s returned error: %+v", info.FullMethod, stack.Formatter(err))
+			logrus.Errorf("%s returned error: %v", info.FullMethod, err)
+			if logrus.GetLevel() >= logrus.DebugLevel {
+				fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
+			}
 		}
 		return
 	}
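Editor's note: a sketch of the new daemon knobs as they appear in the config structs; the values below are illustrative only, and the unit of `MaxAge` (seconds) is an assumption:

```go
// Illustrative construction of the daemon configuration additions:
// build-history retention plus the new worker-level SELinux and
// CNI network-namespace-pool settings.
cfg := config.Config{
	History: &config.HistoryConfig{
		MaxAge:     172800, // assumed to be seconds (2 days)
		MaxEntries: 50,     // keep at most 50 build records
	},
}
cfg.Workers.OCI.SELinux = true          // apply SELinux labels (oci worker)
cfg.Workers.Containerd.SELinux = true   // same for the containerd worker
cfg.Workers.Containerd.CNIPoolSize = 16 // pre-created CNI network namespaces
```

The equivalent TOML keys are `history.maxAge`, `history.maxEntries`, `selinux`, and `cniPoolSize`, per the struct tags above.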
stack.Formatter(grpcerrors.FromGRPC(err))) + } } return } @@ -594,7 +603,7 @@ func serverCredentials(cfg config.TLSConfig) (*tls.Config, error) { } if caFile != "" { certPool := x509.NewCertPool() - ca, err := ioutil.ReadFile(caFile) + ca, err := os.ReadFile(caFile) if err != nil { return nil, errors.Wrap(err, "could not read ca certificate") } @@ -623,7 +632,8 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err if tc != nil { traceSocket = filepath.Join(cfg.Root, "otel-grpc.sock") if err := runTraceController(traceSocket, tc); err != nil { - return nil, err + logrus.Warnf("failed set up otel-grpc controller: %v", err) + traceSocket = "" } } @@ -644,6 +654,11 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err return nil, err } + historyDB, err := bbolt.Open(filepath.Join(cfg.Root, "history.db"), 0600, nil) + if err != nil { + return nil, err + } + resolverFn := resolverFunc(cfg) w, err := wc.GetDefault() @@ -656,13 +671,16 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err "local": localremotecache.ResolveCacheExporterFunc(sessionManager), "inline": inlineremotecache.ResolveCacheExporterFunc(), "gha": gha.ResolveCacheExporterFunc(), + "s3": s3remotecache.ResolveCacheExporterFunc(), + "azblob": azblob.ResolveCacheExporterFunc(), } remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{ "registry": registryremotecache.ResolveCacheImporterFunc(sessionManager, w.ContentStore(), resolverFn), "local": localremotecache.ResolveCacheImporterFunc(sessionManager), "gha": gha.ResolveCacheImporterFunc(), + "s3": s3remotecache.ResolveCacheImporterFunc(), + "azblob": azblob.ResolveCacheImporterFunc(), } - return control.NewController(control.Opt{ SessionManager: sessionManager, WorkerController: wc, @@ -672,6 +690,10 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err CacheKeyStorage: cacheStorage, Entitlements: cfg.Entitlements, TraceCollector: tc, + HistoryDB: historyDB, + LeaseManager: w.LeaseManager(), + ContentStore: w.ContentStore(), + HistoryConfig: cfg.History, }) } @@ -760,6 +782,14 @@ func getGCPolicy(cfg config.GCConfig, root string) []client.PruneInfo { return out } +func getBuildkitVersion() client.BuildkitVersion { + return client.BuildkitVersion{ + Package: version.Package, + Version: version.Version, + Revision: version.Revision, + } +} + func getDNSConfig(cfg *config.DNSConfig) *oci.DNSConfig { var dns *oci.DNSConfig if cfg != nil { @@ -774,7 +804,7 @@ func getDNSConfig(cfg *config.DNSConfig) *oci.DNSConfig { // parseBoolOrAuto returns (nil, nil) if s is "auto" func parseBoolOrAuto(s string) (*bool, error) { - if s == "" || strings.ToLower(s) == "auto" { + if s == "" || strings.EqualFold(s, "auto") { return nil, nil } b, err := strconv.ParseBool(s) diff --git a/cmd/buildkitd/main_containerd_worker.go b/cmd/buildkitd/main_containerd_worker.go index 00079676b1bb..7992b1bf87ac 100644 --- a/cmd/buildkitd/main_containerd_worker.go +++ b/cmd/buildkitd/main_containerd_worker.go @@ -25,7 +25,6 @@ import ( ) const ( - defaultContainerdAddress = "/run/containerd/containerd.sock" defaultContainerdNamespace = "buildkit" ) @@ -90,6 +89,11 @@ func init() { Usage: "path of cni binary files", Value: defaultConf.Workers.Containerd.NetworkConfig.CNIBinaryPath, }, + cli.IntFlag{ + Name: "containerd-cni-pool-size", + Usage: "size of cni network namespace pool", + Value: defaultConf.Workers.Containerd.NetworkConfig.CNIPoolSize, + }, cli.StringFlag{ Name: 
"containerd-worker-snapshotter", Usage: "snapshotter name to use", @@ -99,6 +103,10 @@ func init() { Name: "containerd-worker-apparmor-profile", Usage: "set the name of the apparmor profile applied to containers", }, + cli.BoolFlag{ + Name: "containerd-worker-selinux", + Usage: "apply SELinux labels", + }, } n := "containerd-worker-rootless" u := "enable rootless mode" @@ -208,6 +216,9 @@ func applyContainerdFlags(c *cli.Context, cfg *config.Config) error { if c.GlobalIsSet("containerd-cni-config-path") { cfg.Workers.Containerd.NetworkConfig.CNIConfigPath = c.GlobalString("containerd-cni-config-path") } + if c.GlobalIsSet("containerd-cni-pool-size") { + cfg.Workers.Containerd.NetworkConfig.CNIPoolSize = c.GlobalInt("containerd-cni-pool-size") + } if c.GlobalIsSet("containerd-cni-binary-dir") { cfg.Workers.Containerd.NetworkConfig.CNIBinaryPath = c.GlobalString("containerd-cni-binary-dir") } @@ -217,6 +228,9 @@ func applyContainerdFlags(c *cli.Context, cfg *config.Config) error { if c.GlobalIsSet("containerd-worker-apparmor-profile") { cfg.Workers.Containerd.ApparmorProfile = c.GlobalString("containerd-worker-apparmor-profile") } + if c.GlobalIsSet("containerd-worker-selinux") { + cfg.Workers.Containerd.SELinux = c.GlobalBool("containerd-worker-selinux") + } return nil } @@ -228,7 +242,7 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([ cfg := common.config.Workers.Containerd - if (cfg.Enabled == nil && !validContainerdSocket(cfg.Address)) || (cfg.Enabled != nil && !*cfg.Enabled) { + if (cfg.Enabled == nil && !validContainerdSocket(cfg)) || (cfg.Enabled != nil && !*cfg.Enabled) { return nil, nil } @@ -247,6 +261,7 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([ Root: common.config.Root, ConfigPath: common.config.Workers.Containerd.CNIConfigPath, BinaryDir: common.config.Workers.Containerd.CNIBinaryPath, + PoolSize: common.config.Workers.Containerd.CNIPoolSize, }, } @@ -259,11 +274,12 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([ if cfg.Snapshotter != "" { snapshotter = cfg.Snapshotter } - opt, err := containerd.NewWorkerOpt(common.config.Root, cfg.Address, snapshotter, cfg.Namespace, cfg.Rootless, cfg.Labels, dns, nc, common.config.Workers.Containerd.ApparmorProfile, parallelismSem, common.traceSocket, ctd.WithTimeout(60*time.Second)) + opt, err := containerd.NewWorkerOpt(common.config.Root, cfg.Address, snapshotter, cfg.Namespace, cfg.Rootless, cfg.Labels, dns, nc, common.config.Workers.Containerd.ApparmorProfile, common.config.Workers.Containerd.SELinux, parallelismSem, common.traceSocket, ctd.WithTimeout(60*time.Second)) if err != nil { return nil, err } opt.GCPolicy = getGCPolicy(cfg.GCConfig, common.config.Root) + opt.BuildkitVersion = getBuildkitVersion() opt.RegistryHosts = resolverFunc(common.config) if platformsStr := cfg.Platforms; len(platformsStr) != 0 { @@ -280,7 +296,8 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([ return []worker.Worker{w}, nil } -func validContainerdSocket(socket string) bool { +func validContainerdSocket(cfg config.ContainerdConfig) bool { + socket := cfg.Address if strings.HasPrefix(socket, "tcp://") { // FIXME(AkihiroSuda): prohibit tcp? 
return true @@ -291,6 +308,14 @@ func validContainerdSocket(socket string) bool { logrus.Warnf("skipping containerd worker, as %q does not exist", socketPath) return false } - // TODO: actually dial and call introspection API + c, err := ctd.New(socketPath, ctd.WithDefaultNamespace(cfg.Namespace)) + if err != nil { + logrus.Warnf("skipping containerd worker, as failed to connect client to %q: %v", socketPath, err) + return false + } + if _, err := c.Server(context.Background()); err != nil { + logrus.Warnf("skipping containerd worker, as failed to call introspection API on %q: %v", socketPath, err) + return false + } return true } diff --git a/cmd/buildkitd/main_oci_worker.go b/cmd/buildkitd/main_oci_worker.go index a2cdaa8113d7..64cedacea156 100644 --- a/cmd/buildkitd/main_oci_worker.go +++ b/cmd/buildkitd/main_oci_worker.go @@ -27,6 +27,7 @@ import ( fuseoverlayfs "github.com/containerd/fuse-overlayfs-snapshotter" sgzfs "github.com/containerd/stargz-snapshotter/fs" sgzconf "github.com/containerd/stargz-snapshotter/fs/config" + sgzlayer "github.com/containerd/stargz-snapshotter/fs/layer" sgzsource "github.com/containerd/stargz-snapshotter/fs/source" remotesn "github.com/containerd/stargz-snapshotter/snapshot" "github.com/moby/buildkit/cmd/buildkitd/config" @@ -100,6 +101,11 @@ func init() { Usage: "path of cni binary files", Value: defaultConf.Workers.OCI.NetworkConfig.CNIBinaryPath, }, + cli.IntFlag{ + Name: "oci-cni-pool-size", + Usage: "size of cni network namespace pool", + Value: defaultConf.Workers.OCI.NetworkConfig.CNIPoolSize, + }, cli.StringFlag{ Name: "oci-worker-binary", Usage: "name of specified oci worker binary", @@ -109,6 +115,10 @@ func init() { Name: "oci-worker-apparmor-profile", Usage: "set the name of the apparmor profile applied to containers", }, + cli.BoolFlag{ + Name: "oci-worker-selinux", + Usage: "apply SELinux labels", + }, } n := "oci-worker-rootless" u := "enable rootless mode" @@ -222,6 +232,9 @@ func applyOCIFlags(c *cli.Context, cfg *config.Config) error { if c.GlobalIsSet("oci-cni-binary-dir") { cfg.Workers.OCI.NetworkConfig.CNIBinaryPath = c.GlobalString("oci-cni-binary-dir") } + if c.GlobalIsSet("oci-cni-pool-size") { + cfg.Workers.OCI.NetworkConfig.CNIPoolSize = c.GlobalInt("oci-cni-pool-size") + } if c.GlobalIsSet("oci-worker-binary") { cfg.Workers.OCI.Binary = c.GlobalString("oci-worker-binary") } @@ -231,6 +244,10 @@ func applyOCIFlags(c *cli.Context, cfg *config.Config) error { if c.GlobalIsSet("oci-worker-apparmor-profile") { cfg.Workers.OCI.ApparmorProfile = c.GlobalString("oci-worker-apparmor-profile") } + if c.GlobalIsSet("oci-worker-selinux") { + cfg.Workers.OCI.SELinux = c.GlobalBool("oci-worker-selinux") + } + return nil } @@ -281,6 +298,7 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker Root: common.config.Root, ConfigPath: common.config.Workers.OCI.CNIConfigPath, BinaryDir: common.config.Workers.OCI.CNIBinaryPath, + PoolSize: common.config.Workers.OCI.CNIPoolSize, }, } @@ -289,11 +307,12 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker parallelismSem = semaphore.NewWeighted(int64(cfg.MaxParallelism)) } - opt, err := runc.NewWorkerOpt(common.config.Root, snFactory, cfg.Rootless, processMode, cfg.Labels, idmapping, nc, dns, cfg.Binary, cfg.ApparmorProfile, parallelismSem, common.traceSocket, cfg.DefaultCgroupParent) + opt, err := runc.NewWorkerOpt(common.config.Root, snFactory, cfg.Rootless, processMode, cfg.Labels, idmapping, nc, dns, cfg.Binary, cfg.ApparmorProfile, 
cfg.SELinux, parallelismSem, common.traceSocket, cfg.DefaultCgroupParent) if err != nil { return nil, err } opt.GCPolicy = getGCPolicy(cfg.GCConfig, common.config.Root) + opt.BuildkitVersion = getBuildkitVersion() opt.RegistryHosts = hosts if platformsStr := cfg.Platforms; len(platformsStr) != 0 { @@ -391,11 +410,20 @@ func snapshotterFactory(commonRoot string, cfg config.OCIConfig, sm *session.Man } } snFactory.New = func(root string) (ctdsnapshot.Snapshotter, error) { + userxattr, err := overlayutils.NeedsUserXAttr(root) + if err != nil { + logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr) + } + opq := sgzlayer.OverlayOpaqueTrusted + if userxattr { + opq = sgzlayer.OverlayOpaqueUser + } fs, err := sgzfs.NewFilesystem(filepath.Join(root, "stargz"), sgzCfg, // Source info based on the buildkit's registry config and session sgzfs.WithGetSources(sourceWithSession(hosts, sm)), sgzfs.WithMetricsLogLevel(logrus.DebugLevel), + sgzfs.WithOverlayOpaqueType(opq), ) if err != nil { return nil, err diff --git a/cmd/buildkitd/util_linux.go b/cmd/buildkitd/util_linux.go index a089f1f95af2..cfbc0a5c99c3 100644 --- a/cmd/buildkitd/util_linux.go +++ b/cmd/buildkitd/util_linux.go @@ -22,9 +22,9 @@ func parseIdentityMapping(str string) (*idtools.IdentityMapping, error) { logrus.Debugf("user namespaces: ID ranges will be mapped to subuid ranges of: %s", username) - mappings, err := idtools.NewIdentityMapping(username) + mappings, err := idtools.LoadIdentityMapping(username) if err != nil { return nil, errors.Wrap(err, "failed to create ID mappings") } - return mappings, nil + return &mappings, nil } diff --git a/control/control.go b/control/control.go index 0d3e7976e5b7..2bd06db2576b 100644 --- a/control/control.go +++ b/control/control.go @@ -2,34 +2,49 @@ package control import ( "context" + "fmt" + "strconv" "sync" "sync/atomic" "time" - "github.com/moby/buildkit/util/bklog" - + contentapi "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/services/content/contentserver" + "github.com/docker/distribution/reference" + "github.com/mitchellh/hashstructure/v2" controlapi "github.com/moby/buildkit/api/services/control" apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/cmd/buildkitd/config" controlgateway "github.com/moby/buildkit/control/gateway" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/proc" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/throttle" "github.com/moby/buildkit/util/tracing/transform" + "github.com/moby/buildkit/version" "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "go.etcd.io/bbolt" sdktrace "go.opentelemetry.io/otel/sdk/trace" tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" 
"google.golang.org/grpc/status" ) @@ -42,6 +57,10 @@ type Opt struct { ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc Entitlements []string TraceCollector sdktrace.SpanExporter + HistoryDB *bbolt.DB + LeaseManager leases.Manager + ContentStore content.Store + HistoryConfig *config.HistoryConfig } type Controller struct { // TODO: ControlService @@ -49,6 +68,7 @@ type Controller struct { // TODO: ControlService buildCount int64 opt Opt solver *llbsolver.Solver + history *llbsolver.HistoryQueue cache solver.CacheManager gatewayForwarder *controlgateway.GatewayForwarder throttledGC func() @@ -61,14 +81,31 @@ func NewController(opt Opt) (*Controller, error) { gatewayForwarder := controlgateway.NewGatewayForwarder() - solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) + hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ + DB: opt.HistoryDB, + LeaseManager: opt.LeaseManager, + ContentStore: opt.ContentStore, + CleanConfig: opt.HistoryConfig, + }) + + s, err := llbsolver.New(llbsolver.Opt{ + WorkerController: opt.WorkerController, + Frontends: opt.Frontends, + CacheManager: cache, + CacheResolvers: opt.ResolveCacheImporterFuncs, + GatewayForwarder: gatewayForwarder, + SessionManager: opt.SessionManager, + Entitlements: opt.Entitlements, + HistoryQueue: hq, + }) if err != nil { return nil, errors.Wrap(err, "failed to create solver") } c := &Controller{ opt: opt, - solver: solver, + solver: s, + history: hq, cache: cache, gatewayForwarder: gatewayForwarder, } @@ -81,11 +118,17 @@ func NewController(opt Opt) (*Controller, error) { return c, nil } -func (c *Controller) Register(server *grpc.Server) error { +func (c *Controller) Close() error { + return c.opt.WorkerController.Close() +} + +func (c *Controller) Register(server *grpc.Server) { controlapi.RegisterControlServer(server, c) c.gatewayForwarder.Register(server) tracev1.RegisterTraceServiceServer(server, c) - return nil + + store := &roContentStore{c.opt.ContentStore} + contentapi.RegisterContentServer(server, contentserver.New(store)) } func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { @@ -205,6 +248,34 @@ func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceService return &tracev1.ExportTraceServiceResponse{}, nil } +func (c *Controller) ListenBuildHistory(req *controlapi.BuildHistoryRequest, srv controlapi.Control_ListenBuildHistoryServer) error { + if err := sendTimestampHeader(srv); err != nil { + return err + } + return c.history.Listen(srv.Context(), req, func(h *controlapi.BuildHistoryEvent) error { + if err := srv.Send(h); err != nil { + return err + } + return nil + }) +} + +func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.UpdateBuildHistoryRequest) (*controlapi.UpdateBuildHistoryResponse, error) { + if !req.Delete { + err := c.history.UpdateRef(ctx, req.Ref, func(r *controlapi.BuildHistoryRecord) error { + if req.Pinned == r.Pinned { + return nil + } + r.Pinned = req.Pinned + return nil + }) + return &controlapi.UpdateBuildHistoryResponse{}, err + } + + err := c.history.Delete(ctx, req.Ref) + return &controlapi.UpdateBuildHistoryResponse{}, err +} + func translateLegacySolveRequest(req *controlapi.SolveRequest) error { // translates ExportRef and ExportAttrs to new Exports (v0.4.0) if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { @@ -255,6 
+326,26 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, err } + + // if SOURCE_DATE_EPOCH is set, enable it for the exporter + if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok { + if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok { + if req.ExporterAttrs == nil { + req.ExporterAttrs = make(map[string]string) + } + req.ExporterAttrs[epoch.KeySourceDateEpoch] = v + } + } + + if v, ok := req.FrontendAttrs["build-arg:BUILDKIT_BUILDINFO"]; ok && v != "" { + if _, ok := req.ExporterAttrs["buildinfo"]; !ok { + if req.ExporterAttrs == nil { + req.ExporterAttrs = make(map[string]string) + } + req.ExporterAttrs["buildinfo"] = v + } + } + if req.Exporter != "" { exp, err := w.Exporter(req.Exporter, c.opt.SessionManager) if err != nil { @@ -266,32 +357,42 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* } } - var ( - cacheExporter remotecache.Exporter - cacheExportMode solver.CacheExportMode - cacheImports []frontend.CacheOptionsEntry - ) - if len(req.Cache.Exports) > 1 { - // TODO(AkihiroSuda): this should be fairly easy - return nil, errors.New("specifying multiple cache exports is not supported currently") + if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil { + return nil, err + } else if c != nil { + types := []string{} + for _, c := range c { + types = append(types, c.Type) + } + return nil, errors.Errorf("duplicate cache exports %s", types) } - - if len(req.Cache.Exports) == 1 { - e := req.Cache.Exports[0] + var cacheExporters []llbsolver.RemoteCacheExporter + for _, e := range req.Cache.Exports { cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type] if !ok { return nil, errors.Errorf("unknown cache exporter: %q", e.Type) } - cacheExporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) + var exp llbsolver.RemoteCacheExporter + exp.Exporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type) } if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) } else { - cacheExportMode = exportMode + exp.CacheExportMode = exportMode + } + if ignoreErrorStr, ok := e.Attrs["ignore-error"]; ok { + if ignoreError, supported := parseCacheExportIgnoreError(ignoreErrorStr); !supported { + bklog.G(ctx).Debugf("skipping invalid cache export ignore-error: %s", e.Attrs["ignore-error"]) + } else { + exp.IgnoreError = ignoreError + } } + cacheExporters = append(cacheExporters, exp) } + + var cacheImports []frontend.CacheOptionsEntry for _, im := range req.Cache.Imports { cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ Type: im.Type, @@ -299,6 +400,36 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* }) } + attests, err := attestations.Parse(req.FrontendAttrs) + if err != nil { + return nil, err + } + + var procs []llbsolver.Processor + + if attrs, ok := attests["sbom"]; ok { + src := attrs["generator"] + if src == "" { + return nil, errors.Errorf("sbom generator cannot be empty") + } + ref, err := reference.ParseNormalizedNamed(src) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src) + } + + useCache := true + if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" { + // disable cache if cache is disabled for all stages + useCache 
= false + } + ref = reference.TagNameOnly(ref) + procs = append(procs, proc.SBOMProcessor(ref.String(), useCache)) + } + + if attrs, ok := attests["provenance"]; ok { + procs = append(procs, proc.ProvenanceProcessor(attrs)) + } + resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{ Frontend: req.Frontend, Definition: req.Definition, @@ -306,10 +437,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* FrontendInputs: req.FrontendInputs, CacheImports: cacheImports, }, llbsolver.ExporterRequest{ - Exporter: expi, - CacheExporter: cacheExporter, - CacheExportMode: cacheExportMode, - }, req.Entitlements) + Exporter: expi, + CacheExporters: cacheExporters, + Type: req.Exporter, + Attrs: req.ExporterAttrs, + }, req.Entitlements, procs, req.Internal, req.SourcePolicy) if err != nil { return nil, err } @@ -319,6 +451,9 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* } func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { + if err := sendTimestampHeader(stream); err != nil { + return err + } ch := make(chan *client.SolveStatus, 8) eg, ctx := errgroup.WithContext(stream.Context()) @@ -332,68 +467,10 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con if !ok { return nil } - logSize := 0 - for { - retry := false - sr := controlapi.StatusResponse{} - for _, v := range ss.Vertexes { - sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - ProgressGroup: v.ProgressGroup, - }) - } - for _, v := range ss.Statuses { - sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Current: v.Current, - Total: v.Total, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for i, v := range ss.Logs { - sr.Logs = append(sr.Logs, &controlapi.VertexLog{ - Vertex: v.Vertex, - Stream: int64(v.Stream), - Msg: v.Data, - Timestamp: v.Timestamp, - }) - logSize += len(v.Data) + emptyLogVertexSize - // avoid logs growing big and split apart if they do - if logSize > 1024*1024 { - ss.Vertexes = nil - ss.Statuses = nil - ss.Logs = ss.Logs[i+1:] - retry = true - break - } - } - for _, v := range ss.Warnings { - sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ - Vertex: v.Vertex, - Level: int64(v.Level), - Short: v.Short, - Detail: v.Detail, - Info: v.SourceInfo, - Ranges: v.Range, - Url: v.URL, - }) - } - if err := stream.SendMsg(&sr); err != nil { + for _, sr := range ss.Marshal() { + if err := stream.SendMsg(sr); err != nil { return err } - if !retry { - break - } } } }) @@ -426,15 +503,26 @@ func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersR } for _, w := range workers { resp.Record = append(resp.Record, &apitypes.WorkerRecord{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: pb.PlatformsFromSpec(w.Platforms(true)), - GCPolicy: toPBGCPolicy(w.GCPolicy()), + ID: w.ID(), + Labels: w.Labels(), + Platforms: pb.PlatformsFromSpec(w.Platforms(true)), + GCPolicy: toPBGCPolicy(w.GCPolicy()), + BuildkitVersion: toPBBuildkitVersion(w.BuildkitVersion()), }) } return resp, nil } +func (c *Controller) Info(ctx context.Context, r *controlapi.InfoRequest) (*controlapi.InfoResponse, error) { + return &controlapi.InfoResponse{ + BuildkitVersion: &apitypes.BuildkitVersion{ + Package: version.Package, + 
Version: version.Version, + Revision: version.Revision, + }, + }, nil +} + func (c *Controller) gc() { c.gcmu.Lock() defer c.gcmu.Unlock() @@ -488,6 +576,14 @@ func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) { return solver.CacheExportModeMin, false } +func parseCacheExportIgnoreError(ignoreErrorStr string) (bool, bool) { + ignoreError, err := strconv.ParseBool(ignoreErrorStr) + if err != nil { + return false, false + } + return ignoreError, true +} + func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { policy := make([]*apitypes.GCPolicy, 0, len(in)) for _, p := range in { @@ -500,3 +596,76 @@ func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { } return policy } + +func toPBBuildkitVersion(in client.BuildkitVersion) *apitypes.BuildkitVersion { + return &apitypes.BuildkitVersion{ + Package: in.Package, + Version: in.Version, + Revision: in.Revision, + } +} + +func findDuplicateCacheOptions(cacheOpts []*controlapi.CacheOptionsEntry) ([]*controlapi.CacheOptionsEntry, error) { + seen := map[string]*controlapi.CacheOptionsEntry{} + duplicate := map[string]struct{}{} + for _, opt := range cacheOpts { + k, err := cacheOptKey(*opt) + if err != nil { + return nil, err + } + if _, ok := seen[k]; ok { + duplicate[k] = struct{}{} + } + seen[k] = opt + } + + var duplicates []*controlapi.CacheOptionsEntry + for k := range duplicate { + duplicates = append(duplicates, seen[k]) + } + return duplicates, nil +} + +func cacheOptKey(opt controlapi.CacheOptionsEntry) (string, error) { + if opt.Type == "registry" && opt.Attrs["ref"] != "" { + return opt.Attrs["ref"], nil + } + var rawOpt = struct { + Type string + Attrs map[string]string + }{ + Type: opt.Type, + Attrs: opt.Attrs, + } + hash, err := hashstructure.Hash(rawOpt, hashstructure.FormatV2, nil) + if err != nil { + return "", err + } + return fmt.Sprint(opt.Type, ":", hash), nil +} + +type roContentStore struct { + content.Store +} + +func (cs *roContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return nil, errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + return errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + return content.Info{}, errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Abort(ctx context.Context, ref string) error { + return errors.Errorf("read-only content store") +} + +const timestampKey = "buildkit-current-timestamp" + +func sendTimestampHeader(srv grpc.ServerStream) error { + return srv.SendHeader(metadata.Pairs(timestampKey, time.Now().Format(time.RFC3339Nano))) +} diff --git a/control/control_test.go b/control/control_test.go new file mode 100644 index 000000000000..8707287e277a --- /dev/null +++ b/control/control_test.go @@ -0,0 +1,148 @@ +package control + +import ( + "testing" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/stretchr/testify/require" +) + +func TestDuplicateCacheOptions(t *testing.T) { + var testCases = []struct { + name string + opts []*controlapi.CacheOptionsEntry + expected []*controlapi.CacheOptionsEntry + }{ + { + name: "avoids unique opts", + opts: []*controlapi.CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": "example.com/ref:v1.0.0", + }, + }, + { + Type: "local", + Attrs: map[string]string{ + "dest": "/path/for/export", + }, + }, + }, + 
expected: nil, + }, + { + name: "finds duplicate opts", + opts: []*controlapi.CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": "example.com/ref:v1.0.0", + }, + }, + { + Type: "registry", + Attrs: map[string]string{ + "ref": "example.com/ref:v1.0.0", + }, + }, + { + Type: "local", + Attrs: map[string]string{ + "dest": "/path/for/export", + }, + }, + { + Type: "local", + Attrs: map[string]string{ + "dest": "/path/for/export", + }, + }, + }, + expected: []*controlapi.CacheOptionsEntry{ + { + Type: "registry", + Attrs: map[string]string{ + "ref": "example.com/ref:v1.0.0", + }, + }, + { + Type: "local", + Attrs: map[string]string{ + "dest": "/path/for/export", + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := findDuplicateCacheOptions(tc.opts) + require.NoError(t, err) + require.ElementsMatch(t, tc.expected, result) + }) + } +} + +func TestParseCacheExportIgnoreError(t *testing.T) { + tests := map[string]struct { + expectedIgnoreError bool + expectedSupported bool + }{ + "": { + expectedIgnoreError: false, + expectedSupported: false, + }, + ".": { + expectedIgnoreError: false, + expectedSupported: false, + }, + "fake": { + expectedIgnoreError: false, + expectedSupported: false, + }, + "true": { + expectedIgnoreError: true, + expectedSupported: true, + }, + "True": { + expectedIgnoreError: true, + expectedSupported: true, + }, + "TRUE": { + expectedIgnoreError: true, + expectedSupported: true, + }, + "truee": { + expectedIgnoreError: false, + expectedSupported: false, + }, + "false": { + expectedIgnoreError: false, + expectedSupported: true, + }, + "False": { + expectedIgnoreError: false, + expectedSupported: true, + }, + "FALSE": { + expectedIgnoreError: false, + expectedSupported: true, + }, + "ffalse": { + expectedIgnoreError: false, + expectedSupported: false, + }, + } + + for ignoreErrStr, test := range tests { + t.Run(ignoreErrStr, func(t *testing.T) { + ignoreErr, supported := parseCacheExportIgnoreError(ignoreErrStr) + t.Log("checking expectedIgnoreError") + require.Equal(t, ignoreErr, test.expectedIgnoreError) + t.Log("checking expectedSupported") + require.Equal(t, supported, test.expectedSupported) + }) + } +} diff --git a/control/gateway/gateway.go b/control/gateway/gateway.go index 62c696d6c448..4451e022d322 100644 --- a/control/gateway/gateway.go +++ b/control/gateway/gateway.go @@ -111,6 +111,14 @@ func (gwf *GatewayForwarder) ReadFile(ctx context.Context, req *gwapi.ReadFileRe return fwd.ReadFile(ctx, req) } +func (gwf *GatewayForwarder) Evaluate(ctx context.Context, req *gwapi.EvaluateRequest) (*gwapi.EvaluateResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Evaluate") + } + return fwd.Evaluate(ctx, req) +} + func (gwf *GatewayForwarder) Ping(ctx context.Context, req *gwapi.PingRequest) (*gwapi.PongResponse, error) { fwd, err := gwf.lookupForwarder(ctx) if err != nil { diff --git a/control/init.go b/control/init.go deleted file mode 100644 index 2e86133e4120..000000000000 --- a/control/init.go +++ /dev/null @@ -1,10 +0,0 @@ -package control - -import controlapi "github.com/moby/buildkit/api/services/control" - -var emptyLogVertexSize int - -func init() { - emptyLogVertex := controlapi.VertexLog{} - emptyLogVertexSize = emptyLogVertex.Size() -} diff --git a/docs/annotations.md b/docs/annotations.md new file mode 100644 index 000000000000..a37573eeef9a --- /dev/null +++ b/docs/annotations.md @@ -0,0 +1,59 @@ +# Image 
annotations + +BuildKit supports attaching [OCI annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md) +to its built image manifests and indexes. These annotations can be used to +attach additional metadata to a built image, which may not be appropriate to +store in the image content itself. + +Annotations are similar to, but not a replacement for, image labels. Annotations +can be attached at almost every level of the resulting image output, while +labels can only be included in the image configuration object. Additionally, +unless overridden, image labels are inherited by other images that use the +image as a base. + +There are multiple pre-defined annotation keys that you can use, or you can +create your own. + +To build an image with annotations, you can use the `image` or `oci` (and +related) exporter types, along with the `annotation.*` option. + +For example, to attach a human-readable title to your image, you can use the +following buildctl invocation: + + buildctl build ... \ + --opt platform=amd64,arm64 \ + --output "type=image,name=target,annotation.org.opencontainers.image.title=Target" + +This annotation will be added to each built image manifest, so each platform +you built for (in the above, `amd64` and `arm64`) will get a copy of the annotation. + +You may want to set different annotations for different platforms, for example, to +provide a different documentation URL per manifest. You can do this +with platform-specific annotations, using the `annotation[<platform>].*` syntax +like so: + + buildctl build ... \ + --opt platform=amd64,arm64 \ + --output "type=image,name=target,annotation[linux/amd64].org.opencontainers.image.url=https://example.com/amd64,annotation[linux/arm64].org.opencontainers.image.url=https://example.com/arm64" + +BuildKit also allows you to finely control the exact destination that the +annotation will be written to, using the `annotation-<type>.*` syntax. You can +write to the following `<type>`s: + +- The `manifest` (the default, as above) +- The `manifest-descriptor` + - This adds the annotation into the image index's descriptor for the manifest + - (discarded if the output does not contain an image index) +- The `index` + - This adds the annotation into the image index root + - (discarded if the output does not contain an image index) +- The `index-descriptor` + - This adds the annotation into the OCI layout's descriptor for the index + - (discarded if the output does not contain an OCI layout) + +For example, if you want to add the annotation at the image index level, so +that the annotation is shared between all architectures, you can instead: + + buildctl build ... \ + --opt platform=amd64,arm64 \ + --output "type=image,name=target,annotation-index.org.opencontainers.image.title=Target Image" diff --git a/docs/attestations/README.md b/docs/attestations/README.md new file mode 100644 index 000000000000..b4e4c1e76449 --- /dev/null +++ b/docs/attestations/README.md @@ -0,0 +1,16 @@ +# Attestations + +BuildKit supports creating and attaching attestations to build artifacts. +Generated attestations use the [in-toto attestation format](https://github.com/in-toto/attestation). + +The currently supported attestation types are: + +- [SBOMs](./sbom.md) +- [SLSA Provenance](./slsa-provenance.md) + +Upon generation, attestations are attached differently to the export result: + +- For the `image`, `oci` and `docker` exporters, attestations are exported + using the attached [attestation storage](./attestation-storage.md).
+- For the `local` and `tar` exporters, attestations are written to separate + files within the output directory. diff --git a/docs/attestations/attestation-storage.md b/docs/attestations/attestation-storage.md new file mode 100644 index 000000000000..2df1d3668262 --- /dev/null +++ b/docs/attestations/attestation-storage.md @@ -0,0 +1,215 @@ +# Image Attestation Storage + +BuildKit supports creating and attaching attestations to build artifacts. These +attestations can provide valuable information from the build process, +including, but not limited to: [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain), +[SLSA Provenance](https://slsa.dev/provenance), build logs, etc. + +This document describes the current custom format used to store attestations, +which is designed to be compatible with current registry implementations. +In the future, we may support exporting attestations in additional formats. + +Attestations are stored as manifest objects in the image index, similar in +style to OCI artifacts. + +## Properties + +### Attestation Manifest + +Attestation manifests are attached to the root image index object, under a +separate [OCI image manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md). +Each attestation manifest can contain multiple [attestation blobs](#attestation-blob), +with all of the attestations in a manifest applying to a single platform +manifest. All properties of standard OCI and Docker manifests continue to +apply. + +The image `config` descriptor will point to a valid [image config](https://github.com/opencontainers/image-spec/blob/main/config.md), +however, it will not contain attestation-specific details, and should be +ignored as it is only included for compatibility purposes. + +Each image layer in `layers` will contain a descriptor for a single +[attestation blob](#attestation-blob). The `mediaType` of each layer will be +set in accordance with its contents, one of: + +- `application/vnd.in-toto+json` (currently, the only supported option) + + Indicates an in-toto attestation blob + +Any unknown `mediaType`s should be ignored. + +To assist attestation traversal, the following annotations may be set on each +layer descriptor: + +- `in-toto.io/predicate-type` + + This annotation will be set if the enclosed attestation is an in-toto + attestation (currently, the only supported option). The annotation will + be set to contain the same value as the `predicateType` property present + inside the attestation. + + When present, this annotation may be used by consumers to find the specific + attestation(s) they are looking for, without pulling the contents of the others. + +### Attestation Blob + +The contents of each layer will be a blob dependent on its `mediaType`. + +- `application/vnd.in-toto+json` + + The blob contents will contain a full [in-toto attestation statement](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement): + + ```json + { + "_type": "https://in-toto.io/Statement/v0.1", + "subject": [ + { + "name": "<name>", + "digest": {"<digest-alg>": "<digest-value>"} + }, + ... + ], + "predicateType": "<predicate-type>", + "predicate": { ... } + } + ``` + + The subject of the attestation should be set to the same digest as the + target manifest described in the [Attestation Manifest Descriptor](#attestation-manifest-descriptor), + or some object within.
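+ +As an illustrative sketch tying the properties above together (not part of the +specification), a consumer that has already fetched an attestation manifest as +JSON could use the `in-toto.io/predicate-type` layer annotation to select only +the SPDX SBOM blobs, without downloading the other attestation layers; +`attestation-manifest.json` here is an assumed local copy of the manifest: + +```bash +# Sketch: list the digests of SPDX SBOM attestation blobs in a fetched +# attestation manifest, skipping any other attestation layers. +jq -r '.layers[] + | select(.mediaType == "application/vnd.in-toto+json") + | select(.annotations["in-toto.io/predicate-type"] == "https://spdx.dev/Document") + | .digest' attestation-manifest.json +```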
+ +### Attestation Manifest Descriptor + +Attestation manifests are attached to the root [image index](https://github.com/opencontainers/image-spec/blob/main/image-index.md), +in the `manifests` key, after all the original runnable manifests. All +properties of standard OCI and Docker manifest descriptors continue to apply. + +To prevent container runtimes from accidentally pulling or running the image +described in the manifest, the `platform` property of the attestation manifest +will be set to `unknown/unknown`, as follows: + +```json +"platform": { + "architecture": "unknown", + "os": "unknown" +} +``` + +To assist index traversal, the following annotations will be set on the +manifest descriptor: + +- `vnd.docker.reference.type` + + This annotation describes the type of the artifact, and will be set + to `attestation-manifest`. If any other value is specified, the entire + manifest should be ignored. + +- `vnd.docker.reference.digest` + + This annotation will contain the digest of the object in the image index that + the attestation manifest refers to. + + When present, this annotation can be used to find the matching attestation + manifest for a selected image manifest. + +## Examples + +*Example showing an SBOM attestation attached to a `linux/amd64` image* + +#### Image index (`sha256:94acc2ca70c40f3f6291681f37ce9c767e3d251ce01c7e4e9b98ccf148c26260`): + +This image index defines two descriptors: an AMD64 image `sha256:23678f31..` and an attestation manifest `sha256:02cb9aa7..` for that image. + +```json +{ + "mediaType": "application/vnd.oci.image.index.v1+json", + "schemaVersion": 2, + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827", + "size": 1234, + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:02cb9aa7600e73fcf41ee9f0f19cc03122b2d8be43d41ce4b21335118f5dd943", + "size": 1234, + "annotations": { + "vnd.docker.reference.digest": "sha256:23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827", + "vnd.docker.reference.type": "attestation-manifest" + }, + "platform": { + "architecture": "unknown", + "os": "unknown" + } + } + ] +} +``` + +#### Attestation manifest (`sha256:02cb9aa7600e73fcf41ee9f0f19cc03122b2d8be43d41ce4b21335118f5dd943`): + +This attestation manifest contains one attestation that is an in-toto attestation that contains a "https://spdx.dev/Document" predicate, signifying that it is defining a SBOM for the image.
+ +```json +{ + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "schemaVersion": 2, + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:a781560066f20ec9c28f2115a95a886e5e71c7c7aa9d8fd680678498b82f3ea3", + "size": 123 + }, + "layers": [ + { + "mediaType": "application/vnd.in-toto+json", + "digest": "sha256:133ae3f9bcc385295b66c2d83b28c25a9f294ce20954d5cf922dda860429734a", + "size": 1234, + "annotations": { + "in-toto.io/predicate-type": "https://spdx.dev/Document" + } + } + ] +} +``` + +#### Image config (`sha256:a781560066f20ec9c28f2115a95a886e5e71c7c7aa9d8fd680678498b82f3ea3`): + +```json +{ + "architecture": "unknown", + "os": "unknown", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [ + "sha256:133ae3f9bcc385295b66c2d83b28c25a9f294ce20954d5cf922dda860429734a" + ] + } +} +``` + +#### Layer content (`sha256:133ae3f9bcc385295b66c2d83b28c25a9f294ce20954d5cf922dda860429734a`): + +Attestation body containing the SBOM data listing the packages used during the build in SPDX format. + +```json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://spdx.dev/Document", + "subject": [ + { + "name": "_", + "digest": { + "sha256": "23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827" + } + } + ], + "predicate": { + "SPDXID": "SPDXRef-DOCUMENT", + "spdxVersion": "SPDX-2.2", + ... +``` diff --git a/docs/attestations/sbom-protocol.md b/docs/attestations/sbom-protocol.md new file mode 100644 index 000000000000..1505fbfffcca --- /dev/null +++ b/docs/attestations/sbom-protocol.md @@ -0,0 +1,72 @@ +# SBOM Scanning Protocol + +BuildKit supports automatic creation of [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain) +for builds, attaching them as [image attestations](./attestation-storage.md). + +To scan the filesystem contents, a user can specify an SBOM generator image. +When run, this image is passed the rootfs of the build stage as a read-only +mount, and writes its SBOM scan data to a specified directory. + +The SBOM generator image is expected to follow the rules of the BuildKit SBOM +generator protocol, defined in this document. + +> **Note** +> +> Currently, only SBOMs in the [SPDX](https://spdx.dev) JSON format are +> supported. +> +> These SBOMs will be attached to the final image as an in-toto attestation +> with the `https://spdx.dev/Document` predicate type. + +## Implementations + +The following SBOM generator images are available: + +- [docker/buildkit-syft-scanner](https://github.com/docker/buildkit-syft-scanner) + +## Parameters + +A single run of a generator may be passed multiple paths, specifying multiple +target filesystems to scan - the scanner should scan all of them. Each filesystem +target has a **name**, specified by the final component of the path for that +target. A generator may produce any number of scans for the available targets - +though ideally it should aim to produce a single scan per target. + +These parameters will be passed to the generator image as environment variables +by BuildKit: + +- `BUILDKIT_SCAN_DESTINATION` (required) + + This variable specifies the directory where the scanner should write its + SBOM data. Scanners should write their SBOMs to `$BUILDKIT_SCAN_DESTINATION/<scan>.spdx.json` + where `<scan>` is the name of an arbitrary scan. A scanner may produce + multiple scans for a single target - scan names must be unique within a + target, but should not be considered significant by producers or consumers.
+ +- `BUILDKIT_SCAN_SOURCE` (required) + + This variable specifies the main target, passing the path to the root + filesystem of the final build result. + + The scanner should scan this filesystem, and write its SBOM result to + `$BUILDKIT_SCAN_DESTINATION/$(basename $BUILDKIT_SCAN_SOURCE).spdx.json`. + +- `BUILDKIT_SCAN_SOURCE_EXTRAS` (optional) + + This variable specifies additional targets, passing the path to a directory + of other root filesystems. If the variable is not set, is empty, or contains + a directory that does not exist, then no extras should be scanned. + + The scanner should iterate through this directory, and write its SBOM scans + to `$BUILDKIT_SCAN_DESTINATION/<scan>.spdx.json`, similar to above. + +A scanner must not error if optional parameters are not set. + +The scanner should produce SBOM results for all filesystems specified in +`BUILDKIT_SCAN_SOURCE` or `BUILDKIT_SCAN_SOURCE_EXTRAS` but must not produce +SBOM results for any other filesystems. + +## Further reading + +See [frontend/attest/sbom.go](https://github.com/moby/buildkit/blob/master/frontend/attest/sbom.go) +for the code that invokes the user-specified generator. diff --git a/docs/attestations/sbom.md b/docs/attestations/sbom.md new file mode 100644 index 000000000000..9cb008775a36 --- /dev/null +++ b/docs/attestations/sbom.md @@ -0,0 +1,198 @@ +# SBOMs + +BuildKit supports automatic creation of [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain) +to record the software components that make up the final image. These consist +of a list of software packages and the files that they own. + +They also usually contain metadata about each component, such as software +licenses, authors, and unique package identifiers, which can be used for +vulnerability scanning. + +All SBOMs generated by BuildKit are wrapped inside [in-toto attestations](https://github.com/in-toto/attestation) +in the [SPDX](https://spdx.dev) JSON format. They can be generated using +generator images that follow the [SBOM generator protocol](./sbom-protocol.md). + +When the final output format is a container image, these SBOMs are attached +using the [attestation storage](./attestation-storage.md). + +To build an image with an attached SBOM (derived using the built-in default scanner, +[docker/buildkit-syft-scanner](https://github.com/docker/buildkit-syft-scanner)), +use the `attest:sbom` option: + +```bash +buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --opt attest:sbom= +``` + +You can also specify a custom SBOM generator image: + +```bash +buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --opt attest:sbom=generator=<registry>/<image> +``` + +## Dockerfile configuration + +By default, only the final build result is scanned - because of this, the +resulting SBOM will not include build-time dependencies that may be installed +in separate stages or the build context. This could cause you to miss +vulnerabilities in those dependencies, which could impact the security of your +final build artifacts. + +To include these build-time dependencies from your Dockerfile, you can set the +build arguments `BUILDKIT_SBOM_SCAN_CONTEXT` and `BUILDKIT_SBOM_SCAN_STAGE` to +additionally scan the build context and other build stages respectively. These +build arguments are special values, and cannot be used for variable +substitutions or as environment variables from within the Dockerfile, as they +exist solely to change the behavior of the scanner.
+ +Both arguments can be set as global meta arguments (before a `FROM`) or can be +individually set in each stage. If set globally, the value is propagated for +each stage in the Dockerfile. They can take the following values: + +- `true`: enables context/stage scanning (e.g. `BUILDKIT_SBOM_SCAN_STAGE=true`) +- `false`: disables context/stage scanning (e.g. `BUILDKIT_SBOM_SCAN_STAGE=false`) +- `<stage1>[,<stage2>,...]`: enables context/stage scanning for all stages + listed in the comma-separated list of provided stages (e.g. + `BUILDKIT_SBOM_SCAN_STAGE=x,y` will scan stages called `x` and `y`). + +Scanning will *never* be enabled for a stage that is not built, even if it is +enabled via the build arguments. + +For example: + +```dockerfile +FROM alpine:latest as build +# enable scanning for the intermediate build stage +ARG BUILDKIT_SBOM_SCAN_STAGE=true +WORKDIR /src +COPY . . +RUN ... # build some software + +FROM scratch as final +# scan the build context only if the build is run to completion +ARG BUILDKIT_SBOM_SCAN_CONTEXT=true +COPY --from=build /path/to/software /path/to/software +``` + +You can also directly override these `ARG`s on the command line, by passing +them as build arguments: + +```bash +buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --opt build-arg:BUILDKIT_SBOM_SCAN_STAGE=<value> \ + --opt build-arg:BUILDKIT_SBOM_SCAN_CONTEXT=<value> \ + --opt attest:sbom= +``` + +These overrides only apply to `ARG` definitions that already exist in the +Dockerfile, so they cannot enable scanning for stages that do not declare +`BUILDKIT_SBOM_SCAN` arguments. + +## Output + +To inspect the SBOMs that were generated and attached to a container image, +you can use the `docker buildx imagetools` command to explore the resulting +image in your registry, following the format described in the [attestation storage](./attestation-storage.md). + +For example, for a simple Docker image based on `alpine:latest`, we might get +the following SBOM: + +```json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://spdx.dev/Document", + "subject": [ + { + "name": "pkg:docker/<registry>/<image>@<tag>?platform=<platform>", + "digest": { + "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" + } + } + ], + "predicate": { + "SPDXID": "SPDXRef-DOCUMENT", + "name": "/run/src/core", + "spdxVersion": "SPDX-2.2", + "creationInfo": { + "created": "2022-11-09T10:12:01.338817553Z", + "creators": [ + "Organization: Anchore, Inc", + "Tool: syft-[not provided]" + ], + "licenseListVersion": "3.18" + }, + "dataLicense": "CC0-1.0", + "documentNamespace": "https://anchore.com/syft/dir/run/src/core-4006bb64-24b1-4a22-a18f-94efc6b90edb", + "files": [ + { + "SPDXID": "SPDXRef-1ac501c94e2f9f81", + "comment": "layerID: sha256:9b18e9b68314027565b90ff6189d65942c0f7986da80df008b8431276885218e", + "fileName": "/bin/busybox", + "licenseConcluded": "NOASSERTION" + }, + ... + ], + "packages": [ + { + "SPDXID": "SPDXRef-980737451f148c56", + "description": "Size optimized toolbox of many common UNIX utilities", + "downloadLocation": "https://busybox.net/", + "externalRefs": [ + { + "referenceCategory": "SECURITY", + "referenceLocator": "cpe:2.3:a:busybox:busybox:1.35.0-r17:*:*:*:*:*:*:*", + "referenceType": "cpe23Type" + }, + { + "referenceCategory": "PACKAGE_MANAGER", + "referenceLocator": "pkg:alpine/busybox@1.35.0-r17?arch=aarch64&upstream=busybox&distro=alpine-3.16.2", + "referenceType": "purl" + } + ], + "filesAnalyzed": false, + "hasFiles": [ + "SPDXRef-1ac501c94e2f9f81", + ...
+ ], + "licenseConcluded": "GPL-2.0-only", + "licenseDeclared": "GPL-2.0-only", + "name": "busybox", + "originator": "Person: Sören Tempel ", + "sourceInfo": "acquired package info from APK DB: lib/apk/db/installed", + "versionInfo": "1.35.0-r17" + }, + ... + ], + "relationships": [ + { + "relatedSpdxElement": "SPDXRef-1ac501c94e2f9f81", + "relationshipType": "CONTAINS", + "spdxElementId": "SPDXRef-980737451f148c56" + }, + ... + ] + } +} +``` + +The exact output will depend on the generator you use; however, generally: + +- The `files` key will contain a list of all files in the image. +- The `packages` key will contain a list of all packages discovered from the + image. +- The `relationships` key links together various files and packages, together + with metadata about how they relate to each other. + +Entries in the `files` and `packages` lists will contain a `comment` field that +contains the `sha256` digest of the layer that introduced it, if that layer is +present in the final image. diff --git a/docs/attestations/slsa-definitions.md b/docs/attestations/slsa-definitions.md new file mode 100644 index 000000000000..46e198efc1b2 --- /dev/null +++ b/docs/attestations/slsa-definitions.md @@ -0,0 +1,589 @@ +# SLSA definitions + +BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for builds that +it runs. + +The provenance format generated by BuildKit is defined by the +[SLSA Provenance format](https://slsa.dev/provenance/v0.2). + +This page describes how BuildKit populates each field, and whether the field gets +included when you generate attestations with `mode=min` and `mode=max`. + +## `builder.id` [(SLSA)](https://slsa.dev/provenance/v0.2#builder.id) + +Included with `mode=min` and `mode=max`. + +The `builder.id` field is set to the URL of the build, if available. + +```json + "builder": { + "id": "https://github.com/docker/buildx/actions/runs/3709599520" + }, +``` + +This value can be set using the `builder-id` attestation parameter. + +## `buildType` [(SLSA)](https://slsa.dev/provenance/v0.2#buildType) + +Included with `mode=min` and `mode=max`. + +The `buildType` field is set to `https://mobyproject.org/buildkit@v1` and can be +used to determine the structure of the provenance content. + +```json + "buildType": "https://mobyproject.org/buildkit@v1", +``` + +## `invocation.configSource` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.configSource) + +Included with `mode=min` and `mode=max`. + +Describes the config that initialized the build. + +```json + "invocation": { + "configSource": { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + }, + "entryPoint": "Dockerfile" + }, + ... + }, +``` + +For builds initialized from a remote context, like a Git or HTTP URL, this +object defines the context URL and its immutable digest in the `uri` and `digest` fields. +For builds using a local frontend, such as a Dockerfile, the `entryPoint` field defines the path +for the frontend file that initialized the build (`filename` frontend option). + +## `invocation.parameters` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.parameters) + +Partially included with `mode=min`. + +Describes the build inputs passed to the build.
+ +```json + "invocation": { + "parameters": { + "frontend": "gateway.v0", + "args": { + "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR": "1", + "label:FOO": "bar", + "source": "docker/dockerfile-upstream:master", + "target": "release" + }, + "secrets": [ + { + "id": "GIT_AUTH_HEADER", + "optional": true + }, + ... + ], + "ssh": [], + "locals": [] + }, + ... + }, +``` + +The following fields are included with both `mode=min` and `mode=max`: + +- `locals` lists any local sources used in the build, including the build + context and frontend file. +- `frontend` defines the type of BuildKit frontend used for the build. Currently, + this can be `dockerfile.v0` or `gateway.v0`. +- `args` defines the build arguments passed to the BuildKit frontend. + + The keys inside the `args` object reflect the options as BuildKit receives + them. For example, `build-arg` and `label` prefixes are used for build + arguments and labels, and the `target` key defines the target stage that was + built. The `source` key defines the source image for the Gateway frontend, if + used. + +The following fields are only included with `mode=max`: + +- `secrets` defines secrets used during the build. Note that actual secret + values are not included. +- `ssh` defines the ssh forwards used during the build. + +## `invocation.environment` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.environment) + +Included with `mode=min` and `mode=max`. + +```json + "invocation": { + "environment": { + "platform": "linux/amd64" + }, + ... + }, +``` + +The only value BuildKit currently sets is the `platform` of the current build +machine. Note that this is not necessarily the platform of the build result, +which can be determined from the `in-toto` subject field. + +## `materials` [(SLSA)](https://slsa.dev/provenance/v0.2#materials) + +Included with `mode=min` and `mode=max`. + +Defines all the external artifacts that were part of the build. The value +depends on the type of artifact: + +- The URLs of Git repositories containing source code for the image +- HTTP URLs, if you are building from a remote tarball or one that was included + using an `ADD` command in the Dockerfile +- Any Docker images used during the build + +The URLs to the Docker images will be in +[Package URL](https://github.com/package-url/purl-spec) format. + +All the build materials will include the immutable checksum of the artifact. +When building from a mutable tag, you can use the digest information to +determine if the artifact has been updated compared to when the build ran. + +```json + "materials": [ + { + "uri": "pkg:docker/alpine@3.17?platform=linux%2Famd64", + "digest": { + "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + }, + { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + } + }, + ... + ], +``` + +## `buildConfig` [(SLSA)](https://slsa.dev/provenance/v0.2#buildConfig) + +Only included with `mode=max`. + +Defines the build steps performed during the build. + +BuildKit internally uses an LLB definition to execute the build steps. The LLB +definition of the build steps is defined in the `buildConfig.llbDefinition` field. + +Each LLB step is the JSON definition of the +[LLB ProtoBuf API](https://github.com/moby/buildkit/blob/v0.10.0/solver/pb/ops.proto). +The dependencies for a vertex in the LLB graph can be found in the `inputs` +field for every step.
+
+```json
+  "buildConfig": {
+    "llbDefinition": [
+      {
+        "id": "step0",
+        "op": {
+          "Op": {
+            "exec": {
+              "meta": {
+                "args": [
+                  "/bin/sh",
+                  "-c",
+                  "go build ."
+                ],
+                "env": [
+                  "PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+                  "GOPATH=/go",
+                  "GOFLAGS=-mod=vendor"
+                ],
+                "cwd": "/src"
+              },
+              "mounts": [...]
+            }
+          },
+          "platform": {...}
+        },
+        "inputs": [
+          "step8:0",
+          "step2:0"
+        ]
+      },
+      ...
+    ]
+  },
+```
+
+## `metadata.buildInvocationId` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildInvocationId)
+
+Included with `mode=min` and `mode=max`.
+
+Unique identifier for the build invocation. When building a multi-platform image
+with a single build request, this value will be shared by all the platform
+versions of the image.
+
+```json
+  "metadata": {
+    "buildInvocationID": "rpv7a389uzil5lqmrgwhijwjz",
+    ...
+  },
+```
+
+## `metadata.buildStartedOn` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildStartedOn)
+
+Included with `mode=min` and `mode=max`.
+
+Timestamp when the build started.
+
+```json
+  "metadata": {
+    "buildStartedOn": "2021-11-17T15:00:00Z",
+    ...
+  },
+```
+
+## `metadata.buildFinishedOn` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildFinishedOn)
+
+Included with `mode=min` and `mode=max`.
+
+Timestamp when the build finished.
+
+```json
+  "metadata": {
+    "buildFinishedOn": "2021-11-17T15:01:00Z",
+    ...
+  },
+```
+
+## `metadata.completeness` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.completeness)
+
+Included with `mode=min` and `mode=max`.
+
+Defines whether the provenance information is complete.
+
+`completeness.parameters` is true if all the build arguments are included in the
+`invocation.parameters` field. When building with `min` mode, the build
+arguments are not included in the provenance information and the parameters are
+not complete. Parameters are also not complete on direct LLB builds that did
+not use a frontend.
+
+`completeness.environment` is always true for BuildKit builds.
+
+`completeness.materials` is true if the `materials` field includes all the
+dependencies of the build. When building from an untracked source in a local
+directory, the materials are not complete, whereas when building from a remote
+Git repository all materials can be tracked by BuildKit, and
+`completeness.materials` is true.
+
+```json
+  "metadata": {
+    "completeness": {
+      "parameters": true,
+      "environment": true,
+      "materials": true
+    },
+    ...
+  },
+```
+
+## `metadata.reproducible` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.reproducible)
+
+Defines whether the build result is supposed to be byte-by-byte reproducible.
+This value can be set by the user with the `reproducible=true` attestation
+parameter.
+
+```json
+  "metadata": {
+    "reproducible": false,
+    ...
+  },
+```
+
+## `metadata.https://mobyproject.org/buildkit@v1#hermetic`
+
+Included with `mode=min` and `mode=max`.
+
+This extension field is set to true if the build was hermetic and did not access
+the network. In Dockerfiles, a build is hermetic if it does not use `RUN`
+commands, or if it disables networking with the `--network=none` flag.
+
+```json
+  "metadata": {
+    "https://mobyproject.org/buildkit@v1#hermetic": true,
+    ...
+  },
+```
+
+## `metadata.https://mobyproject.org/buildkit@v1#metadata`
+
+Partially included with `mode=min`.
+
+This extension field defines BuildKit-specific additional metadata that is not
+part of the SLSA provenance spec.
+
+```json
+  "metadata": {
+    "https://mobyproject.org/buildkit@v1#metadata": {
+      "source": {...},
+      "layers": {...},
+      "vcs": {...}
+    },
+    ...
+  },
+```
+
+### `source`
+
+Only included with `mode=max`.
+
+Defines a source mapping of LLB build steps, defined in the
+`buildConfig.llbDefinition` field, to their original source code (for example,
+Dockerfile commands). The `source.locations` field contains the ranges of all
+the Dockerfile commands run in an LLB step. The `source.infos` array contains
+the source code itself. This mapping is present if the BuildKit frontend
+provided it when creating the LLB definition.
+
+### `layers`
+
+Only included with `mode=max`.
+
+Defines the layer mapping of LLB build step mounts defined in
+`buildConfig.llbDefinition` to the OCI descriptors of equivalent layers. This
+mapping is present if the layer data was available, usually when the
+attestation is for an image, or if the build step pulled in image data as part
+of the build.
+
+### `vcs`
+
+Included with `mode=min` and `mode=max`.
+
+Defines optional metadata for the version control system used for the build. If
+a build uses a remote context from a Git repository, BuildKit extracts the
+details of the version control system automatically and includes them in the
+`invocation.configSource` field. But if the build uses a source from a local
+directory, the VCS information is lost even if the directory contains a Git
+repository. In this case, the build client can send additional `vcs:source` and
+`vcs:revision` build options, and BuildKit will add them to the provenance
+attestations as extra metadata. Note that, unlike the
+`invocation.configSource` field, BuildKit doesn't verify the `vcs` values, so
+they can't be trusted and should only be used as a metadata hint.
+
+## Output
+
+To inspect the provenance that was generated and attached to a container image,
+you can use the `docker buildx imagetools` command to examine the image in a
+registry. Inspecting the attestation displays the format described in the
+[attestation storage specification](./attestation-storage.md).
+ +For example, inspecting a simple Docker image based on `alpine:latest` results +in a provenance attestation similar to the following, for a `mode=min` build: + +```json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://slsa.dev/provenance/v0.2", + "subject": [ + { + "name": "pkg:docker//@?platform=", + "digest": { + "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" + } + } + ], + "predicate": { + "builder": { + "id": "" + }, + "buildType": "https://mobyproject.org/buildkit@v1", + "materials": [ + { + "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", + "digest": { + "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + } + ], + "invocation": { + "configSource": { + "entryPoint": "Dockerfile" + }, + "parameters": { + "frontend": "dockerfile.v0", + "args": {}, + "locals": [ + { + "name": "context" + }, + { + "name": "dockerfile" + } + ] + }, + "environment": { + "platform": "linux/amd64" + } + }, + "metadata": { + "buildInvocationID": "yirbp1aosi1vqjmi3z6bc75nb", + "buildStartedOn": "2022-12-08T11:48:59.466513707Z", + "buildFinishedOn": "2022-12-08T11:49:01.256820297Z", + "reproducible": false, + "completeness": { + "parameters": true, + "environment": true, + "materials": false + }, + "https://mobyproject.org/buildkit@v1#metadata": {} + } + } +} +``` + +For a similar build, but with `mode=max`: + +```json +{ + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://slsa.dev/provenance/v0.2", + "subject": [ + { + "name": "pkg:docker//@?platform=", + "digest": { + "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" + } + } + ], + "predicate": { + "builder": { + "id": "" + }, + "buildType": "https://mobyproject.org/buildkit@v1", + "materials": [ + { + "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", + "digest": { + "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + } + ], + "invocation": { + "configSource": { + "entryPoint": "Dockerfile" + }, + "parameters": { + "frontend": "dockerfile.v0", + "args": {}, + "locals": [ + { + "name": "context" + }, + { + "name": "dockerfile" + } + ] + }, + "environment": { + "platform": "linux/amd64" + } + }, + "buildConfig": { + "llbDefinition": [ + { + "id": "step0", + "op": { + "Op": { + "source": { + "identifier": "docker-image://docker.io/library/alpine:latest@sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + }, + "platform": { + "Architecture": "amd64", + "OS": "linux" + }, + "constraints": {} + } + }, + { + "id": "step1", + "op": { + "Op": null + }, + "inputs": ["step0:0"] + } + ] + }, + "metadata": { + "buildInvocationID": "46ue2x93k3xj5l463dektwldw", + "buildStartedOn": "2022-12-08T11:50:54.953375437Z", + "buildFinishedOn": "2022-12-08T11:50:55.447841328Z", + "reproducible": false, + "completeness": { + "parameters": true, + "environment": true, + "materials": false + }, + "https://mobyproject.org/buildkit@v1#metadata": { + "source": { + "locations": { + "step0": { + "locations": [ + { + "ranges": [ + { + "start": { + "line": 1 + }, + "end": { + "line": 1 + } + } + ] + } + ] + } + }, + "infos": [ + { + "filename": "Dockerfile", + "data": "RlJPTSBhbHBpbmU6bGF0ZXN0Cg==", + "llbDefinition": [ + { + "id": "step0", + "op": { + "Op": { + "source": { + "identifier": "local://dockerfile", + "attrs": { + "local.differ": "none", + "local.followpaths": "[\"Dockerfile\",\"Dockerfile.dockerignore\",\"dockerfile\"]", + "local.session": 
"q2jnwdkas0i0iu4knchd92jaz", + "local.sharedkeyhint": "dockerfile" + } + } + }, + "constraints": {} + } + }, + { + "id": "step1", + "op": { + "Op": null + }, + "inputs": ["step0:0"] + } + ] + } + ] + }, + "layers": { + "step0:0": [ + [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:c158987b05517b6f2c5913f3acef1f2182a32345a304fe357e3ace5fadcad715", + "size": 3370706 + } + ] + ] + } + } + } + } +} +``` diff --git a/docs/attestations/slsa-provenance.md b/docs/attestations/slsa-provenance.md new file mode 100644 index 000000000000..a1437e717027 --- /dev/null +++ b/docs/attestations/slsa-provenance.md @@ -0,0 +1,99 @@ +# SLSA provenance + +BuildKit supports automatic creation of provenance attestations for the build +process. Provenance attestations record information describing how a build was +created, and is important for tracking the security of your software artifacts. + +Provenance attestations created by BuildKit include details such as: + +- Build parameters and environment. +- Build timestamps. +- Version control metadata for your build sources. +- Build dependencies with their immutable checksums. For example, base images or external URLs used by the build. +- Descriptions of all build steps, with their source and layer mappings. + +Provenance generated by BuildKit is wrapped inside [in-toto attestations](https://github.com/in-toto/attestation) +in the [SLSA Provenance format](https://slsa.dev/provenance/v0.2). + +For more information about how the attestation fields get generated, see [SLSA definitions](./slsa-definitions.md). + +## Build with provenance attestations + +To build an image with provenance attestations using `buildctl`, use the `attest:provenance` option: + +```bash +buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --opt attest:provenance= +``` + +You can also customize the attestations using parameters: + +```bash +buildctl build \ + --frontend=dockerfile.v0 \ + --local context=. \ + --local dockerfile=. \ + --opt attest:provenance=mode=min,inline-only=true +``` + +All BuildKit exporters support attaching attestations to build results. +When the final output format is a container image (`image` or `oci` exporter), provenance is attached +to the image using the format described in the [attestation storage specification](./attestation-storage.md). +When creating a multi-platform image, each platform version of the image gets its own provenance. + +If you use the `local` or `tar` exporter, the provenance will be written to a file named `provenance.json` +and exported with your build result, in the root directory. + +## Parameters + +| Parameter | Type | Default | Description | +| -------------- | -------------- | ---------------- | ----------------------------------------------------------------------------------------------------------- | +| `mode` | `min`,`max` | `max` | Configures the amount of provenance to be generated. See [mode](#mode) | +| `builder-id` | String | | Explicitly set SLSA [`builder.id`](https://slsa.dev/provenance/v0.2#builder.id) field | +| `filename` | String | `provenance.json` | Set filename for provenance attestation when exported with `local` or `tar` exporter | +| `reproducible` | `true`,`false` | `false` | Explicitly set SLSA [`metadata.reproducible`](https://slsa.dev/provenance/v0.2#metadata.reproducible) field | +| `inline-only` | `true`,`false` | `false` | Only embed provenance into exporters that support inline content. 
See [inline-only](#inline-only) |
+
+### `mode`
+
+Provenance can be generated in one of two modes: `min` or `max`. By default,
+when provenance is enabled, the `mode` parameter will be set to `max`.
+
+In `min` mode, BuildKit generates only the bare minimum amount of provenance,
+including:
+
+- Build timestamps
+- The frontend used
+- The build materials
+
+However, the values of build arguments, the identities of secrets, and rich
+layer metadata will not be included. `mode=min` should be safe to set on all
+builds, as it does not leak information from any part of the build environment.
+
+In `max` mode, BuildKit generates all of the above, as well as:
+
+- The source Dockerfile, and rich layer metadata with sourcemaps to connect the
+  source with the build result
+- The values of passed build arguments
+- Metadata about secrets and ssh mounts
+
+Wherever possible, you should prefer `mode=max`, as it contains significantly
+more detailed information for analysis. However, on some builds it may not be
+appropriate, as it includes the values of various build arguments and metadata
+about secrets. Such builds should be refactored to pass sensitive values
+through secrets wherever possible, to prevent unnecessary information leakage.
+
+### `inline-only`
+
+By default, provenance is included in all exporters that support
+attestations. The `inline-only` parameter restricts this behavior, including
+the provenance results only in exporters that support inline content,
+specifically exporters that produce container images.
+
+Since other exporters write attestations to separate files in their output
+filesystems, you may not want to include the provenance in those cases.
+
diff --git a/docs/build-repro.md b/docs/build-repro.md
index 4c11bd5755a1..9b83ec0593ec 100644
--- a/docs/build-repro.md
+++ b/docs/build-repro.md
@@ -1,129 +1,92 @@
 # Build reproducibility
 
-## Build dependencies
+## Reproducing the pinned dependencies
 
-Build dependencies are generated when your image has been built. These
-dependencies include versions of used images, git repositories and HTTP URLs
-used by LLB `Source` operation as well as build request attributes.
+Reproducing the pinned dependencies is supported since BuildKit v0.11.
 
-The structure is base64 encoded and has the following format when decoded:
+e.g.,
+```bash
+buildctl build --frontend dockerfile.v0 --local dockerfile=. --local context=. \
--source-policy-file policy.json +``` +An example `policy.json`: ```json { - "frontend": "dockerfile.v0", - "attrs": { - "build-arg:foo": "bar", - "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", - "filename": "Dockerfile", - "platform": "linux/amd64,linux/arm64", - "source": "crazymax/dockerfile:master" - }, - "sources": [ - { - "type": "docker-image", - "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0" - }, - { - "type": "docker-image", - "ref": "docker.io/library/alpine:3.13", - "pin": "sha256:1d30d1ba3cb90962067e9b29491fbd56997979d54376f23f01448b5c5cd8b462" - }, + "rules": [ { - "type": "git", - "ref": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", - "pin": "259a5aa5aa5bb3562d12cc631fe399f4788642c1" + "action": "CONVERT", + "source": { + "type": "docker-image", + "identifier": "docker.io/library/alpine:latest" + }, + "destination": { + "identifier": "docker-image://docker.io/library/alpine:latest@sha256:4edbd2beb5f78b1014028f4fbb99f3237d9561100b6881aabbf5acce2c4f9454" + } }, { - "type": "http", - "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md", - "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c" + "action": "CONVERT", + "source": { + "type": "http", + "identifier": "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md" + }, + "destination": { + "attrs": {"http.checksum": "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53"} + } } ] } ``` -* `frontend` defines the frontend used to build. -* `attrs` defines build request attributes. -* `sources` defines build sources. - * `type` defines the source type (`docker-image`, `git` or `http`). - * `ref` is the reference of the source. - * `pin` is the source digest. -* `deps` defines build dependencies of input contexts. +Any source type is supported, but how to pin a source depends on the type. -### Image config +## `SOURCE_DATE_EPOCH` +[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/docs/source-date-epoch/) is the convention for pinning timestamps to a specific value. -A new field similar to the one for inline cache has been added to the image -configuration to embed build dependencies: +The Dockerfile frontend supports consuming the `SOURCE_DATE_EPOCH` value as a special build arg, since BuildKit 0.11. +Minimal support is also available on older BuildKit when using Dockerfile 1.5 frontend. -```json -{ - "moby.buildkit.buildinfo.v0": "" -} +```console +buildctl build --frontend dockerfile.v0 --opt build-arg:SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) ... ``` -By default, the build dependencies are inlined in the image configuration. You -can disable this behavior with the [`buildinfo` attribute](../README.md#imageregistry). +The `buildctl` CLI does not automatically propagate the `$SOURCE_DATE_EPOCH` environment value from the client host to the `SOURCE_DATE_EPOCH` build arg. +However, higher level build tools, such as Docker Buildx (>= 0.10), may automatically capture the environment value. 
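+
+If you drive the build through the Go client instead of `buildctl`, you can
+propagate the value yourself as a frontend attribute. A minimal sketch, assuming
+the `github.com/moby/buildkit/client` package and a daemon on the default
+socket (the local directory layout and error handling are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"os"
+
+	"github.com/moby/buildkit/client"
+)
+
+func main() {
+	ctx := context.Background()
+	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+
+	// Forward the client-side SOURCE_DATE_EPOCH to the build, mirroring
+	// `--opt build-arg:SOURCE_DATE_EPOCH=...` on the buildctl command line.
+	opt := client.SolveOpt{
+		Frontend: "dockerfile.v0",
+		FrontendAttrs: map[string]string{
+			"build-arg:SOURCE_DATE_EPOCH": os.Getenv("SOURCE_DATE_EPOCH"),
+		},
+		LocalDirs: map[string]string{
+			"context":    ".",
+			"dockerfile": ".",
+		},
+	}
+	if _, err := c.Solve(ctx, nil, opt, nil); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+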
-### Exporter response (metadata) +The build arg value is used for: +- the `created` timestamp in the [OCI Image Config](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) +- the `created` timestamp in the `history` objects in the [OCI Image Config](https://github.com/opencontainers/image-spec/blob/main/config.md#properties) +- the `org.opencontainers.image.created` annotation in the [OCI Image Index](https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys) +- the timestamp of the files exported with the `local` exporter +- the timestamp of the files exported with the `tar` exporter -The solver response (`ExporterResponse`) also contains a new key -`containerimage.buildinfo` with the same structure as image config encoded in -base64: +The build arg value is not used for the timestamps of the files inside the image currently ([Caveats](#caveats)). -```json -{ - "ExporterResponse": { - "containerimage.buildinfo": "", - "containerimage.digest": "sha256:..." - } -} -``` +See also the [documentation](/frontend/dockerfile/docs/reference.md#buildkit-built-in-build-args) of the Dockerfile frontend. -If multi-platforms are specified, they will be suffixed with the corresponding -platform: +## Caveats +### Timestamps of the files inside the image +Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of the files inside the image. -```json -{ - "ExporterResponse": { - "containerimage.buildinfo/linux/amd64": "", - "containerimage.buildinfo/linux/arm64": "", - "containerimage.digest": "sha256:..." - } -} +Workaround: +```dockerfile +# Limit the timestamp upper bound to SOURCE_DATE_EPOCH. +# Workaround for https://github.com/moby/buildkit/issues/3180 +ARG SOURCE_DATE_EPOCH +RUN find $( ls / | grep -E -v "^(dev|mnt|proc|sys)$" ) -newermt "@${SOURCE_DATE_EPOCH}" -writable -xdev | xargs touch --date="@${SOURCE_DATE_EPOCH}" --no-dereference ``` -### Metadata JSON output +The `touch` command above is [not effective](https://github.com/moby/buildkit/issues/3309) for mount point directories. +A workaround is to create mount point directories below `/dev` (tmpfs) so that the mount points will not be included in the image layer. -If you're using the `--metadata-file` flag with [`buildctl`](../README.md#metadata), -[`buildx build`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_build.md) -or [`buildx bake`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_bake.md): +### Timestamps of whiteouts +Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of "whiteouts" that are created on removing files. -```shell -jq '.' metadata.json -``` -```json -{ - "containerimage.buildinfo": { - "frontend": "dockerfile.v0", - "attrs": { - "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", - "filename": "Dockerfile", - "source": "docker/dockerfile:master" - }, - "sources": [ - { - "type": "docker-image", - "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0" - }, - { - "type": "docker-image", - "ref": "docker.io/library/alpine:3.13", - "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c" - } - ] - }, - "containerimage.digest": "sha256:..." -} +Workaround: +```dockerfile +# Squash the entire stage for resetting the whiteout timestamps. 
+# Workaround for https://github.com/moby/buildkit/issues/3168
+FROM scratch
+COPY --from=0 / /
+```
+
+The timestamps of the regular files in the original stage are maintained in the
+squashed stage, so you do not need to touch the files after this `COPY`
+instruction.
diff --git a/docs/buildctl.md b/docs/buildctl.md
new file mode 100644
index 000000000000..bd95c27526d5
--- /dev/null
+++ b/docs/buildctl.md
@@ -0,0 +1,220 @@
+# buildctl
+
+`buildctl` is the command-line interface to `buildkitd`.
+
+```
+NAME:
+   buildctl - build utility
+
+USAGE:
+   buildctl [global options] command [command options] [arguments...]
+
+VERSION:
+   0.0.0+unknown
+
+COMMANDS:
+   du        disk usage
+   prune     clean up build cache
+   build, b  build
+   debug     debug utilities
+   help, h   Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+   --debug                enable debug output in logs
+   --addr value           buildkitd address (default: "unix:///run/buildkit/buildkitd.sock")
+   --tlsservername value  buildkitd server name for certificate validation
+   --tlscacert value      CA certificate for validation
+   --tlscert value        client certificate
+   --tlskey value         client key
+   --tlsdir value         directory containing CA certificate, client certificate, and client key
+   --timeout value        timeout backend connection after value seconds (default: 5)
+   --help, -h             show help
+   --version, -v          print the version
+```
+
+## Connecting
+
+`buildctl` connects to a running `buildkitd` instance. The connection is in a
+URL format of `://`.
+The supported `` is any supported by [net.Dialer.DialContext()](https://pkg.go.dev/net#Dialer.DialContext).
+In practice, that will normally be one of:
+
+* Unix-domain socket via `unix://path/to/socket`, e.g. `unix:///run/buildkit/buildkitd.sock` (which is the default)
+* TCP socket via `tcp://:`, e.g. `tcp://10.0.0.1:2555`
+
+## `build`
+
+Synopsis:
+
+```
+buildctl build --frontend dockerfile.v0 --opt target=foo --opt build-arg:foo=bar --local context=. --local dockerfile=. --output type=image,name=docker.io/username/image,push=true
+```
+
+`buildctl build` uses a buildkit daemon, `buildkitd`, to drive a build.
+
+The build consists of the following key elements:
+
+* [frontend definition](#frontend): parses the build descriptor, e.g. dockerfile
+* [local sources](#local-sources): sets relevant directories and files passed to the build
+* [frontend options](#frontend-options): options that are relevant to the particular frontend
+* [output](#output): defines what format of output to use and where to place it
+* [cache](#cache): defines where to export the cache generated during the build to, or where to import from
+
+### frontend
+
+The frontend is declared by the flag `--frontend `. The `` must be one built into `buildkitd`, or an OCI
+image that implements the frontend API.
+
+In the above example, we are using the built-in `dockerfile.v0` frontend, which knows how to parse a dockerfile and convert it to LLB.
+
+There are currently two options for `--frontend`:
+
+* `dockerfile.v0`: uses the dockerfile-to-LLB frontend converter that is built into buildkitd.
+* `gateway.v0`: uses any OCI image that implements the frontend API, with the image provided by `--opt source=`.
+
+### local sources
+
+A build may need access to various sources local to the `buildctl` execution environment,
+such as files and directories or OCI images. These can be provided from the local
+environment to which the user of `buildctl` has access, as:
+
+* `--local =` - allow buildkitd to access a local-to-buildctl directory `` under the unique name ``.
+* `--oci-layout =` - allow buildkitd to access OCI images in the local-to-buildctl directory `` under the unique name ``.
+
+Each of the above is expected to provide a unique name, for this invocation of `buildctl`, for a directory. Other parts of `buildctl` can then
+use those "named contexts" to reference directories, files or OCI images.
+
+For example:
+
+```
+buildctl build --local test1=/var/lib/test1
+```
+
+lets `buildkitd` access all of the files in `/var/lib/test1` (relative to wherever `buildctl` is running), referenced via the name `test1`.
+
+Similarly:
+
+```
+buildctl build --oci-layout foo=/var/lib/oci
+```
+
+lets `buildkitd` access OCI images under `/var/lib/oci` (relative to wherever `buildctl` is running), referenced via the name `foo`.
+
+These "named references" are used by the frontend, either directly or with explicit options.
+
+#### dockerfile frontend sources
+
+The dockerfile frontend, enabled via `buildctl build --frontend=dockerfile.v0`, expects to have access to two named references:
+
+* `context`: where to perform the build.
+* `dockerfile`: where to find the dockerfile that describes the build.
+
+Thus, a dockerfile build invocation would include:
+
+```
+buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=.
+```
+
+The above means, "build using the dockerfile frontend, passing it the context of the current directory where I am running `buildctl`, and the
+dockerfile in the current directory as well."
+
+### frontend options
+
+Frontend-specific options are defined via `--opt =`. The specific meanings of those are frontend-specific.
+
+#### dockerfile-specific options
+
+In the above example, we define two options:
+
+* `--opt target=foo` - build only until the dockerfile target stage `foo`, the equivalent of `docker buildx build --target=foo`.
+* `--opt build-arg:foo=bar` - set the build argument `foo` to `bar`.
+
+In addition, the dockerfile frontend supports additional build contexts. These allow you to "alias" an image reference or name
+with something else entirely.
+
+To use the build contexts, pass `--opt context:=`, where the `` is the name in the dockerfile,
+and `` is a properly formatted target. These can be the following:
+
+* `--opt context:alpine=foo1` - replace usage of `alpine` with a named context `foo1`, which should already have been loaded via `--local`.
+* `--opt context:alpine=foo2@sha256:bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb` - replace usage of `alpine` with the image or index whose sha256 hash is `bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb` from the OCI layout with the named context `foo2`, which should already have been loaded via `--oci-layout`.
+* `--opt context:alpine=docker-image://docker.io/library/ubuntu:latest` - replace usage of `alpine` with the docker image `docker.io/library/ubuntu:latest` from the registry.
+* `--opt context:alpine=https://example.com/foo/bar.git` - replace usage of `alpine` with the contents of the git repository at `https://example.com/foo/bar.git`
+
+Complete examples of using local and OCI layout:
+
+```sh
+$ buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --local foo1=/home/dir/abc --opt context:alpine=foo1
+$ buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --oci-layout foo2=/home/dir/oci --opt context:alpine=foo2@sha256:bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb
+```
+
+#### gateway-specific options
+
+The `gateway.v0` frontend passes all of its `--opt` options on to the OCI image that is called to convert the
+input to LLB. The one required option is `--opt source=`, which defines the OCI image to use to convert
+the input to LLB.
+
+For example:
+
+```
+buildctl build \
+    --frontend gateway.v0 \
+    --opt source=docker/dockerfile \
+    --local context=. \
+    --local dockerfile=.
+```
+
+will use the `docker/dockerfile` image to convert the Dockerfile input to LLB.
+
+Other `--opt` options are passed to the frontend.
+
+### output
+
+Output defines what to do with the resultant artifact of the build. It should be a series of key=value pairs, comma-separated, the first of
+which must be `type=`, where `` is one of the supported types. The available options depend on the type.
+
+In our above example:
+
+```
+--output type=image,name=docker.io/username/image,push=true
+```
+
+* `type=image`: output an OCI image.
+* `name=docker.io/username/image`: the name of the image is `docker.io/username/image`.
+* `push=true`: attempt to push the generated image to the registry using the `name` + +### cache + +Cache defines options for buildkit to do one or both of: + +* at the end of the build, export additions to cache from the build to external locations +* at the beginning of the build, import artifacts into the cache from external locations for use during the build + +#### export cache + +During the build process, `buildkitd` generates cache layers. These can be exported at the end of the build via: + +``` +--export-cache type=,=,... +``` + +The `` options are defined for the given types. + +For example: + +* `--export-cache type=registry,ref=example.com/foo/bar` - export the cache to an OCI image. +* `--export-cache type=local,dest=path/to/dir` - export the cache to a directory local to where `buildctl` is running. + +#### import cache + +During the build process, `buildkitd` uses its local cache to optimize its build. In addition, you +can augment what is in the cache from external locations, i.e. seed the cache. + +``` +--import-cache type=,= +``` + +The `` options are defined for the given types, and match those for `--export-cache`. + +For example: + +* `--import-cache type=registry,ref=example.com/foo/bar` - import into the cache from an OCI image. +* `--import-cache type=local,src=path/to/dir` - import into the cache from a directory local to where `buildctl` is running. diff --git a/docs/buildinfo.md b/docs/buildinfo.md new file mode 100644 index 000000000000..8863a5ea2d08 --- /dev/null +++ b/docs/buildinfo.md @@ -0,0 +1,141 @@ +# Build information + +> **Warning** +> +> Build information is deprecated and will be removed in the next release. See +> the [Deprecated features page](https://github.com/moby/buildkit/blob/master/docs/deprecated.md) +> for status and alternative recommendation about this feature. + +Build information structures are generated with build metadata that allows you +to see all the sources (images, git repositories) that were used by the build +with their exact versions and also the configuration that was passed to the +build. This information is also embedded into the image configuration if one +is generated. + +## Build dependencies + +Build dependencies are generated when your image has been built. These +dependencies include versions of used images, git repositories and HTTP URLs +used by LLB `Source` operation as well as build request attributes. 
+ +The structure is base64 encoded and has the following format when decoded: + +```json +{ + "frontend": "dockerfile.v0", + "attrs": { + "build-arg:foo": "bar", + "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", + "filename": "Dockerfile", + "platform": "linux/amd64,linux/arm64", + "source": "crazymax/dockerfile:master" + }, + "sources": [ + { + "type": "docker-image", + "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", + "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0" + }, + { + "type": "docker-image", + "ref": "docker.io/library/alpine:3.13", + "pin": "sha256:1d30d1ba3cb90962067e9b29491fbd56997979d54376f23f01448b5c5cd8b462" + }, + { + "type": "git", + "ref": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", + "pin": "259a5aa5aa5bb3562d12cc631fe399f4788642c1" + }, + { + "type": "http", + "ref": "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c" + } + ] +} +``` + +* `frontend` defines the frontend used to build. +* `attrs` defines build request attributes. +* `sources` defines build sources. + * `type` defines the source type (`docker-image`, `git` or `http`). + * `ref` is the reference of the source. + * `pin` is the source digest. +* `deps` defines build dependencies of input contexts. + +### Image config + +A new field similar to the one for inline cache has been added to the image +configuration to embed build dependencies: + +```json +{ + "moby.buildkit.buildinfo.v0": "" +} +``` + +By default, the build dependencies are inlined in the image configuration. You +can disable this behavior with the [`buildinfo` attribute](../README.md#imageregistry). + +### Exporter response (metadata) + +The solver response (`ExporterResponse`) also contains a new key +`containerimage.buildinfo` with the same structure as image config encoded in +base64: + +```json +{ + "ExporterResponse": { + "containerimage.buildinfo": "", + "containerimage.digest": "sha256:..." + } +} +``` + +If multi-platforms are specified, they will be suffixed with the corresponding +platform: + +```json +{ + "ExporterResponse": { + "containerimage.buildinfo/linux/amd64": "", + "containerimage.buildinfo/linux/arm64": "", + "containerimage.digest": "sha256:..." + } +} +``` + +### Metadata JSON output + +If you're using the `--metadata-file` flag with [`buildctl`](../README.md#metadata), +[`buildx build`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_build.md) +or [`buildx bake`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_bake.md): + +```shell +jq '.' metadata.json +``` +```json +{ + "containerimage.buildinfo": { + "frontend": "dockerfile.v0", + "attrs": { + "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master", + "filename": "Dockerfile", + "source": "docker/dockerfile:master" + }, + "sources": [ + { + "type": "docker-image", + "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", + "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0" + }, + { + "type": "docker-image", + "ref": "docker.io/library/alpine:3.13", + "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c" + } + ] + }, + "containerimage.digest": "sha256:..." 
+}
+```
diff --git a/docs/buildkitd.toml.md b/docs/buildkitd.toml.md
index d2c60339f199..50b7a670ac24 100644
--- a/docs/buildkitd.toml.md
+++ b/docs/buildkitd.toml.md
@@ -1,12 +1,5 @@
 # buildkitd.toml
 
-## NAME
-
-buildkitd.toml - configuration file for buildkitd
-
-
-## DESCRIPTION
-
 The TOML file used to configure the buildkitd daemon settings has a short
 list of global settings followed by a series of sections for specific areas
 of daemon configuration.
@@ -14,13 +7,11 @@ of daemon configuration.
 The file path is `/etc/buildkit/buildkitd.toml` for rootful mode,
 `~/.config/buildkit/buildkitd.toml` for rootless mode.
 
-## EXAMPLE
-
-The following is a complete **buildkitd.toml** configuration example,
-please note some of the configuration is only good for edge cases, please
-take care of it carefully.
+The following is a complete `buildkitd.toml` configuration example. Note that
+some of this configuration is only suitable for edge cases, so take care when
+applying it.
 
-```
+```toml
 debug = true
 # root is where all buildkit state is stored.
 root = "/var/lib/buildkit"
@@ -38,6 +29,13 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
     key = "/etc/buildkit/tls.key"
     ca = "/etc/buildkit/tlsca.crt"
 
+# config for build history API that stores information about completed build commands
+[history]
+  # maxAge is the maximum age of history entries to keep, in seconds.
+  maxAge = 172800
+  # maxEntries is the maximum number of history entries to keep.
+  maxEntries = 50
+
 [worker.oci]
   enabled = true
   # platforms is manually configure platforms, detected automatically if unset.
@@ -57,6 +55,9 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
   apparmor-profile = ""
   # limit the number of parallel build steps that can run at the same time
   max-parallelism = 4
+  # maintain a pool of reusable CNI network namespaces to amortize the overhead
+  # of allocating and releasing the namespaces
+  cniPoolSize = 16
 
   [worker.oci.labels]
     "foo" = "bar"
@@ -77,6 +78,10 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
   gc = true
   # gckeepstorage sets storage limit for default gc profile, in MB.
   gckeepstorage = 9000
+  # maintain a pool of reusable CNI network namespaces to amortize the overhead
+  # of allocating and releasing the namespaces
+  cniPoolSize = 16
+
   [worker.containerd.labels]
     "foo" = "bar"
 
@@ -97,7 +102,7 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
   [[registry."docker.io".keypair]]
     key="/etc/config/key.pem"
     cert="/etc/config/cert.pem"
-    
+
 # optionally mirror configuration can be done by defining it as a registry.
 [registry."yourmirror.local:5000"]
   http = true
diff --git a/docs/deprecated.md b/docs/deprecated.md
new file mode 100644
index 000000000000..c6c75f0b8ae6
--- /dev/null
+++ b/docs/deprecated.md
@@ -0,0 +1,49 @@
+# Deprecated features
+
+This page provides an overview of features that are deprecated in BuildKit.
+
+As changes are made to BuildKit, there may be times when existing features need
+to be removed or replaced with newer features. Before an existing feature is
+removed, it is labeled as "deprecated" within the documentation and remains in
+BuildKit for at least one stable release, unless specified explicitly otherwise.
+After that time it may be removed.
+
+Users are expected to take note of the list of deprecated features each release
+and plan their migration away from those features, and (if applicable) towards
+the replacement features, as soon as possible.
+
+The table below provides an overview of the current status of deprecated
+features:
+
+- **Deprecated**: the feature is marked "deprecated" and should no longer be
+  used. The feature may be removed, disabled, or change behavior in a future
+  release. The _"Deprecated"_ column contains the release in which the feature
+  was marked deprecated, whereas the _"Remove"_ column contains a tentative
+  release in which the feature is to be removed. If no release is included in
+  the _"Remove"_ column, the release is yet to be decided on.
+- **Removed**: the feature was removed, disabled, or hidden. Refer to the linked
+  section for details. Some features are "soft" deprecated, which means that
+  they remain functional for backward compatibility, and to allow users to
+  migrate to alternatives. In such cases, a warning may be printed, and users
+  should not rely on this feature.
+
+| Status     | Feature                                 | Deprecated | Remove | Recommendation                                                    |
+|------------|-----------------------------------------|------------|--------|-------------------------------------------------------------------|
+| Deprecated | [Build information](#build-information) | v0.11      | v0.12  | Use [provenance attestations](./attestations/slsa-provenance.md)  |
+
+## Build information
+
+[Build information](https://github.com/moby/buildkit/blob/v0.11/docs/buildinfo.md)
+structures were introduced in [BuildKit v0.10.0](https://github.com/moby/buildkit/releases/tag/v0.10.0)
+and are generated with build metadata that allows you to see all the sources
+(images, git repositories) that were used by the build, with their exact
+versions, as well as the configuration that was passed to the build. This
+information is also embedded into the image configuration if one is generated.
+
+With the introduction of [provenance attestations](./attestations/slsa-provenance.md)
+in [BuildKit v0.11.0](https://github.com/moby/buildkit/releases/tag/v0.11.0),
+the build information feature has been deprecated and will be removed in the
+next release.
+
+To completely disable the build information feature, set the build-arg
+`BUILDKIT_BUILDINFO=false`.
diff --git a/docs/dev/README.md b/docs/dev/README.md
new file mode 100644
index 000000000000..0c3c358d71ec
--- /dev/null
+++ b/docs/dev/README.md
@@ -0,0 +1,53 @@
+# BuildKit Developer Docs
+
+These are the BuildKit developer docs, designed to be read by technical users
+interested in contributing to or integrating with BuildKit.
+
+> **Warning**
+>
+> While these docs attempt to keep up with the current state of our `master`
+> development branch, the code is constantly changing and updating as bugs are
+> fixed and features are added. Remember, the ultimate source of truth is
+> always the code base.
+
+
+## Video
+
+You can find a recording of the "BuildKit architecture and internals" session [here](https://drive.google.com/file/d/1zGMQipL5WJ3sLySu7gHZ_o6tFpxRXRHs/view) ([slides](https://docs.google.com/presentation/d/1tEnuMOENuoVQ3l6viBmguYUn7XpjIHIC-3RHzfyIgjc/edit?usp=sharing)). This session gives an overview of how BuildKit works under the hood and how it was designed. If you’re thinking about contributing to BuildKit, it should give you an overview of the most important components that make up BuildKit and how they work together.
+
+## Jargon
+
+The following terms are often used throughout the codebase and the developer
+documentation to describe different components and processes in the image build
+process.
+
+| Name | Description |
+| :--- | :---------- |
+| **LLB** | LLB stands for low-level build definition, which is a binary intermediate format used for defining the dependency graph for processes running as part of your build. |
+| **Definition** | Definition is the LLB serialized using protocol buffers. This is the protobuf type that is transported over the gRPC interfaces. |
+| **Frontend** | Frontends are builders of LLB and may issue requests to Buildkit’s gRPC server, such as solving graphs. Currently, only `dockerfile.v0` and `gateway.v0` are implemented, but the gateway frontend allows running container images that function as frontends. |
+| **State** | State is a helper object to build LLBs from higher-level concepts like images, shell executions, mounts, etc. Frontends use the state API in order to build LLBs and marshal them into the definition. |
+| **Solver** | Solver is an abstract interface to solve a graph of vertices and edges to find the final result. An LLB solver is a solver that understands that vertices are implemented by container-based operations, and that edges map to container-snapshot results. |
+| **Vertex** | Vertex is a node in a build graph. It defines an interface for a content addressable operation and its inputs. |
+| **Op** | Op defines how the solver can evaluate the properties of a vertex operation. An op is retrieved from a vertex and executed in the worker. For example, there are op implementations for image sources, git sources, exec processes, etc. |
+| **Edge** | Edge is a connection point between vertices. An edge references a specific output of a vertex’s operation. Edges are used as inputs to other vertices. |
+| **Result** | Result is an abstract interface return value of a solve. In LLB, the result is a generic interface over a container snapshot. |
+| **Worker** | Worker is a backend that can run OCI images. Currently, Buildkit can run with workers using either runc or containerd. |
+
+## Table of Contents
+
+The developer documentation is split across various files.
+
+For an overview of the process of building images:
+
+- [Request lifecycle](./request-lifecycle.md) - observe how incoming requests
+  are solved to produce a final artifact.
+- [Dockerfile to LLB](./dockerfile-llb.md) - understand how `Dockerfile`
+  instructions are converted to the LLB format.
+- [Solver](./solver.md) - understand how LLB is evaluated by the solver to
+  produce the solve graph.
+
+We also have a number of more specific guides:
+
+- [MergeOp and DiffOp](./merge-diff.md) - learn how MergeOp and DiffOp are
+  implemented, and how to program with them in LLB.
diff --git a/docs/dev/dockerfile-llb.md b/docs/dev/dockerfile-llb.md
new file mode 100644
index 000000000000..1df0c853fc3d
--- /dev/null
+++ b/docs/dev/dockerfile-llb.md
@@ -0,0 +1,212 @@
+# Dockerfile conversion to LLB
+
+If you want to understand how Buildkit translates Dockerfile instructions into
+LLB, or you want to write your own frontend, then seeing how a Dockerfile maps
+to the Buildkit LLB package will give you a jump start.
+
+The `llb` package from Buildkit provides a chainable state object to help
+construct an LLB. Then you can marshal the state object into a definition using
+protocol buffers, and send it off in a solve request over gRPC.
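+
+A minimal sketch of that flow with the Go client (the daemon address and the
+error handling here are illustrative assumptions, not part of the frontend
+itself):
+
+```golang
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Build a state, then marshal it into a protobuf definition.
+	st := llb.Image("golang:1.12").Run(llb.Shlex("echo hello")).Root()
+	def, err := st.Marshal(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Send the definition to buildkitd in a solve request over gRPC.
+	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer c.Close()
+	if _, err := c.Solve(ctx, def, client.SolveOpt{}, nil); err != nil {
+		log.Fatal(err)
+	}
+}
+```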
+
+In code, these transformations are performed by the [`Dockerfile2LLB()`](../../frontend/dockerfile/dockerfile2llb/convert.go)
+function, which takes a raw `Dockerfile`'s contents and converts it to an LLB
+state, and associated image config, which are then both assembled in the
+[`Build()`](../../frontend/dockerfile/builder/build.go) function.
+
+## Basic examples
+
+Here are a few Dockerfile instructions you should be familiar with:
+
+- Base image
+
+  ```dockerfile
+  FROM golang:1.12
+  ```
+
+  ```golang
+  st := llb.Image("golang:1.12")
+  ```
+
+- Scratch image
+
+  ```dockerfile
+  FROM scratch
+  ```
+
+  ```golang
+  st := llb.Scratch()
+  ```
+
+- Environment variables
+
+  ```dockerfile
+  ENV DEBIAN_FRONTEND=noninteractive
+  ```
+
+  ```golang
+  st = st.AddEnv("DEBIAN_FRONTEND", "noninteractive")
+  ```
+
+- Running programs
+
+  ```dockerfile
+  RUN echo hello
+  ```
+
+  ```golang
+  st = st.Run(
+    llb.Shlex("echo hello"),
+  ).Root()
+  ```
+
+- Working directory
+
+  ```dockerfile
+  WORKDIR /path
+  ```
+
+  ```golang
+  st = st.Dir("/path")
+  ```
+
+## File operations
+
+This is where LLB starts to deviate from Dockerfile in features. In
+Dockerfiles, the `RUN` command is completely opaque to the builder and just
+executes the command. But in LLB, there are file operations that have better
+caching semantics and understanding of the command:
+
+- Copying files
+
+  ```dockerfile
+  COPY --from=builder /files/* /files
+  ```
+
+  ```golang
+  var CopyOptions = &llb.CopyInfo{
+    FollowSymlinks:      true,
+    CopyDirContentsOnly: true,
+    AttemptUnpack:       false,
+    CreateDestPath:      true,
+    AllowWildcard:       true,
+    AllowEmptyWildcard:  true,
+  }
+  st = st.File(
+    llb.Copy(builder, "/files/*", "/files", CopyOptions),
+  )
+  ```
+
+- Adding files
+
+  ```dockerfile
+  ADD --from=builder /files.tgz /files
+  ```
+
+  ```golang
+  var AddOptions = &llb.CopyInfo{
+    FollowSymlinks:      true,
+    CopyDirContentsOnly: true,
+    AttemptUnpack:       true,
+    CreateDestPath:      true,
+    AllowWildcard:       true,
+    AllowEmptyWildcard:  true,
+  }
+  st = st.File(
+    llb.Copy(builder, "/files.tgz", "/files", AddOptions),
+  )
+  ```
+
+- Chaining file commands
+
+  ```dockerfile
+  # not possible without RUN in Dockerfile
+  RUN mkdir -p /some && echo hello > /some/file
+  ```
+
+  ```golang
+  st = st.File(
+    llb.Mkdir("/some", 0755),
+  ).File(
+    llb.Mkfile("/some/file", 0644, []byte("hello")),
+  )
+  ```
+
+## Bind mounts
+
+Bind mounts allow unidirectional syncing of the host's local file system into
+the build environment.
+
+Bind mounts in Buildkit should not be confused with bind mounts in the Linux
+kernel: they do not sync bidirectionally. Bind mounts are only a snapshot of
+your local state, which is specified through the `llb.Local` state object:
+
+- Using bind mounts
+
+  ```dockerfile
+  WORKDIR /builder
+  RUN --mount=type=bind,target=/builder \
+    PIP_INDEX_URL=https://my-proxy.com/pypi \
+    pip install .
+  ```
+
+  ```golang
+  localState := llb.Local(
+    "context",
+    llb.SessionID(client.BuildOpts().SessionID),
+    llb.WithCustomName("loading ."),
+    llb.FollowPaths([]string{"."}),
+  )
+
+  execState := st.Dir("/builder").Run(
+    llb.Shlex("pip install ."),
+    llb.AddEnv(
+      "PIP_INDEX_URL",
+      "https://my-proxy.com/pypi",
+    ),
+  )
+  _ = execState.AddMount("/builder", localState)
+  // the return value of AddMount captures the resulting state of the mount
+  // after the exec operation has completed
+
+  st := execState.Root()
+  ```
+
+## Cache mounts
+
+Cache mounts allow for a shared file cache location between build invocations,
+which allows manually caching expensive operations, such as package downloads.
+Mounts have options to persist between builds with different sharing modes.
+
+- Using cache mounts
+
+  ```dockerfile
+  RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt \
+    apt-get update
+  ```
+
+  ```golang
+  var VarCacheAptMount = llb.AddMount(
+    "/var/cache/apt",
+    llb.Scratch(),
+    llb.AsPersistentCacheDir(
+      "some-cache-id",
+      llb.CacheMountLocked,
+    ),
+  )
+
+  var VarLibAptMount = llb.AddMount(
+    "/var/lib/apt",
+    llb.Scratch(),
+    llb.AsPersistentCacheDir(
+      "another-cache-id",
+      llb.CacheMountShared,
+    ),
+  )
+
+  st := st.Run(
+    llb.Shlex("apt-get update"),
+    VarCacheAptMount,
+    VarLibAptMount,
+  ).Root()
+  ```
diff --git a/docs/dev/merge-diff.md b/docs/dev/merge-diff.md
new file mode 100644
index 000000000000..ba0bf19849d1
--- /dev/null
+++ b/docs/dev/merge-diff.md
@@ -0,0 +1,684 @@
+# Merge and Diff Ops
+
+MergeOp and DiffOp are two interrelated LLB operations that enable the rebasing
+of LLB results onto other results and the separation of LLB results from their
+base, respectively. Under the hood, these ops enable fine-grained manipulation
+of container layer chains, which can result in highly efficient operations for
+many use cases.
+
+This doc assumes some familiarity with LLB and ops like ExecOp and FileOp. More
+background on LLB can be obtained from the README.md in Buildkit's git
+repository. This doc also uses the Go LLB client for examples, though MergeOp
+and DiffOp are not in any way language-specific.
+
+## MergeOp
+
+MergeOp has a very simple interface:
+
+```go
+func Merge(inputs []llb.State) llb.State
+```
+
+The intuition is that it merges the contents of the provided states together
+into one state (hence the name), with files from later states taking precedence
+over those from earlier ones.
+
+To be more concrete, MergeOp returns a state where each of the input states is
+rebased on top of the previous ones, in the order provided. "Rebasing" a state
+`B` onto another state `A` creates a state that:
+
+- Has all the contents of `B`
+- Has all the contents of `A` except when a path exists in both `B` and `A`. In this case:
+  - If both paths are directories, their contents are merged. Metadata (such
+    as permissions) on the directory from `B` takes precedence.
+  - If one of the paths is not a directory, whatever is present in `B` takes
+    precedence. This also means that if a file in `B` overwrites a dir in `A`,
+    then all files/dirs in the tree under that path in `A` are also
+    removed.
+
+MergeOp is associative, i.e. using shorthand notation: `Merge(A, B, C) ==
+Merge(Merge(A, B), C) == Merge(A, Merge(B, C))`. Buildkit knows this and
+internally optimizes LLB merges that are equivalent in this way to re-use the
+same cache entries.
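+
+For instance, these two formulations describe equivalent merges; a minimal
+sketch, assuming `a`, `b`, and `c` are states like those in the example below:
+
+```go
+// Both produce the same result, and because MergeOp is associative,
+// Buildkit can resolve them to the same internal cache entries.
+flat := llb.Merge([]llb.State{a, b, c})
+nested := llb.Merge([]llb.State{llb.Merge([]llb.State{a, b}), c})
+```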
+ +There are more subtleties to the behavior of MergeOp, such as when deletions +are present in a layer making up a state, discussed in the "Advanced Details" +section of this doc. + +States created by MergeOp are the same as any other LLB states in that they can +be used as the base for exec, be mounted to arbitrary paths in execs, be +plugged into other merges and diffs, be exported, etc. + +As a very simple example: + +```go +// a has /dir/a +a := llb.Scratch(). + File(llb.Mkdir("/dir", 0755)). + File(llb.Mkfile("/dir/a", 0644, []byte("a"))) + +// b has /dir/b and /otherdir +b := llb.Scratch(). + File(llb.Mkdir("/dir", 0755)). + File(llb.Mkfile("/dir/b", 0644, []byte("b"))). + File(llb.Mkdir("/otherdir", 0755)) + +// c has /dir/a and /dir/c +c := llb.Scratch(). + File(llb.Mkdir("/dir", 0700)). + File(llb.Mkfile("/dir/a", 0644, []byte("overwritten"))). + File(llb.Mkfile("/dir/c", 0644, []byte("c"))) + +// merged will consist of /dir/a, /dir/b, /dir/c and /otherdir. +// The file at /dir/a will have contents set to "overwritten" because c is merged after a. +// /dir will have permissions set to 0700 for the same reason. +merged := llb.Merge([]llb.State{a, b, c}) + +// merged can be used as the base for new states +mergedPlusMore := merged.File(llb.Mkdir("/yetanotherdir", 0755)) +// or as the input to other merges +mergedPlusMore = llb.Merge([]llb.State{merged, llb.Scratch().File(llb.Mkdir("/yetanotherdir", 0755))}) +``` + +### MergeOp Container Image Export + +When the result of a MergeOp is exported as a container image, the image will +consist of the layers making up each input joined together in the order of the +MergeOp. If Buildkit has cached any one of these layers already, they will not +need to be re-exported (i.e. re-packaged into compressed tarballs). +Additionally, if the image is being pushed to a registry and the registry +indicates it already has any of the layers, then Buildkit can skip pushing +those layers entirely. + +Layers joined together by MergeOp do not have dependencies on each other, so a +cache invalidation of the layers of one input doesn't cascade to the layers of +the other inputs. + +## DiffOp + +DiffOp also has a very simple interface: + +```go +func Diff(lower llb.State, upper llb.State) llb.State +``` + +The intuition is that it returns a state whose contents are the difference +between `lower` and `upper`. It can be viewed as something like the inverse of +MergeOp; whereas MergeOp "adds" states together, DiffOp "subtracts" `lower` +from `upper` (in a manner of speaking). + +More specifically, DiffOp returns a state that has the contents present in +`upper` that either aren't present in `lower` or have changed from `lower` to +`upper`. Another way of thinking about it is that if you start at `A` and apply +`Diff(A, B)`, you will end up at `B`. Or, even more succinctly, `Merge(A, +Diff(A, B)) == B`. + +Files and dirs are considered to have changed between `lower` and `upper` if +their contents are unequal or if metadata like permissions and `mtime` have +changed. Unequal `atime` or `ctime` values are not considered to be a change. + +There are more subtleties to the behavior of DiffOp discussed in the "Advanced +Details" section of this doc. + +States created by DiffOp are the same as any other LLB states in that they can +be used as the base for exec, be mounted to arbitrary paths in execs, be +plugged into merges and other diffs, be exported, etc. 
+ +As a very simple example: + +```go +base := llb.Image("alpine") +basePlusBuilt := base.Run(llb.Shlex("touch /foo")).Root() +// diffed consists of just the file /foo, nothing in the alpine image is present +diffed := llb.Diff(base, basePlusBuilt) +``` + +### DiffOp Container Image Export + +When the result of a DiffOp is exported as a container image, layers will be +re-used as much as possible. To explain, consider this case: + +```go +lower := llb.Image("alpine") +middle := lower.Run(llb.Shlex("touch /foo")).Root() +upper := middle.Run(llb.Shlex("touch /bar")).Root() +diff := llb.Diff(lower, upper) +``` + +In this case, there is a "known chain" from `lower` to `upper` because `lower` +is a state in `upper`'s history. This means that when the DiffOp is exported as +a container image, it can just consist of the container layers for `middle` +joined with the container layers for `upper`. + +Another way of thinking about this is that when `lower` is a state in `upper`'s +history, the diff between the two is equivalent to a merge of the states +between them. So, using the example above: + +```go +llb.Diff(lower, upper) == llb.Merge([]llb.State{ + llb.Diff(lower, middle), + llb.Diff(middle, upper), +}) +``` + +This behavior extends to arbitrary numbers of states separating `lower` and `upper`. + +In the case where there is not a chain between `lower` and `upper` that +Buildkit can determine, DiffOp still works consistently but, when exported, +will always result in a single layer that is not re-used from its inputs. + +## Example Use Case: Better "Copy Chains" with MergeOp + +### The Problem + +A common pattern when building container images is to independently assemble +components of the image and then combine those components together into a final +image using a chain of Copy FileOps. For example, when using the Dockerfile +frontend, this is the multi-stage build pattern and a chain of `COPY +--from=...` statements. + +One issue with this type of pattern is that if any of the inputs to the copy +chain change, that doesn't just invalidate Buildkit's cache for that input, it +also invalidates Buildkit's cache for any copied layers after that one. + +To be a bit more concrete, consider the following LLB as specified with the Go client: + +```go +// stage a +a := llb.Image("alpine").Run("build a").Root() +// stage b +b := llb.Image("alpine").Run("build b").Root() +// stage c +c := llb.Image("alpine").Run("build c").Root() + +// final combined stage +combined := llb.Image("alpine"). + File(llb.Copy(a, "/bin/a", "/usr/local/bin/a")). + File(llb.Copy(b, "/bin/b", "/usr/local/bin/b")). + File(llb.Copy(c, "/bin/c", "/usr/local/bin/c")) +``` + +Note that this is basically the equivalent of the following Dockerfile: + +```dockerfile +FROM alpine as a +RUN build a + +FROM alpine as b +RUN build b + +FROM alpine as c +RUN build c + +FROM alpine as combined +COPY --from=a /bin/a /usr/local/bin/a +COPY --from=b /bin/b /usr/local/bin/b +COPY --from=c /bin/c /usr/local/bin/c +``` + +Now, say you do a build of this LLB and export the `combined` stage as a +container image to a registry. If you were to then repeat the same build with +the same instance of Buildkit, each part of the build should be cached, +resulting in no work needing to be done and no layers needing to be exported or +pushed to the registry. + +Then, say you later do the build again but this time with a change to `a`. 
+The build for `a` is thus not cached, which means that the copy of `/bin/a`
+into `/usr/local/bin/a` of `combined` is also not cached and has to be re-run.
+The problem is that because each copy into `combined` is chained together, the
+invalidation of the copy from `a` also cascades to its descendants, namely the
+copies from `b` and `c`. This is despite the fact that `b` and `c` are
+independent of `a` and thus don't need to be invalidated. In graphical form:
+
+```mermaid
+graph TD
+    alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
+    alpine -->|CACHE HIT fa:fa-check| B("build b")
+    alpine -->|CACHE HIT fa:fa-check| C("build c")
+
+    A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
+    busybox("busybox") -->|CACHE HIT fa:fa-check| ACopy
+    B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
+    ACopy -->|CACHE MISS fa:fa-ban| BCopy
+    C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
+    BCopy -->|CACHE MISS fa:fa-ban| CCopy
+
+    classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
+    class alpine,B,C,busybox green
+    classDef red fill:#c72020,stroke:#333,stroke-width:2px;
+    class A,ACopy,BCopy,CCopy red
+```
+
+As a result, not only do the copies from `b` and `c` to create
+`/usr/local/bin/b` and `/usr/local/bin/c` need to run again, they also result
+in new layers needing to be exported and then pushed to a registry. For many
+use cases, this becomes a significant source of overhead in terms of build
+times and the amount of data that needs to be stored and transferred.
+
+### The Solution
+
+MergeOp can be used to fix the problem of cascading invalidation in copy chains:
+
+```go
+a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/local/bin/a"))
+b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/local/bin/b"))
+c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/local/bin/c"))
+combined := llb.Merge([]llb.State{
+    llb.Image("busybox"),
+    a,
+    b,
+    c,
+})
+```
+
+(*Note that newer versions of the Dockerfile syntax support a `--link` flag
+when using `COPY`, which results in basically this same pattern*)
+
+Two changes have been made from the previous version:
+
+1. `a`, `b`, and `c` have been updated to copy their desired contents to
+   `Scratch` (a new, empty state).
+1. `combined` is defined as a MergeOp of the states desired in the final image.
+
+Say you're doing this build for the first time. The build will first create
+states `a`, `b`, and `c`, resulting in each being a single layer consisting
+only of contents `/usr/local/bin/a`, `/usr/local/bin/b`, and `/usr/local/bin/c`
+respectively. Then, the MergeOp rebases each of those states onto the base
+`busybox` image. As discussed earlier, the container image export of a MergeOp
+will consist of the layers of the merge inputs joined together, so the final
+image looks mostly the same as before.
+
+The benefits of MergeOp become apparent when considering what happens if the
+build of `a` is modified. Whereas before this led to invalidation of the copies
+from `b` and `c`, now those merge inputs are completely unaffected; no new cache
+entries or new container layers need to be created for them. So, the end result
+is that the only work Buildkit does when `a` changes is re-build `a` and then
+push the new layers for `/usr/local/bin/a` (plus a new image manifest).
+`/usr/local/bin/b` and `/usr/local/bin/c` do not need to be re-exported and do
+not need to be re-pushed to the registry.
+In graphical form:
+
+```mermaid
+graph TD
+    alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
+    alpine -->|CACHE HIT fa:fa-check| B("build b")
+    alpine -->|CACHE HIT fa:fa-check| C("build c")
+
+    busybox("busybox") -->|CACHE HIT fa:fa-check| Merge("Merge (lazy)")
+    A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
+    ACopy -->|CACHE MISS fa:fa-ban| Merge
+    B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
+    BCopy -->|CACHE HIT fa:fa-check| Merge
+    C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
+    CCopy -->|CACHE HIT fa:fa-check| Merge
+
+    classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
+    class alpine,B,BCopy,C,CCopy,busybox green
+    classDef red fill:#c72020,stroke:#333,stroke-width:2px;
+    class A,ACopy red
+```
+
+An important aspect of this behavior is that MergeOp is implemented lazily,
+which means that its on-disk filesystem representation is only created locally
+when strictly required. This means that even though a change to `a` invalidates
+the MergeOp as a whole, no work needs to be done to create the merged state
+on-disk when it's only being exported as a container image. This laziness
+behavior is discussed more in the "Performance Considerations" section of the
+doc.
+
+You can see a working-code example of this by comparing `examples/buildkit3`
+with `examples/buildkit4` in the Buildkit git repo.
+
+## Example Use Case: Remote-only Image Append with MergeOp
+
+If you have some layers already pushed to a remote registry, MergeOp allows you
+to create new images that combine those layers in arbitrary ways without having
+to actually pull any layers down first. For example:
+
+```go
+foo := llb.Image("fooApp:v0.1")
+bar := llb.Image("barApp:v0.3")
+qaz := llb.Image("qazApp:v1.2")
+merged := llb.Merge([]llb.State{foo, bar, qaz})
+```
+
+If `merged` is being exported to the same registry that already has the layers
+for `fooApp`, `barApp` and `qazApp`, then the only thing Buildkit does during
+the export is create an image manifest (just some metadata) and push it to the
+registry. No layers need to be pushed (they are already there) and they don't
+even need to be pulled locally to Buildkit either.
+
+Note that if you were to instead do this:
+
+```go
+merged := llb.Merge([]llb.State{foo, bar, qaz}).Run(llb.Shlex("extra command")).Root()
+```
+
+Then `fooApp`, `barApp` and `qazApp` will need to be pulled, though they will
+usually be merged together more efficiently than the naive solution of just
+unpacking the layers on top of each other. See the "Performance Considerations"
+section for more info.
+
+Additionally, if you export your Buildkit cache to a registry, this same idea
+can be extended to any LLB types, not just `llb.Image`. So, using the same
+example as the previous use case:
+
+```go
+a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/bin/a"))
+b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/bin/b"))
+c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/bin/c"))
+combined := llb.Merge([]llb.State{
+    llb.Image("alpine"),
+    a,
+    b,
+    c,
+})
+```
+
+If you do a build that includes a remote cache export to a registry, then any
+Buildkit worker importing that cache can run builds that do different merges of
+those layers without having to pull anything down.
+For instance, if a separate Buildkit worker imported that remote cache and
+then built this:
+
+```go
+combined2 := llb.Merge([]llb.State{
+    c,
+    a,
+})
+```
+
+An export of `combined2` would not need to pull any layers down because it's
+just a merge of `c` and `a`, which already have layers in the registry thanks
+to the remote cache. This works because a remote cache import is actually just
+a metadata download; layers are only pulled locally once needed and they aren't
+needed for this MergeOp.
+
+## Example Use Case: Modeling Package Builds with MergeOp+DiffOp
+
+Merge and Diff have many potential use cases, but one primary one is to assist
+higher level tooling that's using LLB to model "dependency-based builds", such
+as what's found in many package managers and other build systems.
+
+More specifically, the following is a common pattern used to model the build of a "package" (or equivalent concept) in such systems:
+
+1. The build-time dependencies of the package are combined into a filesystem.
+   The dependencies are themselves just already-built packages.
+1. A build is run by executing some commands that have access to the combined
+   dependencies, producing new build artifacts that are somehow isolated from
+   the dependencies. These isolated build artifacts become the new package's
+   contents.
+1. The new package can then be used as a dependency of other packages and/or
+   served directly to end users, while being careful to ensure that any runtime
+   dependencies are also present when the package needs to be utilized.
+
+One way to adapt the above model to LLB might be like this:
+
+```go
+// "Packages" are just LLB states. Build-time dependencies are combined
+// together into a filesystem using MergeOp.
+runtimeDeps := llb.Merge([]llb.State{depC, depD})
+buildDeps := llb.Merge([]llb.State{src, depA, depB, runtimeDeps})
+
+// Builds of a new package are ExecOps on top of the MergeOp from the previous step
+// (one ExecOp for the build and one for the install). The install ExecOp is defined
+// such that build artifacts are written to a dedicated Mount, isolating them from
+// the dependencies under /output.
+builtPackage := buildDeps.Run(
+    llb.Dir("/src"),
+    llb.Shlex("make"),
+).Root().Run(
+    llb.Dir("/src"),
+    llb.Shlex("make install"),
+    llb.AddEnv("DESTDIR", "/output"),
+    llb.AddMount("/output", llb.Scratch()),
+).GetMount("/output")
+
+// If the package needs to be run as part of a different build or by an
+// end user, the runtime deps of the state can be included via a MergeOp.
+llb.Merge([]llb.State{runtimeDeps, builtPackage})
+```
+
+While the above is a bit of an over-simplification (it, for instance, ignores
+the need to topologically sort dependency DAGs before merging them together),
+the important point is that it only needs MergeOp and ExecOp; DiffOp is left
+out entirely. For many use cases, this is completely fine and DiffOp is not
+needed.
+
+Some use cases can run into issues though, specifically with the part where
+build artifacts need to be isolated from their dependencies. The above example
+uses the convention of setting `DESTDIR`, an environment variable that
+specifies a directory that `make install` should place artifacts under. Most
+build systems support either `DESTDIR` or some type of equivalent mechanism for
+isolating installed build artifacts.
+However, there are times when this convention is either not available or not
+desired, in which case DiffOp can come to the rescue as a generic,
+tool-agnostic way of separating states out from their original dependency
+base. The modification from the previous example is quite small:
+
+```go
+// Same `make` command as before
+buildBase := buildDeps.Run(
+    llb.Dir("/src"),
+    llb.Shlex("make"),
+).Root()
+
+// Now, `make install` doesn't use DESTDIR and just installs directly
+// to the rootfs of the build. The package contents are instead isolated
+// by diffing the rootfs from before and after the install command.
+builtPackage := llb.Diff(buildBase, buildBase.Run(
+    llb.Dir("/src"),
+    llb.Shlex("make install"),
+).Root())
+```
+
+This approach using DiffOp should achieve the same end result as the previous
+version but without having to rely on `DESTDIR` support being present in the
+`make install` step.
+
+The fact that DiffOp is more generic and arguably simpler than setting
+`DESTDIR` or equivalents doesn't mean it's strictly better for every case. The
+following should be kept in mind when dealing with use cases where both
+approaches are viable:
+
+1. The version that uses `DESTDIR` will likely have *slightly* better
+   performance than the version using DiffOp for many use cases. This is because
+   it's faster for Buildkit to merge in a state that is just a single layer on top
+   of scratch (i.e. the first version of `builtPackage` that used `DESTDIR`) than
+   it is to merge in a state whose diff is between two non-empty states (i.e. the
+   DiffOp version). Whether the performance difference actually matters needs to
+   be evaluated on a case-by-case basis.
+1. DiffOp has some subtle behavior discussed in the "Advanced Details" section
+   that, while irrelevant to most use cases, can occasionally distinguish it from
+   the `DESTDIR` approach.
+
+## Performance Considerations
+
+### Laziness
+
+MergeOp and DiffOp are both implemented lazily in that their on-disk filesystem
+representations will only be created when absolutely necessary.
+
+The most common situation in which a Merge/Diff result will need to be
+"unlazied" (created on disk) is when it is used as the input to an Exec or File
+op. For example:
+
+```go
+rootfs := llb.Merge([]llb.State{A, B})
+extraLayer := rootfs.Run(llb.Shlex("some command")).Root()
+```
+
+In this case, if `extraLayer` is not already cached, `extraLayer` will need
+`rootfs` to exist on disk in order to run, so `rootfs` will have to be
+unlazied. The same idea applies if `extraLayer` was defined as a FileOp or if
+`rootfs` was defined using a `DiffOp`.
+
+What's perhaps more interesting are cases in which merge/diff results *don't*
+need to be unlazied. One such situation is when they are exported as a
+container image. As discussed previously, layers from the inputs of merge/diff
+are re-used as much as possible during image exports, so that means that the
+final merged/diffed result is not needed, only the inputs.
+
+Another situation that doesn't require unlazying is when a merge/diff is used
+as an input to another merge/diff. For example:
+
+```go
+diff1 := llb.Diff(A, B)
+diff2 := llb.Diff(C, D)
+merge := llb.Merge([]llb.State{diff1, diff2})
+```
+
+In this case, even though `diff1` and `diff2` are used as inputs to `merge`, they do not need to be unlazied because `merge` is also lazy. If `A`, `B`, `C` or `D` are lazy LLB states, they also do not need to be unlazied. Laziness is transitive in this respect.
+
+### Snapshotter-dependent Optimizations
+
+There are some optimizations in the implementation of Merge and Diff op that
+are relevant to users concerned with scaling large builds involving many
+different merges and/or diffs. These optimizations are ultimately
+implementation details though and don't have any impact on the actual contents
+of merge/diff results.
+
+When a merge or diff result needs to be unlazied, the "universal" fallback
+implementation that works for all snapshotter backends is to create them by
+copying files from the inputs as needed into a new filesystem. This works but
+it can become costly in terms of disk space and CPU time at a certain scale.
+
+However, for two of the default snapshotters (overlay and native), there is an
+optimization in place to avoid copying files and instead hardlink them from the
+inputs into the merged/diffed filesystem. This is at least as fast as copying
+the files and often significantly faster for inputs with large file sizes.
+
+## Advanced Details
+
+These details are not expected to impact many use cases, but are worth
+reviewing if you are experiencing surprising behavior while using Merge and
+Diff op or otherwise want to understand them at a deeper level.
+
+### Layer-like Behavior of Merge and Diff
+
+One important principle of LLB results is that when they are exported as
+container images, an external runtime besides Buildkit that pulls and unpacks
+the image must see the same filesystem that is seen during build time.
+
+That may seem a bit obvious, but it has important implications for Merge and
+Diff, which are ops that are designed to re-use container layers from their
+inputs as much as possible in order to maximize cache re-use and efficiency.
+Many of the more surprising aspects of the behavior discussed in the rest of
+this doc are a result of needing to ensure that Merge+Diff results look the
+same before and after export as container layers.
+
+### Deletions
+
+When either 1) an LLB state deletes a file present in its parent chain or 2)
+`upper` lacks a path that is present in `lower` while using DiffOp, that
+deletion is considered an "entity" in the same way that a directory or file is
+and can have an effect when using that state as a merge input. For example:
+
+```go
+// create a state that only has /foo
+foo := llb.Scratch().File(llb.Mkfile("/foo", 0644, nil))
+
+// create a state where the file /foo has been removed, leaving nothing
+rmFoo := foo.File(llb.Rm("/foo"))
+
+// create a state containing the file /bar on top of the previous "empty" state
+bar := rmFoo.File(llb.Mkfile("/bar", 0644, nil))
+
+merged := llb.Merge([]llb.State{foo, bar})
+```
+
+You might assume that `merged` would consist of the files `/foo` and `/bar`,
+but it will actually just consist of `/bar`. This is because the state `bar`
+also includes a deletion of the file `/foo` in its chain, and that deletion is
+thus a part of its definition.
+
+One way of understanding this is that when you merge `foo` and `bar`, you are
+actually merging the diffs making up each state in the chain that created `foo`
+and `bar`, i.e.:
+
+```go
+llb.Merge([]llb.State{foo, bar}) == llb.Merge([]llb.State{
+    // foo's chain (only 1 layer)
+    llb.Diff(llb.Scratch(), foo), // create /foo
+    // bar's chain (3 layers)
+    llb.Diff(llb.Scratch(), foo), // create /foo
+    llb.Diff(foo, rmFoo), // delete /foo
+    llb.Diff(rmFoo, bar), // create /bar
+})
+```
+
+As you can see, `Diff(foo, rmFoo)` is included there and its only "content" is
+a deletion of `/foo`.
+Therefore, when `merged` is being constructed, it will apply that deletion and
+`/foo` will not exist in the final `merged` result.
+
+Also note that if the order of the merge were reversed to be `Merge([]State{bar,
+foo})`, then `/foo` would actually exist in `merged` alongside `/bar`, because
+then the contents of `foo` take precedence over the contents of `bar`, and the
+creation of `/foo` therefore "overwrites" the previous deletion of it.
+
+One final detail to note is that even though deletions are entities in the same
+way files/dirs are, they do not show up when mounted. For example, if you were
+to mount `llb.Diff(foo, rmFoo)` during a build, you would just see an empty
+directory. Deletions only have an impact when used as an input to MergeOp.
+
+#### Workarounds
+
+For use cases that are experiencing this behavior and do not want it, the best
+option is to find a way to avoid including the problematic deletion in your
+build definition. This can be very use-case specific, but using the previous
+example one option might be this:
+
+```go
+justBar := llb.Diff(rmFoo, bar)
+merged := llb.Merge([]llb.State{foo, justBar})
+```
+
+Now, `merged` consists of both `/foo` and `/bar` because `justBar` has "diffed
+out" its parent `rmFoo` and consists only of the final layer that creates
+`/bar`. Other use cases may require different approaches like changing build
+commands to avoid unneeded deletions of files and directories.
+
+For use cases that can't avoid the deletion for whatever reason, the fallback
+option is to use a Copy op to squash the merge input and discard any deletions.
+So, building off the previous example:
+
+```go
+squashedBar := llb.Scratch().File(llb.Copy(bar, "/", "/"))
+merged := llb.Merge([]llb.State{foo, squashedBar})
+```
+
+This results in `merged` consisting of both `/foo` and `/bar`. This is because
+`squashedBar` is a single layer that only consists of the files and directories
+that existed in `bar`, not any of its deletions.
+
+Note that there are currently performance tradeoffs to this copy approach: it
+will actually result in a copy on disk (i.e. no hardlink optimizations), the
+copy will not be lazy, and `squashedBar` will be a distinct layer from its
+inputs as far as the Buildkit cache and any remote registries are concerned,
+which may or may not matter depending on the use case.
+
+### Diff Corner Cases
+
+There are some cases where it's ambiguous what the right behavior should be
+when merging diffs together. As stated before, Merge+Diff resolve these
+ambiguities by following the same behavior as container image import/export
+implementations in order to maintain consistency.
+
+One example:
+
+```go
+dir := llb.Scratch().File(llb.Mkdir("/dir", 0755))
+dirFoo := dir.File(llb.Mkfile("/dir/foo", 0755, nil))
+// rmFoo consists of a delete of /dir/foo
+rmFoo := llb.Diff(dirFoo, dirFoo.File(llb.Rm("/dir/foo")))
+
+// otherdir just consists of /otherdir
+otherdir := llb.Scratch().File(llb.Mkdir("/otherdir", 0755))
+
+// merged consists of /otherdir and /dir (no /dir/foo though)
+merged := llb.Merge([]llb.State{otherdir, rmFoo})
+```
+
+In this case, you start with just `/otherdir` and apply `rmFoo`, which is a
+deletion of `/dir/foo`. But `/dir/foo` doesn't exist, so it may be reasonable
+to expect that it just has no effect. However, image import/export code will
+actually create `/dir` even though it only exists in order to hold an
+inapplicable delete. As a result, Merge+Diff also have this same behavior.
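+
+As with the mount behavior described in the "Deletions" section, this can be
+observed from inside a build. A minimal sketch (the `ls` step is purely
+illustrative):
+
+```go
+// Mounting merged from the corner-case example above: /mnt contains
+// dir/ (empty) and otherdir/, but no dir/foo.
+checked := llb.Image("alpine").Run(
+    llb.Shlex("ls -R /mnt"),
+    llb.AddMount("/mnt", merged),
+).Root()
+```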
diff --git a/docs/dev/request-lifecycle.md b/docs/dev/request-lifecycle.md
new file mode 100644
index 000000000000..92ded05c874e
--- /dev/null
+++ b/docs/dev/request-lifecycle.md
@@ -0,0 +1,246 @@
+# Solve Request Lifecycle
+
+Buildkit solves build graphs to find the final result. By default, nothing will
+be exported to the client, but requests can be made after solving the graph to
+export results to external destinations (like the client’s filesystem).
+
+A solve request goes through the following:
+
+1. Client makes a solve request and sends it to buildkitd over gRPC. The
+   request may either include an LLB definition, or the name of a frontend
+   (must be `dockerfile.v0` or `gateway.v0`), but it must not be both.
+2. Buildkitd receives the solve request via the Controller. The controller is
+   registered as the ControlServer gRPC service.
+3. The controller passes it down to the LLB solver, which will create a job for
+   this request. It will also create a FrontendLLBBridge, which provides a
+   solving interface over the job object.
+4. The request is processed:
+   - If the request is definition-based, it will simply build the definition.
+   - If the request is frontend-based, it will run the frontend over the
+     gateway while passing it a reference to the FrontendLLBBridge. Frontends
+     must return a result for the solve request, but they may also issue solve
+     requests themselves to the bridge.
+5. The results are plumbed back to the client, and the temporary job and bridge
+   are discarded.
+
+
+```mermaid
+sequenceDiagram
+    ControlClient ->> ControlServer : Solve
+    ControlServer ->> Solver : Solve
+
+    Solver ->> Job : Create job
+    activate Job
+
+    Solver ->> FrontendLLBBridge : Create bridge over Job
+    activate FrontendLLBBridge
+
+    Solver ->> FrontendLLBBridge : Solve
+
+    alt definition-based solve
+        FrontendLLBBridge ->> Job : Build
+        activate Job
+        Job -->> FrontendLLBBridge : Result
+        deactivate Job
+    else frontend-based solve
+        FrontendLLBBridge ->> Frontend : Solve
+        activate Frontend
+        note over FrontendLLBBridge, Frontend : Frontend must be either<br>dockerfile.v0 or gateway.v0.
+
+        loop
+            Frontend ->> FrontendLLBBridge : Solve
+            FrontendLLBBridge ->> Job : Build
+            activate Job
+            note over FrontendLLBBridge, Frontend : Implementations may also<br>call FrontendLLBBridge to<br>solve graphs before<br>returning the result.
+            Job -->> FrontendLLBBridge : Result
+            deactivate Job
+            FrontendLLBBridge -->> Frontend : Result
+        end
+
+        Frontend -->> FrontendLLBBridge : Result
+        deactivate Frontend
+    end
+
+    FrontendLLBBridge -->> Solver : Result
+    Solver ->> FrontendLLBBridge : Discard
+    deactivate FrontendLLBBridge
+
+    Solver ->> Job : Discard
+    deactivate Job
+
+    Solver -->> ControlServer : Result
+    ControlServer -->> ControlClient : Result
+```
+
+An important detail is that frontends may also issue solve requests, which are
+often definition-based solves, but can also be frontend-based solves, allowing
+for composability of frontends. Note that if a frontend makes a frontend-based
+solve request, the two frontends will share the same FrontendLLBBridge and
+underlying job.
+
+## Dockerfile frontend (`dockerfile.v0`)
+
+Buildkit comes with a Dockerfile frontend, which is essentially a parser that
+translates Dockerfile instructions into an LLB definition. In order to
+introduce new features into the Dockerfile DSL without breaking backwards
+compatibility, Dockerfiles can include a syntax directive at the top of the
+file to indicate a frontend image to use.
+
+For example, users can include a syntax directive to use
+`docker/dockerfile:1-labs` to opt in to an extended Dockerfile DSL that
+takes advantage of Buildkit features. However, the frontend image doesn’t have
+to be Dockerfile-specific. One can write a frontend that reads a YAML file, and
+using the syntax directive, issue the build request using `docker build -f
+my-config.yaml`.
+
+The lifecycle of a `dockerfile.v0` frontend-based solve request goes through
+the following:
+
+1. Starting from the "frontend-based solve" path, the bridge looks up the
+   Dockerfile frontend if the frontend key is `dockerfile.v0`, and requests a
+   solve from the frontend. The gateway forwarder implements the frontend
+   interface and wraps over a BuildFunc that builds Dockerfiles.
+2. The BuildFunc issues a solve request to read the Dockerfile from a source
+   (local context, git, or HTTP), and parses it to find a syntax directive.
+   - If a syntax directive is found, it delegates the solve to the `gateway.v0`
+     frontend.
+   - If a syntax directive is not found, then it parses the Dockerfile
+     instructions and builds an LLB. The LLB is marshaled into a definition and
+     sent in a solve request.
+
+
+```mermaid
+sequenceDiagram
+    participant Job
+    participant FrontendLLBBridge
+
+    # FIXME: use boxes with https://github.com/mermaid-js/mermaid/issues/1505
+    # box "Dockerfile frontend"
+    participant Frontend as Gateway Forwarder
+    participant BuildFunc
+    # end box
+
+    # FIXME: use incoming messages with https://github.com/mermaid-js/mermaid/issues/1357
+    Job ->> FrontendLLBBridge : Solve
+    FrontendLLBBridge ->> Frontend : Solve
+
+    Frontend ->> BuildFunc : Call
+    activate BuildFunc
+
+    BuildFunc ->> FrontendLLBBridge : Solve
+    FrontendLLBBridge ->> Job : Build
+    activate Job
+    note over Frontend : Solve to read<br>Dockerfile
+    Job -->> FrontendLLBBridge : Result
+    deactivate Job
+    FrontendLLBBridge -->> BuildFunc : Result
+
+    alt Dockerfile has syntax directive
+        BuildFunc ->> FrontendLLBBridge : Solve
+        activate FrontendLLBBridge #FFBBBB
+        note over Frontend : Dockerfile delegates<br>to gateway.v0
+        FrontendLLBBridge -->> BuildFunc : Result
+        deactivate FrontendLLBBridge
+    else Dockerfile has no syntax directive
+        BuildFunc ->> FrontendLLBBridge : Solve
+        FrontendLLBBridge ->> Job : Build
+        activate Job
+        note over Frontend : Solved by<br>Dockerfile2LLB
+        Job -->> FrontendLLBBridge : Result
+        deactivate Job
+        FrontendLLBBridge -->> BuildFunc : Result
+    end
+
+    BuildFunc -->> Frontend : Return
+    deactivate BuildFunc
+
+    Frontend -->> FrontendLLBBridge : Result
+    FrontendLLBBridge -->> Job : Result
+```
+
+## Gateway frontend (`gateway.v0`)
+
+The gateway frontend allows external frontends to be implemented as container
+images, allowing for a pluggable architecture. The container images have access
+to the gRPC service through stdin/stdout. The easiest way to implement a
+frontend image is to create a Go binary that vendors buildkit, which provides
+convenient LLB builders and utilities.
+
+The lifecycle of a `gateway.v0` frontend-based solve request goes through the
+following:
+
+1. Starting from the "frontend-based solve" path, the bridge looks up the
+   Gateway frontend if the frontend key is `gateway.v0`, and requests a solve
+   from the frontend.
+2. The gateway frontend resolves a frontend image from the `source` key
+   and solves the request to retrieve the rootfs for the image.
+3. A temporary gRPC server is created that forwards requests to the LLB bridge.
+4. A container using the frontend image rootfs is created, and a gRPC
+   connection is established from a process inside the container to the
+   temporary bridge forwarder.
+5. The frontend image is then able to build LLBs and send solve requests
+   through the forwarder.
+6. The container exits, and then the results are plumbed back to the LLB
+   bridge, which plumbs them back to the client.
+
+
+```mermaid
+sequenceDiagram
+    participant Job
+    participant FrontendLLBBridge
+    participant Frontend as Gateway frontend
+    participant Worker
+    participant LLBBridgeForwarder
+    participant Executor
+    participant Container as Frontend Container
+
+    Job ->> FrontendLLBBridge : Solve
+    FrontendLLBBridge ->> Frontend : Solve
+    Frontend ->> Worker : ResolveImageConfig
+    activate Worker
+    Worker -->> Frontend : Digest
+    deactivate Worker
+    Frontend ->> FrontendLLBBridge : Solve
+
+    FrontendLLBBridge ->> Job : Build
+    activate Job
+    note over FrontendLLBBridge, Frontend : The frontend image specified<br>by build option "source" is solved<br>and the rootfs of that image<br>is then used to run the container.
+    Job -->> FrontendLLBBridge : Result
+    deactivate Job
+
+    FrontendLLBBridge -->> Frontend : Result
+
+    note over LLBBridgeForwarder, Executor : A temporary gRPC server is created<br>that listens on stdio of frontend<br>container. Requests are then<br>forwarded to LLB bridge.
+    Frontend ->> LLBBridgeForwarder : Create forwarder
+    activate LLBBridgeForwarder
+
+    Frontend ->> FrontendLLBBridge : Exec
+    FrontendLLBBridge ->> Worker : Exec
+    Worker ->> Executor : Exec
+
+    Executor ->> Container : Create container task
+    activate Container #MediumSlateBlue
+
+    rect rgba(100, 100, 100, .1)
+        note over Executor, Container : Frontend images may request<br>definition/frontend-based solves<br>like any other client.
+        loop
+            Container ->> LLBBridgeForwarder : Solve
+            LLBBridgeForwarder ->> FrontendLLBBridge : Solve
+            activate FrontendLLBBridge #FFBBBB
+            FrontendLLBBridge -->> LLBBridgeForwarder : Result
+            deactivate FrontendLLBBridge
+            LLBBridgeForwarder -->> Container : Result
+        end
+    end
+
+    Container -->> Executor : Exit
+    deactivate Container
+
+    Executor -->> Worker : Exit
+    Worker -->> FrontendLLBBridge : Exit
+    FrontendLLBBridge -->> Frontend : Exit
+    Frontend ->> LLBBridgeForwarder : Discard
+    deactivate LLBBridgeForwarder
+
+    Frontend -->> FrontendLLBBridge : Result
+    FrontendLLBBridge -->> Job : Result
+```
diff --git a/docs/dev/solver.md b/docs/dev/solver.md
new file mode 100644
index 000000000000..db8b9e146d13
--- /dev/null
+++ b/docs/dev/solver.md
@@ -0,0 +1,325 @@
+# Buildkit solver design
+
+The solver is a component in BuildKit responsible for parsing the build
+definition and scheduling the operations to the workers for execution.
+
+The solver package is heavily optimized for deduplication of work, concurrent
+requests, remote and local caching and different per-vertex caching modes. It
+also allows operations and frontends to call back into the solver with new
+definitions that they have generated.
+
+The implementation of the solver is quite complicated, mostly because it is
+supposed to be performant with a snapshot-based storage layer and a
+distribution model using layer tarballs. It is expected that calculating the
+content-based checksum of snapshots between every operation or after every
+command execution is too slow for common use cases and needs to be postponed to
+when it is likely to have a meaningful impact. Ideally, the user shouldn't
+realize that these optimizations are taking place and just get intuitive
+caching. It is also hoped that if some implementations can provide better cache
+capabilities, the solver would take advantage of that without requiring
+significant modification.
+
+In addition to avoiding content checksum scanning, the implementation is also
+designed to make decisions with minimum available data. For example, for remote
+caching sources to be effective, the solver will not require the cache to be
+loaded or to exist for all the vertexes in the graph but will only load it for
+the final node that is determined to match cache. As another example, if one of
+the inputs (for example an image) can produce a definition-based cache match
+for a vertex, and another (for example local source files) can only produce a
+content-based (slower) cache match, the solver is designed to detect it and
+skip the content-based check for the first input (which would cause a pull to
+happen).
+
+## Build definition
+
+The solver takes in a build definition in the form of a content addressable
+operation definition that forms a graph.
+
+A vertex in this graph is defined by these properties:
+
+```go
+type Vertex interface {
+	Digest() digest.Digest
+	Options() VertexOptions
+	Sys() interface{}
+	Inputs() []Edge
+	Name() string
+}
+
+type Edge struct {
+	Index Index
+	Vertex Vertex
+}
+
+type Index int
+```
+
+Every vertex has a content-addressable digest that represents a checksum of the
+definition graph up to that vertex, including all of its inputs. If two
+vertexes have the same checksum, they are considered identical when they are
+executing concurrently. That means that if two other vertexes request a vertex
+with the same digest as an input, they will wait for the same operation to
+finish.
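+
+As an illustration of this digest property (the helper below is hypothetical,
+not the solver's actual implementation), a vertex checksum covers the vertex's
+own definition plus the digest and index of every input:
+
+```go
+func vertexDigest(opDefinition []byte, inputs []Edge) digest.Digest {
+	// Hash the vertex's own definition...
+	h := sha256.New()
+	h.Write(opDefinition)
+	// ...and mix in the digest and output index of each input, so that two
+	// vertexes with different inputs can never share a digest.
+	for _, e := range inputs {
+		fmt.Fprintf(h, "%s:%d", e.Vertex.Digest(), e.Index)
+	}
+	return digest.NewDigest(digest.SHA256, h)
+}
+```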
+
+The vertex digest can only be used for comparison while the solver is running
+and not between different invocations. For example, if parallel builds require
+using the `docker.io/library/alpine:latest` image as one of the operations, it
+is pulled only once. But if a build using `docker.io/library/alpine:latest` was
+built earlier, the checksum based on that name can't be used for determining if
+the vertex was already built, because the image might have changed in the
+registry and the "latest" tag might be pointing to another image.
+
+The `Sys()` method returns an object that is used to resolve the executor for
+the operation. This is how a definition can pass logic to the worker that will
+execute the task associated with the vertex, without the solver needing to know
+anything about the implementation. When the solver needs to execute a vertex,
+it will send this object to a worker, so the worker needs to be configured to
+understand the object returned by `Sys()`. The solver itself doesn't care how
+the operations are implemented and therefore doesn't define a type for this
+value. In the LLB solver this value has the type `llb.Op`.
+
+`Inputs()` returns an array of other vertexes the current vertex depends on. A
+vertex may have zero inputs. After an operation has executed, it returns an
+array of return references. If another operation wants to depend on any of
+these references, it would define an input with that vertex and an index of the
+reference from the return array (starting from zero). Inputs need to be
+contained in the `Digest()` of the vertex - two vertexes with different inputs
+should never have the same digest.
+
+Options contain extra information that can be associated with the vertex but
+that doesn't change the definition (or equality check) of it. Normally this is
+a hint to the solver, for example, to ignore cache when executing. It can also
+be used for associating messages with the vertex that can be helpful for
+tracing purposes.
+
+## Operation interface
+
+The Operation interface is how the solver can evaluate the properties of the
+actual vertex operation. These methods run on the worker, and their
+implementation is determined by the value of `vertex.Sys()`. The solver is
+configured with a "resolve" function that can convert a `vertex.Sys()` into an
+`Op`.
+
+```go
+// Op is an implementation for running a vertex
+type Op interface {
+	// CacheMap returns structure describing how the operation is cached.
+	// Currently only roots are allowed to return multiple cache maps per op.
+	CacheMap(context.Context, int) (*CacheMap, bool, error)
+	// Exec runs an operation given results from previous operations.
+	// Note that this is not the process execution but can have any definition.
+	Exec(ctx context.Context, inputs []Result) (outputs []Result, err error)
+}
+
+type CacheMap struct {
+	// Digest is a base digest for operation that needs to be combined with
+	// inputs cache or selectors for dependencies.
+	Digest digest.Digest
+	Deps []struct {
+		// Optional digest that is merged with the cache key of the input
+		Selector digest.Digest
+		// Optional function that returns a digest for the input based on its
+		// return value
+		ComputeDigestFunc ResultBasedCacheFunc
+	}
+}
+
+type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error)
+
+
+// Result is an abstract return value for a solve
+type Result interface {
+	ID() string
+	Release(context.Context) error
+	Sys() interface{}
+}
+```
+
+There are two functions that every operation defines. One describes how to
+calculate a cache key for a vertex and another how to execute it.
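+
+As an illustration only (the type below is hypothetical; real implementations
+live in the LLB solver), a minimal `Op` with no inputs and a constant result
+could look like:
+
+```go
+type constOp struct {
+	res Result // the fixed result this op always produces
+}
+
+func (op *constOp) CacheMap(ctx context.Context, group int) (*CacheMap, bool, error) {
+	// No inputs, so no Deps; for a root op like this, the digest must be a
+	// stable, secure checksum.
+	return &CacheMap{Digest: digest.FromString("example.constOp.v1")}, true, nil
+}
+
+func (op *constOp) Exec(ctx context.Context, inputs []Result) ([]Result, error) {
+	// Nothing to execute; just return the fixed result.
+	return []Result{op.res}, nil
+}
+```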
+
+`CacheMap` is a description for calculating the cache key. It contains a digest
+that is combined with the cache keys of the inputs to determine the stable
+checksum that can be used to cache the operation result. For the vertexes that
+don't have inputs (roots), it is important that this digest is a stable, secure
+checksum. For example, in LLB this digest is a manifest digest for container
+images or a commit SHA for git sources.
+
+`CacheMap` may also define optional selectors or content-based cache functions
+for its inputs. A selector is combined with the input cache key and is useful
+for describing when different parts of an input are being used and the input's
+cache key needs to be customized. A content-based cache function allows
+computing a new cache key for an input after it has completed. In LLB this is
+used for calculating a cache key based on the checksum of the file contents of
+the input snapshots.
+
+`Exec` executes the operation defined by a vertex by passing in the results of
+the inputs.
+
+## Shared graph
+
+After a new build request is sent to the solver, it first loads all the
+vertexes into the shared graph structure. For status tracking, a job instance
+needs to be created, and vertexes are loaded through jobs. A job ID is assigned
+to every vertex. If a vertex with the same digest has already been loaded to
+the shared graph, a new job ID is appended to the existing record. When the job
+finishes, it removes all of its references from the loaded vertex. The
+resources are released if no more references remain.
+
+Loading a vertex also creates a progress writer associated with it and sets up
+the cache sources associated with the specific vertex.
+
+After vertexes have been loaded to the job, it is safe to request a result from
+an edge pointing to a previously loaded vertex. To do this, the `build(ctx,
+Edge) (CachedResult, error)` method is called on the static scheduler instance
+associated with the solver.
+
+## Scheduler
+
+The scheduler is a component responsible for invoking the individual operations
+needed to find the result for the graph. While the build definition is defined
+with vertexes, the scheduler is solving edges. In the case of the LLB solver, a
+result of a solved edge is associated with a snapshot. Usually, to solve an
+edge, the input edges need to be solved first, and this can be done
+concurrently, but there are many exceptions: an edge may be cached while its
+input is not, or solving one input might cause a cache hit while solving the
+others would just be wasteful. The scheduler tries to handle all these cases.
+
+The scheduler is implemented as a single threaded non-blocking event loop. The
+single threaded constraint is for simplicity and might be removed in the future -
+currently, it is not known if this would have any performance impact. All the
+events in the scheduler have one fixed sender and receiver. The interface for
+interacting with the scheduler is to create a "pipe" between a sender and a
+receiver. One or both sides of the pipe may be an edge instance of the graph.
+If a pipe is added to the scheduler and an edge receives an event from the
+pipe, the scheduler will "unpark" that edge so it can process all the events it
+had received.
+
+The unpark handler for an edge needs to be non-blocking and execute quickly.
+The edge will process the data from the incoming events and update its internal
+state.
+When calling unpark, the scheduler has already separated out the sender and
+receiver sides of the pipes, which in the code are referred to as incoming and
+outgoing requests. The incoming requests are usually requests to retrieve a
+result or a cache key from an edge. If it appears that an edge doesn't have
+enough internal state to satisfy the requests, it can make new pipes and
+register them with the scheduler. These new pipes are generally of two types:
+ones asking for some async function to be completed and others that request an
+input edge to reach a specific state first.
+
+To avoid bugs and deadlocks in this logic, the unpark method needs to follow
+these rules: if unpark has finished without completing all incoming requests,
+it needs to create outgoing requests. Similarly, if an incoming request remains
+pending, at least one outgoing request needs to exist as well. Failing to
+comply with this rule will cause the scheduler to panic as a precaution to
+avoid leaks and hiding errors.
+
+## Edge state
+
+During unpark, the edge state is incremented until it can fulfill the incoming
+requests.
+
+An edge can be in the following states: initial, cache-fast, cache-slow,
+completed. A completed edge contains a reference to the final result; an
+in-progress edge may have zero or more cache keys.
+
+The initial state is the starting state for any edge. If an edge has reached
+the cache-fast state, it means that all the definition-based cache key lookups
+have been performed. Cache-slow means that the content-based cache lookup has
+been performed as well. If possible, the scheduler will avoid looking up the
+slow keys of inputs if they are unnecessary for solving the current edge.
+
+The unpark method is split into four phases. The first phase processes all
+incoming events (responses from outgoing requests or new incoming requests)
+that caused the unpark to be called. These contain responses from async
+functions like calls to get the cachemap, execution result or content-based
+checksum for an input, or responses from input edges when their state or number
+of cache keys has changed. All the results are stored in the edge's internal
+state. For the new cache keys, a query is performed to determine if any of them
+can create potential matches to the current edge.
+
+After that, if any of the updates caused changes to the edge's properties, a
+new state is calculated for the current vertex. In this step, all potential
+cache keys from inputs can cause new cache keys for the edge to be created and
+the status of an edge might be updated.
+
+Third, the edge will go over all of its incoming requests, to determine if the
+current internal state is sufficient for satisfying them all. There are a
+couple of possibilities for how this check may end up. If all requests can be
+completed and there are no outgoing requests, the requests finish and the
+unpark method returns. If there are outgoing requests but the edge has reached
+the completed state or all incoming requests have been canceled, the outgoing
+requests are canceled. This is an async operation as well and will cause unpark
+to be called again after completion. If this condition didn't apply but
+requests could be completed and there are outgoing requests, then the incoming
+request is answered but not completed. The receiver can then decide to cancel
+this request if needed. If no new data has appeared to answer the incoming
+requests, the desired state for the edge is determined from the incoming
+requests, and we continue to the next step.
+
+The fourth step sets up outgoing requests based on the desired state determined
+in the third step. If the current state requires calling any async functions to
+move forward, then it is done here. We will also loop through all the inputs to
+determine if it is important to raise their desired state. Depending on what
+inputs can produce content-based cache keys and what inputs have already
+returned possible cache matches, the desired state for inputs may be raised at
+different times.
+
+When an edge needs to resolve an operation to call the async `CacheMap` and
+`Exec` methods, it does so by calling back to the shared graph. This makes sure
+that two different edges pointing to the same vertex do not execute twice. The
+result values for the operation that is shared by the edges are also cached
+until the vertex is cleaned up. Progress reporting is also handled and
+forwarded to the job through this shared vertex instance.
+
+Edge state is cleaned up when the final job that loaded the vertexes they are
+connected to is discarded.
+
+## Cache providers
+
+Cache providers determine if there is a result that matches the cache keys
+generated during the build that could be reused instead of fully reevaluating
+the vertex and its inputs. There can be multiple cache providers, and specific
+providers can be defined per vertex using the vertex options.
+
+There are multiple backend implementations for cache providers: an in-memory
+one used in unit tests, the default local one using bbolt, and one based on
+cache manifests in a remote registry.
+
+A simplified cache provider has the following methods:
+
+```go
+Query(...) ([]*CacheKey, error)
+Records(ck *CacheKey) ([]*CacheRecord, error)
+Load(ctx context.Context, rec *CacheRecord) (Result, error)
+Save(key *CacheKey, s Result) (*ExportableCacheKey, error)
+```
+
+The Query method is used to determine if there exists a possible cache link
+between an input and a vertex. It takes parameters provided by `op.CacheMap`
+and the cache keys returned by calling the same method on its inputs.
+
+If a cache key has been found, its matching records can be requested. A cache
+key can have zero or more records. Having a record means that a cached result
+can be loaded for a specific vertex. The solver supports partial cache chains,
+meaning that not all inputs need to have a cache record to match cache for a
+vertex.
+
+The Load method is used to load a specific record into a result reference. This
+value is the same type as the one returned by the `op.Exec` method.
+
+Save allows adding more records to the cache.
+
+## Merging edges
+
+One final piece of solver logic allows merging two edges into one when they
+have both returned the same cache key. In practice, this appears for example
+when a build uses the image references `alpine:latest` and `alpine@sha256:abcabc`
+in its definition and they actually point to the same image. Another case where
+this appears is when the same source files from different sources are being
+used as part of the build.
+
+After the scheduler has called `unpark()` on an edge, it checks if the method
+added any new cache keys to its state. If it did, it will check its internal
+index to see if another active edge already exists with the same cache key. If
+one does, it performs some basic validation, for example checking that the new
+edge has not explicitly asked for cache to be ignored, and if it passes, merges
+the states of the two edges.
+ +In the result of the merge, the edge that was checked is deleted, its ongoing +requests are canceled and the incoming ones are added to the original edge. diff --git a/docs/experimental-syntaxes.md b/docs/experimental-syntaxes.md index 138050f94919..6dbb48754ec0 100644 --- a/docs/experimental-syntaxes.md +++ b/docs/experimental-syntaxes.md @@ -1,4 +1,3 @@ # Dockerfile frontend syntaxes -Documentation for Dockerfile syntaxes can be found in the -[Dockerfile frontend documentation](/frontend/dockerfile/docs/syntax.md) +This page has moved to [Dockerfile reference documentation](/frontend/dockerfile/docs/reference.md) diff --git a/docs/images-readme.md b/docs/images-readme.md index 8959f13abcd0..0ac33f3367e6 100644 --- a/docs/images-readme.md +++ b/docs/images-readme.md @@ -4,15 +4,15 @@ BuildKit is a concurrent, cache-efficient, and Dockerfile-agnostic builder toolk Report issues at https://github.com/moby/buildkit -Join `#buildkit` channel on [Docker Community Slack](http://dockr.ly/slack) +Join `#buildkit` channel on [Docker Community Slack](https://dockr.ly/comm-slack) # Tags ### Latest stable release -- [`v0.9.0`, `latest`](https://github.com/moby/buildkit/blob/v0.9.0/Dockerfile) +- [`v0.10.0`, `latest`](https://github.com/moby/buildkit/blob/v0.10.0/Dockerfile) -- [`v0.9.0-rootless`, `rootless`](https://github.com/moby/buildkit/blob/v0.9.0/Dockerfile) (see [`docs/rootless.md`](https://github.com/moby/buildkit/blob/master/docs/rootless.md) for usage) +- [`v0.10.0-rootless`, `rootless`](https://github.com/moby/buildkit/blob/v0.10.0/Dockerfile) (see [`docs/rootless.md`](https://github.com/moby/buildkit/blob/master/docs/rootless.md) for usage) ### Development build from master branch diff --git a/docs/merge+diff.md b/docs/merge+diff.md deleted file mode 100644 index 09322f332b58..000000000000 --- a/docs/merge+diff.md +++ /dev/null @@ -1,418 +0,0 @@ -# Merge and Diff Ops -MergeOp and DiffOp are two interrelated LLB operations that enable the rebasing of LLB results onto other results and the separation of LLB results from their base, respectively. Underneath the hood, these ops enable fine grain manipulation of container layer chains that can result in highly efficient operations for many use cases. - -This doc assumes some familiarity with LLB and ops like ExecOp and FileOp. More background on LLB can be obtained from the README.md in Buildkit's git repository. This doc also uses the Go LLB client for examples, though MergeOp and DiffOp are not in any way language specific. - -## MergeOp -MergeOp has a very simple interface: -```go -func Merge(inputs []llb.State) llb.State -``` - -The intuition is that it merges the contents of the provided states together into one state (hence the name), with files from later states taking precedence over those from earlier ones. - -To be more concrete, MergeOp returns a state where each of the input states are rebased on top of each other in the order provided. "Rebasing" a state `B` onto another state `A` creates a state that: -* Has all the contents of `B` -* Has all the contents of `A` except when a path exists in both `B` and `A`. In this case: - * If both paths are directories, their contents are merged. Metadata (such as permissions) on the directory from `B` take precedence. - * If one of the paths is not a directory, whatever is present in `B` takes precedence. This also means that if a file in `B` overwrites a dir in `A`, then all files/dirs in the tree under at that path in `A` are also removed. - -MergeOp is associative, i.e. 
using shorthand notation: `Merge(A, B, C) == Merge(Merge(A, B), C) == Merge(A, Merge(B, C))`. Buildkit knows this and internally optimizes LLB merges that are equivalent in this way to re-use the same cache entries. - -There are more subtleties to the behavior of MergeOp, such as when deletions are present in a layer making up a state, discussed in the "Advanced Details" section of this doc. - -States created by MergeOp are the same as any other LLB states in that they can be used as the base for exec, be mounted to arbitrary paths in execs, be plugged into other merges and diffs, be exported, etc. - -As a very simple example: -```go -// a has /dir/a -a := llb.Scratch(). - File(llb.Mkdir("/dir", 0755)). - File(llb.Mkfile("/dir/a", 0644, []byte("a"))) - -// b has /dir/b and /otherdir -b := llb.Scratch(). - File(llb.Mkdir("/dir", 0755)). - File(llb.Mkfile("/dir/b", 0644, []byte("b"))). - File(llb.Mkdir("/otherdir", 0755)) - -// c has /dir/a and /dir/c -c := llb.Scratch(). - File(llb.Mkdir("/dir", 0700)). - File(llb.Mkfile("/dir/a", 0644, []byte("overwritten"))). - File(llb.Mkfile("/dir/c", 0644, []byte("c"))) - -// merged will consist of /dir/a, /dir/b, /dir/c and /otherdir. -// The file at /dir/a will have contents set to "overwritten" because c is merged after a. -// /dir will have permissions set to 0700 for the same reason. -merged := llb.Merge([]llb.State{a, b, c}) - -// merged can be used as the base for new states -mergedPlusMore := merged.File(llb.Mkdir("/yetanotherdir", 0755)) -// or as the input to other merges -mergedPlusMore = llb.Merge([]llb.State{merged, llb.Scratch().File(llb.Mkdir("/yetanotherdir", 0755))}) -``` - -### Container Image Export -When the result of a MergeOp is exported as a container image, the image will consist of the layers making up each input joined together in the order of the MergeOp. If Buildkit has cached any one of these layers already, they will not need to be re-exported (i.e. re-packaged into compressed tarballs). Additionally, if the image is being pushed to a registry and the registry indicates it already has any of the layers, then Buildkit can skip pushing those layers entirely. - -Layers joined together by MergeOp do not have dependencies on each other, so a cache invalidation of the layers of one input doesn't cascade to the layers of the other inputs. - -## DiffOp -DiffOp also has a very simple interface: -```go -func Diff(lower llb.State, upper llb.State) llb.State -``` - -The intuition is that it returns a state whose contents are the difference between `lower` and `upper`. It can be viewed as something like the inverse of MergeOp; whereas MergeOp "adds" states together, DiffOp "subtracts" `lower` from `upper` (in a manner of speaking). - -More specifically, DiffOp returns a state that has the contents present in `upper` that either aren't present in `lower` or have changed from `lower` to `upper`. Another way of thinking about it is that if you start at `A` and apply `Diff(A, B)`, you will end up at `B`. Or, even more succinctly, `Merge(A, Diff(A, B)) == B`. - -Files and dirs are considered to have changed between `lower` and `upper` if their contents are unequal or if metadata like permissions and `mtime` have changed. Unequal `atime` or `ctime` values are not considered to be a change. - -There are more subtleties to the behavior of DiffOp discussed in the "Advanced Details" section of this doc. 
- -States created by DiffOp are the same as any other LLB states in that they can be used as the base for exec, be mounted to arbitrary paths in execs, be plugged into merges and other diffs, be exported, etc. - -As a very simple example: -```go -base := llb.Image("alpine") -basePlusBuilt := base.Run(llb.Shlex("touch /foo")).Root() -// diffed consists of just the file /foo, nothing in the alpine image is present -diffed := llb.Diff(base, basePlusBuilt) -``` - -### Container Image Export -When the result of a DiffOp is exported as a container image, layers will be re-used as much as possible. To explain, consider this case: -```go -lower := llb.Image("alpine") -middle := lower.Run(llb.Shlex("touch /foo")).Root() -upper := middle.Run(llb.Shlex("touch /bar")).Root() -diff := llb.Diff(lower, upper) -``` - -In this case, there is a "known chain" from `lower` to `upper` because `lower` is a state in `upper`'s history. This means that when the DiffOp is exported as a container image, it can just consist of the container layers for `middle` joined with the container layers for `upper`. - -Another way of thinking about this is that when `lower` is a state in `upper`'s history, the diff between the two is equivalent to a merge of the states between them. So, using the example above: -```go -llb.Diff(lower, upper) == llb.Merge([]llb.State{ - llb.Diff(lower, middle), - llb.Diff(middle, upper), -}) -```` -This behavior extends to arbitrary numbers of states separating `lower` and `upper`. - -In the case where there is not a chain between `lower` and `upper` that Buildkit can determine, DiffOp still works consistently but, when exported, will always result in a single layer that is not re-used from its inputs. - -## Example Use Case: Better "Copy Chains" with MergeOp -### The Problem -A common pattern when building container images is to independently assemble components of the image and then combine those components together into a final image using a chain of Copy FileOps. For example, when using the Dockerfile frontend, this is the multi-stage build pattern and a chain of `COPY --from=...` statements. - -One issue with this type of pattern is that if any of the inputs to the copy chain change, that doesn't just invalidate Buildkit's cache for that input, it also invalidates Buildkit's cache for any copied layers after that one. - -To be a bit more concrete, consider the following LLB as specified with the Go client: -```go -// stage a -a := llb.Image("alpine").Run("build a").Root() -// stage b -b := llb.Image("alpine").Run("build b").Root() -// stage c -c := llb.Image("alpine").Run("build c").Root() - -// final combined stage -combined := llb.Image("alpine"). - File(llb.Copy(a, "/bin/a", "/usr/local/bin/a")). - File(llb.Copy(b, "/bin/b", "/usr/local/bin/b")). - File(llb.Copy(c, "/bin/c", "/usr/local/bin/c")) -``` - -Note that this is basically the equivalent of the following Dockerfile: -```dockerfile -FROM alpine as a -RUN build a - -FROM alpine as b -RUN build b - -FROM alpine as c -RUN build c - -FROM alpine as combined -COPY --from=a /bin/a /usr/local/bin/a -COPY --from=b /bin/b /usr/local/bin/b -COPY --from=c /bin/c /usr/local/bin/c -``` - -Now, say you do a build of this LLB and export the `combined` stage as a container image to a registry. If you were to then repeat the same build with the same instance of Buildkit, each part of the build should be cached, resulting in no work needing to be done and no layers needing to be exported or pushed to the registry. 
- -Then, say you later do the build again but this time with a change to `a`. The build for `a` is thus not cached, which means that the copy of `/bin/a` into `/usr/local/bin/a` of `combined` is also not cached and has to be re-run. The problem is that because the copies into `combined` are chained together, the invalidation of the copy from `a` also cascades to its descendants, namely the copies from `b` and `c`. This is despite the fact that `b` and `c` are independent of `a` and thus don't need to be invalidated. In graphical form: -```mermaid -graph TD - alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0") - alpine -->|CACHE HIT fa:fa-check| B("build b") - alpine -->|CACHE HIT fa:fa-check| C("build c") - - A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a) - busybox("busybox") -->|CACHE HIT fa:fa-check| ACopy - B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b) - ACopy -->|CACHE MISS fa:fa-ban| BCopy - C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c) - BCopy -->|CACHE MISS fa:fa-ban| CCopy - - classDef green fill:#5aa43a,stroke:#333,stroke-width:2px; - class alpine,B,C,busybox green - classDef red fill:#c72020,stroke:#333,stroke-width:2px; - class A,ACopy,BCopy,CCopy red -``` - -As a result, not only do the copies from `b` and `c` to create `/usr/local/bin/b` and `/usr/local/bin/c` need to run again, they also result in new layers needing to be exported and then pushed to a registry. For many use cases, this becomes a significant source of overhead in terms of build times and the amount of data that needs to be stored and transferred. - -### The Solution -MergeOp can be used to fix the problem of cascading invalidation in copy chains: -```go -a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/local/bin/a")) -b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/local/bin/b")) -c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/local/bin/c")) -combined := llb.Merge([]llb.State{ - llb.Image("busybox"), - a, - b, - c, -}) -``` - -(*Note that newer versions of the Dockerfile syntax support a `--link` flag when using `COPY`, which results in basically this same pattern*) - -Two changes have been made from the previous version: -1. `a`, `b`, and `c` have been updated to copy their desired contents to `Scratch` (a new, empty state). -1. `combined` is defined as a MergeOp of the states desired in the final image. - -Say you're doing this build for the first time. The build will first create states `a`, `b`, and `c`, resulting in each being a single layer consisting only of contents `/usr/local/bin/a`, `/usr/local/bin/b`, and `/usr/local/bin/c` respectively. Then, the MergeOp rebases each of those states onto the base `busybox` image. As discussed earlier, the container image export of a MergeOp will consist of the layers of the merge inputs joined together, so the final image looks mostly the same as before. - -The benefits of MergeOp become apparent when considering what happens if the build of `a` is modified. Whereas before this led to invalidation of the copies from `b` and `c`, now those merge inputs are completely unaffected; no new cache entries or new container layers need to be created for them. So, the end result is that the only work Buildkit does when `a` changes is re-build `a` and then push the new layers for `/usr/local/bin/a` (plus a new image manifest).
`/usr/local/bin/b` and `/usr/local/bin/c` do not need to be re-exported and do not need to be re-pushed to the registry. In graphical form: -```mermaid -graph TD - alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0") - alpine -->|CACHE HIT fa:fa-check| B("build b") - alpine -->|CACHE HIT fa:fa-check| C("build c") - - busybox("busybox") -->|CACHE HIT fa:fa-check| Merge("Merge (lazy)") - A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a) - ACopy -->|CACHE MISS fa:fa-ban| Merge - B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b) - BCopy -->|CACHE HIT fa:fa-check| Merge - C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c) - CCopy -->|CACHE HIT fa:fa-check| Merge - - classDef green fill:#5aa43a,stroke:#333,stroke-width:2px; - class alpine,B,BCopy,C,CCopy,busybox green - classDef red fill:#c72020,stroke:#333,stroke-width:2px; - class A,ACopy red -``` - -An important aspect of this behavior is that MergeOp is implemented lazily, which means that its on-disk filesystem representation is only created locally when strictly required. This means that even though a change to `a` invalidates the MergeOp as a whole, no work needs to be done to create the merged state on-disk when it's only being exported as a container image. This laziness behavior is discussed more in the "Performance Considerations" section of the doc. - -You can see a working-code example of this by comparing `examples/buildkit3` with `examples/buildkit4` in the Buildkit git repo. - -## Example Use Case: Remote-only Image Append with MergeOp -If you have some layers already pushed to a remote registry, MergeOp allows you to create new images that combine those layers in arbitrary ways without having to actually pull any layers down first. For example: -```go -foo := llb.Image("fooApp:v0.1") -bar := llb.Image("barApp:v0.3") -qaz := llb.Image("qazApp:v1.2") -merged := llb.Merge([]llb.State{foo, bar, qaz}) -``` -If `merged` is being exported to the same registry that already has the layers for `fooApp`, `barApp` and `qazApp`, then the only thing Buildkit does during the export is create an image manifest (just some metadata) and push it to the registry. No layers need to be pushed (they are already there) and they don't even need to be pulled locally to Buildkit either. - -Note that if you were to instead do this: -```go -merged := llb.Merge([]llb.State{foo, bar, qaz}).Run(llb.Shlex("extra command")).Root() -``` -Then `fooApp`, `barApp` and `qazApp` will need to be pulled, though they will usually be merged together more efficiently than the naive solution of just unpacking the layers on top of each other. See the "Performance Considerations" section for more info. - -Additionally, if you export your Buildkit cache to a registry, this same idea can be extended to any LLB types, not just `llb.Image`. So, using a variant of the example from the previous use case: -```go -a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/bin/a")) -b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/bin/b")) -c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/bin/c")) -combined := llb.Merge([]llb.State{ - llb.Image("alpine"), - a, - b, - c, -}) -``` - -If you do a build that includes a remote cache export to a registry, then any Buildkit worker importing that cache can run builds that do different merges of those layers without having to pull anything down.
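The remote cache export itself can be requested as part of a solve; a rough sketch with the Go client, where the cache ref is a placeholder, might look like:
```go
package main

import (
	"context"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
)

// exportBuildCache solves def and additionally exports the build cache to a
// registry, so that other workers can import it as a metadata-only download.
func exportBuildCache(ctx context.Context, c *client.Client, def *llb.Definition) error {
	_, err := c.Solve(ctx, def, client.SolveOpt{
		CacheExports: []client.CacheOptionsEntry{{
			Type:  "registry",
			Attrs: map[string]string{"ref": "docker.io/username/buildcache"}, // placeholder cache ref
		}},
	}, nil)
	return err
}
```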
For instance, if a separate Buildkit worker imported that remote cache and then built this: -```go -combined2 := llb.Merge([]llb.State{ - c, - a, -}) -``` -An export of `combined2` would not need to pull any layers down because it's just a merge of `c` and `a`, which already have layers in the registry thanks to the remote cache. This works because a remote cache import is actually just a metadata download; layers are only pulled locally once needed and they aren't needed for this MergeOp. - -## Example Use Case: Modeling Package Builds with MergeOp+DiffOp -Merge and Diff have many potential use cases, but one primary one is to assist higher-level tooling that's using LLB to model "dependency-based builds", such as what's found in many package managers and other build systems. - -More specifically, the following is a common pattern used to model the build of a "package" (or equivalent concept) in such systems: -1. The build-time dependencies of the package are combined into a filesystem. The dependencies are themselves just already-built packages. -1. A build is run by executing some commands that have access to the combined dependencies, producing new build artifacts that are somehow isolated from the dependencies. These isolated build artifacts become the new package's contents. -1. The new package can then be used as a dependency of other packages and/or served directly to end users, while being careful to ensure that any runtime dependencies are also present when the package needs to be utilized. - -One way to adapt the above model to LLB might be like this: -```go -// "Packages" are just LLB states. Build-time dependencies are combined -// together into a filesystem using MergeOp. -runtimeDeps := llb.Merge([]llb.State{depC, depD}) -buildDeps := llb.Merge([]llb.State{src, depA, depB, runtimeDeps}) - -// Builds of a new package are ExecOps on top of the MergeOp from the previous step -// (one ExecOp for the build and one for the install). The install ExecOp is defined -// such that build artifacts are written to a dedicated Mount, isolating them from -// the dependencies by placing them under /output. -builtPackage := buildDeps.Run( - llb.Dir("/src"), - llb.Shlex("make"), -).Root().Run( - llb.Dir("/src"), - llb.Shlex("make install"), - llb.AddEnv("DESTDIR", "/output"), - llb.AddMount("/output", llb.Scratch()), -).GetMount("/output") - -// If the package needs to be run as part of a different build or by an -// end user, the runtime deps of the state can be included via a MergeOp. -llb.Merge([]llb.State{runtimeDeps, builtPackage}) -``` - -While the above is a bit of an over-simplification (it, for instance, ignores the need to topologically sort dependency DAGs before merging them together), the important point is that it only needs MergeOp and ExecOp; DiffOp is left out entirely. For many use cases, this is completely fine and DiffOp is not needed. - -Some use cases can run into issues though, specifically with the part where build artifacts need to be isolated from their dependencies. The above example uses the convention of setting `DESTDIR`, an environment variable that specifies a directory that `make install` should place artifacts under. Most build systems support either `DESTDIR` or some type of equivalent mechanism for isolating installed build artifacts. However, there are times when this convention is either not available or not desired, in which case DiffOp can come to the rescue as a generic, tool-agnostic way of separating states out from their original dependency base.
The modification from the previous example is quite small: -```go -// Same `make` command as before -buildBase := buildDeps.Run( - llb.Dir("/src"), - llb.Shlex("make"), -).Root() - -// Now, `make install` doesn't use DESTDIR and just installs directly -// to the rootfs of the build. The package contents are instead isolated -// by diffing the rootfs from before and after the install command. -builtPackage := llb.Diff(buildBase, buildBase.Run( - llb.Dir("/src"), - llb.Shlex("make install"), -).Root()) -``` - -This approach using DiffOp should achieve the same end result as the previous version but without having to rely on `DESTDIR` support being present in the `make install` step. - -The fact that DiffOp is more generic and arguably simpler than setting `DESTDIR` or equivalents doesn't mean it's strictly better for every case. The following should be kept in mind when dealing with use cases where both approaches are viable: -1. The version that uses `DESTDIR` will likely have *slightly* better performance than the version using DiffOp for many use cases. This is because it's faster for Buildkit to merge in a state that is just a single layer on top of scratch (i.e. the first version of `builtPackage` that used `DESTDIR`) than it is to merge in a state whose diff is between two non-empty states (i.e. the DiffOp version). Whether the performance difference actually matters needs to be evaluated on a case-by-case basis. -1. DiffOp has some subtle behavior discussed in the "Advanced Details" section that, while irrelevant to most use cases, can occasionally distinguish it from the `DESTDIR` approach. - -## Performance Considerations -### Laziness -MergeOp and DiffOp are both implemented lazily in that their on-disk filesystem representations will only be created when absolutely necessary. - -The most common situation in which a Merge/Diff result will need to be "unlazied" (created on disk) is when it is used as the input to an Exec or File op. For example: -```go -rootfs := llb.Merge([]llb.State{A, B}) -extraLayer := rootfs.Run(llb.Shlex("some command")).Root() -``` -In this case, if `extraLayer` is not already cached, `extraLayer` will need `rootfs` to exist on disk in order to run, so `rootfs` will have to be unlazied. The same idea applies if `extraLayer` was defined as a FileOp or if `rootfs` was defined using a `DiffOp`. - -What's perhaps more interesting are cases in which merge/diff results *don't* need to be unlazied. One such situation is when they are exported as a container image. As discussed previously, layers from the inputs of merge/diff are re-used as much as possible during image exports, so that means that the final merged/diffed result is not needed, only the inputs. - -Another situation that doesn't require unlazying is when a merge/diff is used as an input to another merge/diff. For example: -```go -diff1 := llb.Diff(A, B) -diff2 := llb.Diff(C, D) -merge := llb.Merge([]llb.State{diff1, diff2}) -``` - -In this case, even though `diff1` and `diff2` are used as an input to `merge`, they do not need to be unlazied because `merge` is also lazy. If `A`, `B`, `C` or `D` are lazy LLB states, they also do not need to be unlazied. Laziness is transitive in this respect. - -### Snapshotter-dependent Optimizations -There are some optimizations in the implementation of Merge and Diff op that are relevant to users concerned with scaling large builds involving many different merges and/or diffs. 
These optimizations are ultimately implementation details though and don't have any impact on the actual contents of merge/diff results. - -When a merge or diff result needs to be unlazied, the "universal" fallback implementation that works for all snapshotter backends is to create them by copying files from the inputs as needed into a new filesystem. This works but it can become costly in terms of disk space and CPU time at a certain scale. - -However, for two of the default snapshotters (overlay and native), there is an optimization in place to avoid copying files and instead hardlink them from the inputs into the merged/diffed filesystem. This is at least as fast as copying the files and often significantly faster for inputs with large file sizes. - -## Advanced Details -These details are not expected to impact many use cases, but are worth reviewing if you are experiencing surprising behavior while using Merge and Diff op or otherwise want to understand them at a deeper level. - -### Layer-like Behavior of Merge and Diff -One important principle of LLB results is that when they are exported as container images, an external runtime besides Buildkit that pulls and unpacks the image must see the same filesystem that is seen during build time. - -That may seem a bit obvious, but it has important implications for Merge and Diff, which are ops that are designed to re-use container layers from their inputs as much as possible in order to maximize cache re-use and efficiency. Many of the more surprising aspects of the behavior discussed in the rest of this doc are a result of needing to ensure that Merge+Diff results look the same before and after export as container layers. - -### Deletions -When either 1) an LLB state deletes a file present in its parent chain or 2) `upper` lacks a path that is present in `lower` while using DiffOp, that deletion is considered an "entity" in the same way that a directory or file is and can have an effect when using that state as a merge input. For example: -```go -// create a state that only has /foo -foo := llb.Scratch().File(llb.Mkfile("/foo", 0644, nil)) - -// create a state where the file /foo has been removed, leaving nothing -rmFoo := foo.File(llb.Rm("/foo")) - -// create a state containing the file /bar on top of the previous "empty" state -bar := rmFoo.File(llb.Mkfile("/bar", 0644, nil)) - -merged := llb.Merge([]llb.State{foo, bar}) -``` -You might assume that `merged` would consist of the files `/foo` and `/bar`, but it will actually just consist of `/bar`. This is because the state `bar` also includes a deletion of the file `/foo` in its chain, which is thus part of its definition. - -One way of understanding this is that when you merge `foo` and `bar`, you are actually merging the diffs making up each state in the chain that created `foo` and `bar`, i.e.: -```go -llb.Merge([]llb.State{foo, bar}) == llb.Merge([]llb.State{ - // foo's chain (only 1 layer) - llb.Diff(llb.Scratch(), foo), // create /foo - // bar's chain (3 layers) - llb.Diff(llb.Scratch(), foo), // create /foo - llb.Diff(foo, rmFoo), // delete /foo - llb.Diff(rmFoo, bar), // create /bar -}) -``` -As you can see, `Diff(foo, rmFoo)` is included there and its only "content" is a deletion of `/foo`. Therefore, when `merged` is being constructed, it will apply that deletion and `/foo` will not exist in the final `merged` result.
- -Also note that if the order of the merge were reversed to be `Merge([]State{bar, foo})`, then `/foo` would actually exist in `merged` alongside `/bar` because then the contents of `foo` take precedence over the contents of `bar`, and the creation of `/foo` therefore "overwrites" the previous deletion of it. - -One final detail to note is that even though deletions are entities in the same way files/dirs are, they do not show up when mounted. For example, if you were to mount `llb.Diff(foo, rmFoo)` during a build, you would just see an empty directory. Deletions only have an impact when used as an input to MergeOp. - -#### Workarounds -For use cases that are experiencing this behavior and do not want it, the best option is to find a way to avoid including the problematic deletion in your build definition. This can be very use-case specific, but using the previous example one option might be this: -```go -justBar := llb.Diff(rmFoo, bar) -merged := llb.Merge([]llb.State{foo, justBar}) -``` -Now, `merged` consists of both `/foo` and `/bar` because `justBar` has "diffed out" its parent `rmFoo` and consists only of the final layer that creates `/bar`. Other use cases may require different approaches like changing build commands to avoid unneeded deletions of files and directories. - -For use cases that can't avoid the deletion for whatever reason, the fallback option is to use a Copy op to squash the merge input and discard any deletions. So, building off the previous example: -```go -squashedBar := llb.Scratch().File(llb.Copy(bar, "/", "/")) -merged := llb.Merge([]llb.State{foo, squashedBar}) -``` -This results in `merged` consisting of both `/foo` and `/bar`. This is because `squashedBar` is a single layer that only consists of the files and directories that existed in `bar`, not any of its deletions. - -Note that there are currently performance tradeoffs to this copy approach: it will actually result in a copy on disk (i.e. no hardlink optimizations), the copy will not be lazy, and `squashedBar` will be a distinct layer from its inputs as far as the Buildkit cache and any remote registries are concerned, which may or may not matter depending on the use case. - -### Diff Corner Cases -There are some cases where it's ambiguous what the right behavior should be when merging diffs together. As stated before, Merge+Diff resolve these ambiguities by following the same behavior as container image import/export implementations in order to maintain consistency. - -One example: -```go -dir := llb.Scratch().File(llb.Mkdir("/dir", 0755)) -dirFoo := dir.File(llb.Mkfile("/dir/foo", 0755, nil)) -// rmFoo consists of a delete of /dir/foo -rmFoo := llb.Diff(dirFoo, dirFoo.File(llb.Rm("/dir/foo"))) - -// otherdir just consists of /otherdir -otherdir := llb.Scratch().File(llb.Mkdir("/otherdir", 0755)) - -// merged consists of /otherdir and /dir (no /dir/foo though) -merged := llb.Merge([]llb.State{otherdir, rmFoo}) -``` - -In this case, you start with just `/otherdir` and apply `rmFoo`, which is a deletion of `/dir/foo`. But `/dir/foo` doesn't exist, so it may be reasonable to expect that it just has no effect. However, image import/export code will actually create `/dir` even though it only exists in order to hold an inapplicable delete. As a result, Merge+Diff also have this same behavior.
diff --git a/docs/multi-platform.md b/docs/multi-platform.md index 8506e885b3ac..73331bfa6846 100644 --- a/docs/multi-platform.md +++ b/docs/multi-platform.md @@ -41,3 +41,9 @@ docker run --privileged --rm tonistiigi/binfmt --install all ``` See also [`tonistiigi/binfmt` documentation](https://github.com/tonistiigi/binfmt/). + +### Builds are very slow through emulation + +Running binaries made for a different architecture through a software emulation layer is much slower than running binaries natively. Therefore, this approach is not recommended for CPU-intensive tasks like compiling binaries. It is provided as a simple solution to build existing Dockerfiles and usually works well for common tasks like installing packages and running scripts. To get native performance for compilation steps, you should modify your Dockerfile to perform cross-compilation using [predefined platform ARGs](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope). Learn more from https://medium.com/@tonistiigi/faster-multi-platform-builds-dockerfile-cross-compilation-guide-part-1-ec087c719eaf . You can also use the [xx](https://github.com/tonistiigi/xx) project to add cross-compilation toolchains into Dockerfiles with minimal changes. + +[Docker Buildx](https://github.com/docker/buildx) also supports multi-node builders, where a single image can be built by multiple machines that each build the components for their native architectures. diff --git a/docs/nydus.md b/docs/nydus.md new file mode 100644 index 000000000000..709340b55d16 --- /dev/null +++ b/docs/nydus.md @@ -0,0 +1,47 @@ +## Nydus image formats + +Nydus is an OCI/Docker-compatible accelerated image format provided by the Dragonfly [image-service](https://github.com/dragonflyoss/image-service) project, which offers the ability to pull image data on demand, without waiting for the entire image to be pulled before starting the container. It has been used in production and has been shown to significantly reduce the time, network, and disk I/O overhead of pulling images and starting containers. + +A Nydus image can be flexibly configured as a FUSE-based user-space filesystem, or as an in-kernel [EROFS](https://www.kernel.org/doc/html/latest/filesystems/erofs.html) (since Linux kernel v5.16) with the nydus daemon in user space, which also makes integration with VM-based container runtimes like [KataContainers](https://katacontainers.io/) much easier. + +## Creating Nydus images + +### Buildkitd with Nydus Support + +To enable BuildKit support for Nydus image export, we need to build `buildkitd` with the following command: + +``` +go build -tags=nydus -o ./bin/buildkitd ./cmd/buildkitd +``` + +### Building Nydus with BuildKit + +Download the `nydus-image` binary from the [nydus release page](https://github.com/dragonflyoss/image-service/releases) (v2.1.0 or higher is required), then put the `nydus-image` binary on `$PATH` or specify its path with the `NYDUS_BUILDER` environment variable for buildkitd: + +``` +env NYDUS_BUILDER=/path/to/nydus-image buildkitd ... +``` + +Note: some nydus intermediate files will be created in the working directory during the build process; they will be cleaned up automatically after the build is completed. Use the `NYDUS_WORKDIR` environment variable to change this working directory. + +On the buildctl side, export the Nydus image as one of the compression types by specifying the `compression=nydus` option: + +``` +buildctl build ...
\ + --output type=image,name=docker.io/username/image,push=true,compression=nydus,oci-mediatypes=true +``` + +### Known limitations + +- The export of Nydus images, as well as the runtimes (e.g. [docker](https://github.com/dragonflyoss/image-service/tree/master/contrib/docker-nydus-graphdriver), [containerd](https://github.com/containerd/nydus-snapshotter), etc.), is currently only supported on the Linux platform. +- Nydus image layers cannot be mixed with other compression types in the same image, so the `force-compression=true` option is automatically enabled when exporting both the Nydus compression type and other compression types. +- Specifying a Nydus image as a base image in a Dockerfile is supported, but it does not currently support lazy pulling. +- Since an exported Nydus image always has one more metadata layer than images in other compression types, Nydus images cannot be exported/imported as cache. + +### Other ways to create Nydus images + +Pre-converted nydus images are available at the [`ghcr.io/dragonflyoss/image-service` repository](https://github.com/orgs/dragonflyoss/packages?ecosystem=container) (mainly for testing purposes). + +[`Nydusify`](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md): a CLI tool that pulls and converts an OCIv1 image into a Nydus image, and pushes the Nydus image to a registry. + +[`Harbor Acceld`](https://github.com/goharbor/acceleration-service): provides a general service to convert an OCIv1 image into an acceleration image format such as [Nydus](https://github.com/dragonflyoss/image-service) or [eStargz](https://github.com/containerd/stargz-snapshotter). diff --git a/docs/rootless.md b/docs/rootless.md index de41b328b259..ee25875e76ee 100644 --- a/docs/rootless.md +++ b/docs/rootless.md @@ -4,6 +4,30 @@ Rootless mode allows running BuildKit daemon as a non-root user. ## Distribution-specific hint Using Ubuntu kernel is recommended. +### Container-Optimized OS from Google +Make sure to have an `emptyDir` volume below: +```yaml +spec: + containers: + - name: buildkitd + volumeMounts: + # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too, + # but the default VOLUME does not work with rootless on Google's Container-Optimized OS + # as it is mounted with `nosuid,nodev`. + # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038 + - mountPath: /home/user/.local/share/buildkit + name: buildkitd + volumes: + - name: buildkitd + emptyDir: {} +``` + +See also the [example manifests](#Kubernetes). + +
+<details> +<summary>Old distributions</summary> + +<p>
### Debian GNU/Linux 10 Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`. @@ -16,8 +40,8 @@ This step is not needed for RHEL/CentOS 8 and later. ### Fedora, before kernel 5.13 You may have to disable SELinux, or run BuildKit with `--oci-worker-snapshotter=fuse-overlayfs`. -### Container-Optimized OS from Google -:warning: Currently unsupported. See [#879](https://github.com/moby/buildkit/issues/879). +
+</p> + +</details> +
## Known limitations * Using the `overlayfs` snapshotter requires kernel >= 5.11 or Ubuntu kernel. @@ -77,6 +101,9 @@ $ rootlesskit buildkitd --oci-worker-snapshotter=native ### Error related to `newuidmap` or `/etc/subuid` See https://rootlesscontaine.rs/getting-started/common/subuid/ +### Error `Options:[rbind ro]}]: operation not permitted` +Make sure to mount an `emptyDir` volume on `/home/user/.local/share/buildkit`. + ## Containerized deployment ### Kubernetes diff --git a/docs/solver.md b/docs/solver.md deleted file mode 100644 index 45b81c5cb078..000000000000 --- a/docs/solver.md +++ /dev/null @@ -1,161 +0,0 @@ -## Buildkit solver design - -The solver is a component in BuildKit responsible for parsing the build definition and scheduling the operations to the workers for execution. - -The solver package is heavily optimized for deduplication of work, concurrent requests, remote and local caching, and different per-vertex caching modes. It also allows operations and frontends to call back into the solver with new definitions that they have generated. - -The implementation of the solver is quite complicated, mostly because it is supposed to be performant with a snapshot-based storage layer and a distribution model using layer tarballs. It is expected that calculating the content-based checksum of snapshots between every operation or after every command execution is too slow for common use cases and needs to be postponed to when it is likely to have a meaningful impact. Ideally, the user shouldn't realize that these optimizations are taking place and should just get intuitive caching. It is also hoped that if some implementations can provide better cache capabilities, the solver would take advantage of that without requiring significant modification. - -In addition to avoiding content checksum scanning, the implementation is also designed to make decisions with minimum available data. For example, for remote caching sources to be effective, the solver will not require the cache to be loaded or exist for all the vertexes in the graph but will only load it for the final node that is determined to match the cache. As another example, if one of the inputs (for example an image) can produce a definition-based cache match for a vertex, and another (for example local source files) can only produce a content-based (slower) cache match, the solver is designed to detect it and skip the content-based check for the first input (which would cause a pull to happen). - -### Build definition - -The solver takes in a build definition in the form of a content-addressable operation definition that forms a graph. - -A vertex in this graph is defined by these properties: - -```go -type Vertex interface { - Digest() digest.Digest - Options() VertexOptions - Sys() interface{} - Inputs() []Edge - Name() string -} - -type Edge struct { - Index Index - Vertex Vertex -} - -type Index int -``` - -Every vertex has a content-addressable digest that represents a checksum of the definition graph up to that vertex including all of its inputs. If two vertexes have the same checksum, they are considered identical when they are executing concurrently. That means that if two other vertexes request a vertex with the same digest as an input, they will wait for the same operation to finish. - -The vertex digest can only be used for comparison while the solver is running and not between different invocations. For example, if parallel builds require using the `docker.io/library/alpine:latest` image as one of the operations, it is pulled only once.
But if a build using `docker.io/library/alpine:latest` was built earlier, the checksum based on that name can't be used for determining if the vertex was already built, because the image might have changed in the registry and the "latest" tag might be pointing to another image. - -The `Sys()` method returns an object that is used to resolve the executor for the operation. This is how a definition can pass logic to the worker that will execute the task associated with the vertex, without the solver needing to know anything about the implementation. When the solver needs to execute a vertex, it will send this object to a worker, so the worker needs to be configured to understand the object returned by `Sys()`. The solver itself doesn't care how the operations are implemented and therefore doesn't define a type for this value. In the LLB solver this value would be of type `llb.Op`. - -`Inputs()` returns an array of other vertexes the current vertex depends on. A vertex may have zero inputs. After an operation has executed, it returns an array of return references. If another operation wants to depend on any of these references, it would define an input with that vertex and an index of the reference from the return array (starting from zero). Inputs need to be contained in the `Digest()` of the vertex - two vertexes with different inputs should never have the same digest. - -Options contain extra information that can be associated with the vertex but that doesn't change the definition (or equality check) of it. Normally this is a hint to the solver, for example, to ignore cache when executing. It can also be used for associating messages with the vertex that can be helpful for tracing purposes. - - -### Operation interface - -The operation interface is how the solver can evaluate the properties of the actual vertex operation. These methods run on the worker, and their implementation is determined by the value of `vertex.Sys()`. The solver is configured with a "resolve" function that can convert a `vertex.Sys()` into an `Op`. - -```go -// Op is an implementation for running a vertex -type Op interface { - // CacheMap returns structure describing how the operation is cached. - // Currently only roots are allowed to return multiple cache maps per op. - CacheMap(context.Context, int) (*CacheMap, bool, error) - // Exec runs an operation given results from previous operations. - // Note that this is not the process execution but can have any definition. - Exec(ctx context.Context, inputs []Result) (outputs []Result, err error) -} - -type CacheMap struct { - // Digest is a base digest for operation that needs to be combined with - // inputs cache or selectors for dependencies. - Digest digest.Digest - Deps []struct { - // Optional digest that is merged with the cache key of the input - Selector digest.Digest - // Optional function that returns a digest for the input based on its - // return value - ComputeDigestFunc ResultBasedCacheFunc - } -} - -type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error) - - -// Result is an abstract return value for a solve -type Result interface { - ID() string - Release(context.Context) error - Sys() interface{} -} -``` - -There are two functions that every operation defines. One describes how to calculate a cache key for a vertex and another how to execute it. - -`CacheMap` is a description for calculating the cache key.
It contains a digest that is combined with the cache keys of the inputs to determine the stable checksum that can be used to cache the operation result. For the vertexes that don't have inputs (roots), it is important that this digest is a stable, secure checksum. For example, in LLB this digest is a manifest digest for container images or a commit SHA for git sources. - -`CacheMap` may also define optional selectors or content-based cache functions for its inputs. A selector is combined with the input cache key and is useful for describing cases where different parts of an input are being used and the input's cache key needs to be customized. A content-based cache function allows computing a new cache key for an input after it has completed. In LLB this is used for calculating a cache key based on the checksum of file contents of the input snapshots. - -`Exec` executes the operation defined by a vertex by passing in the results of the inputs. - - -### Shared graph - -After a new build request is sent to the solver, it first loads all the vertexes to the shared graph structure. For status tracking, a job instance needs to be created, and vertexes are loaded through jobs. A job ID is assigned to every vertex. If a vertex with the same digest has already been loaded to the shared graph, a new job ID is appended to the existing record. When the job finishes, it removes all of its references from the loaded vertex. The resources are released if no more references remain. - -Loading a vertex also creates a progress writer associated with it and sets up the cache sources associated with the specific vertex. - -After vertexes have been loaded to the job, it is safe to request a result from an edge pointing to a previously loaded vertex. To do this, the `build(ctx, Edge) (CachedResult, error)` method is called on the static scheduler instance associated with the solver. - -### Scheduler - -The scheduler is a component responsible for invoking the individual operations needed to find the result for the graph. While the build definition is defined with vertexes, the scheduler is solving edges. In the case of the LLB solver, a result of a solved edge is associated with a snapshot. Usually, to solve an edge, the input edges need to be solved first, and this can be done concurrently, but there are many exceptions: an edge may be cached while its input is not, or solving one input might cause a cache hit that makes solving the others wasteful. The scheduler tries to handle all these cases. - -The scheduler is implemented as a single-threaded, non-blocking event loop. The single-threaded constraint is for simplicity and might be removed in the future - currently, it is not known if this would have any performance impact. All the events in the scheduler have one fixed sender and receiver. The interface for interacting with the scheduler is to create a "pipe" between a sender and a receiver. One or both sides of the pipe may be an edge instance of the graph. If a pipe is added to the scheduler and an edge receives an event from the pipe, the scheduler will "unpark" that edge so it can process all the events it has received. - -The unpark handler for an edge needs to be non-blocking and execute quickly. The edge will process the data from the incoming events and update its internal state. When calling unpark, the scheduler has already separated out the sender and receiver sides of the pipes, which in the code are referred to as incoming and outgoing requests.
The incoming requests are usually requests to retrieve a result or a cache key from an edge. If it appears that an edge doesn't have enough internal state to satisfy the requests, it can make new pipes and register them with the scheduler. These new pipes are generally of two types: ones asking for some async function to be completed and others that request an input edge to reach a specific state first. - -To avoid bugs and deadlocks in this logic, the unpark method needs to follow the following rules. If unpark has finished without completing all incoming requests, it needs to create outgoing requests. Similarly, if an incoming request remains pending, at least one outgoing request needs to exist as well. Failing to comply with this rule will cause the scheduler to panic as a precaution to avoid leaks and hiding errors. - -### Edge state - -During unpark, edge state is incremented until it can fulfill the incoming requests. - -An edge can be in the following states: initial, cache-fast, cache-slow, completed. A completed edge contains a reference to the final result; an in-progress edge may have zero or more cache keys. - -The initial state is the starting state for any edge. If an edge has reached the cache-fast state, it means that all the definition-based cache key lookups have been performed. Cache-slow means that a content-based cache lookup has been performed as well. If possible, the scheduler will avoid looking up the slow keys of inputs if they are unnecessary for solving the current edge. - -The unpark method is split into four phases. The first phase processes all incoming events (responses from outgoing requests or new incoming requests) that caused the unpark to be called. These contain responses from async functions like calls to get the cachemap, execution result or content-based checksum for an input, or responses from input edges when their state or number of cache keys has changed. All the results are stored in the edge's internal state. For the new cache keys, a query is performed to determine if any of them can create potential matches to the current edge. - -After that, if any of the updates caused changes to the edge's properties, a new state is calculated for the current vertex. In this step, all potential cache keys from inputs can cause new cache keys for the edge to be created and the status of an edge might be updated. - -Third, the edge will go over all of its incoming requests to determine if the current internal state is sufficient for satisfying them all. There are a couple of possibilities for how this check may end up. If all requests can be completed and there are no outgoing requests, the requests finish and the unpark method returns. If there are outgoing requests but the edge has reached the completed state or all incoming requests have been canceled, the outgoing requests are canceled. This is an async operation as well and will cause unpark to be called again after completion. If this condition didn't apply but requests could be completed and there are outgoing requests, then the incoming request is answered but not completed. The receiver can then decide to cancel this request if needed. If no new data has appeared to answer the incoming requests, the desired state for the edge is determined from the incoming requests, and we continue to the next step. - -The fourth step sets up outgoing requests based on the desired state determined in the third step. If the current state requires calling any async functions to move forward, then it is done here.
We will also loop through all the inputs to determine if it is important to raise their desired state. Depending on which inputs can produce content-based cache keys and which inputs have already returned possible cache matches, the desired state for inputs may be raised at different times. - -When an edge needs to resolve an operation to call the async `CacheMap` and `Exec` methods, it does so by calling back to the shared graph. This makes sure that two different edges pointing to the same vertex do not execute twice. The result values for the operation shared by the edges are also cached until the vertex is cleaned up. Progress reporting is also handled and forwarded to the job through this shared vertex instance. - -Edge state is cleaned up when the final job that loaded the vertexes it is connected to is discarded. - - -### Cache providers - -Cache providers determine if there is a result that matches the cache keys generated during the build that could be reused instead of fully reevaluating the vertex and its inputs. There can be multiple cache providers, and specific providers can be defined per vertex using the vertex options. - -There are multiple backend implementations for cache providers: an in-memory one used in unit tests, the default local one using bbolt, and one based on cache manifests in a remote registry. - -A simplified cache provider has the following methods: - -```go -Query(...) ([]*CacheKey, error) -Records(ck *CacheKey) ([]*CacheRecord, error) -Load(ctx context.Context, rec *CacheRecord) (Result, error) -Save(key *CacheKey, s Result) (*ExportableCacheKey, error) -``` - -The Query method is used to determine if there exists a possible cache link between the input and a vertex. It takes parameters provided by `op.CacheMap` and the cache keys returned by calling the same method on its inputs. - -If a cache key has been found, its matching records can be requested. A cache key can have zero or more records. Having a record means that a cached result can be loaded for a specific vertex. The solver supports partial cache chains, meaning that not all inputs need to have a cache record to match cache for a vertex. - -The Load method is used to load a specific record into a result reference. This value is the same type as the one returned by the `op.Exec` method. - -Save allows adding more records to the cache. - -### Merging edges - -One final piece of solver logic allows merging two edges into one when they have both returned the same cache key. In practice, this appears for example when a build uses the image references `alpine:latest` and `alpine@sha256:abcabc` in its definition and they actually point to the same image. Another case where this appears is when the same source files from different sources are being used as part of the build. - -After the scheduler has called `unpark()` on an edge, it checks whether the method added any new cache keys to its state. If it did, it checks its internal index for another active edge that already exists with the same cache key. If one does, it performs some basic validation, for example checking that the new edge has not explicitly asked for cache to be ignored, and if it passes, merges the states of the two edges. - -As a result of the merge, the edge that was checked is deleted, its ongoing requests are canceled, and the incoming ones are added to the original edge.
\ No newline at end of file diff --git a/examples/README.md b/examples/README.md index e43ccda44654..0e0c13c17410 100644 --- a/examples/README.md +++ b/examples/README.md @@ -34,7 +34,7 @@ Different versions of the example scripts show different ways of describing the - `./buildkit1` - cloning git repositories has been separated for extra concurrency. - `./buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching. - `./buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path` -- `./buildkit4` - uses MergeOp to optimize copy chains for better caching behavior (see `docs/merge+diff.md` for more details) +- `./buildkit4` - uses MergeOp to optimize copy chains for better caching behavior (see `docs/dev/merge-diff.md` for more details) - `./dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes - `./nested-llb` - shows how to use nested invocation to generate LLB - `./gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies diff --git a/examples/build-using-dockerfile/README.md b/examples/build-using-dockerfile/README.md index b0087ad9c903..1ec17bdf198b 100644 --- a/examples/build-using-dockerfile/README.md +++ b/examples/build-using-dockerfile/README.md @@ -1,6 +1,5 @@ # `build-using-dockerfile` example -:information_source: [BuildKit has been integrated to `docker build` since Docker 18.06.](https://docs.docker.com/develop/develop-images/build_enhancements/) The `build-using-dockerfile` CLI is just provided as an example for writing a BuildKit client application. For people familiar with `docker build` command, `build-using-dockerfile` is provided as an example for building Dockerfiles with BuildKit using a syntax similar to `docker build`. diff --git a/examples/buildctl-daemonless/buildctl-daemonless.sh b/examples/buildctl-daemonless/buildctl-daemonless.sh index 15885dadb783..ab181d16c7e2 100755 --- a/examples/buildctl-daemonless/buildctl-daemonless.sh +++ b/examples/buildctl-daemonless/buildctl-daemonless.sh @@ -19,7 +19,7 @@ set -eu # * addr # * log tmp=$(mktemp -d /tmp/buildctl-daemonless.XXXXXX) -trap "kill \$(cat $tmp/pid); wait \$(cat $tmp/pid) || true; rm -rf $tmp" EXIT +trap "kill \$(cat $tmp/pid) || true; wait \$(cat $tmp/pid) || true; rm -rf $tmp" EXIT startBuildkitd() { addr= diff --git a/examples/buildkit0/buildkit.go b/examples/buildkit0/buildkit.go index 78f988d8584d..24810e730ff5 100644 --- a/examples/buildkit0/buildkit.go +++ b/examples/buildkit0/buildkit.go @@ -33,7 +33,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). diff --git a/examples/buildkit1/buildkit.go b/examples/buildkit1/buildkit.go index 98793d0ab6e2..9f8201b2bddf 100644 --- a/examples/buildkit1/buildkit.go +++ b/examples/buildkit1/buildkit.go @@ -33,7 +33,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). 
diff --git a/examples/buildkit2/buildkit.go b/examples/buildkit2/buildkit.go index 7a88562cf72e..5ae6b201678b 100644 --- a/examples/buildkit2/buildkit.go +++ b/examples/buildkit2/buildkit.go @@ -33,7 +33,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). diff --git a/examples/buildkit3/buildkit.go b/examples/buildkit3/buildkit.go index fa985f18eb84..58de1ebe0666 100644 --- a/examples/buildkit3/buildkit.go +++ b/examples/buildkit3/buildkit.go @@ -34,7 +34,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). diff --git a/examples/buildkit4/buildkit.go b/examples/buildkit4/buildkit.go index c20f13ea0c05..0bae9e7899e8 100644 --- a/examples/buildkit4/buildkit.go +++ b/examples/buildkit4/buildkit.go @@ -37,7 +37,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). diff --git a/examples/dockerfile2llb/main.go b/examples/dockerfile2llb/main.go index 94455565b1ec..2fd693a4ae80 100644 --- a/examples/dockerfile2llb/main.go +++ b/examples/dockerfile2llb/main.go @@ -2,9 +2,9 @@ package main import ( "context" + "encoding/json" "flag" - "io/ioutil" - "log" + "io" "os" "github.com/moby/buildkit/client/llb" @@ -12,40 +12,66 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/appcontext" + "github.com/sirupsen/logrus" ) type buildOpt struct { - target string + target string + partialImageConfigFile string + partialMetadataFile string } func main() { + if err := xmain(); err != nil { + logrus.Fatal(err) + } +} + +func xmain() error { var opt buildOpt flag.StringVar(&opt.target, "target", "", "target stage") + flag.StringVar(&opt.partialImageConfigFile, "partial-image-config-file", "", "Output partial image config as a JSON file") + flag.StringVar(&opt.partialMetadataFile, "partial-metadata-file", "", "Output partial metadata as a JSON file") flag.Parse() - df, err := ioutil.ReadAll(os.Stdin) + df, err := io.ReadAll(os.Stdin) if err != nil { - panic(err) + return err } caps := pb.Caps.CapSet(pb.Caps.All()) - state, img, bi, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ + state, img, _, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ MetaResolver: imagemetaresolver.Default(), Target: opt.target, LLBCaps: &caps, }) if err != nil { - log.Printf("err: %+v", err) - panic(err) + return err } - _ = img - _ = bi - dt, err := state.Marshal(context.TODO()) if err != nil { - panic(err) + return err + } + if err := llb.WriteTo(dt, os.Stdout); err != nil { + return err + } + if opt.partialImageConfigFile != "" { + if err := writeJSON(opt.partialImageConfigFile, img); err != nil { + return err + } + } + return nil +} + +func writeJSON(f string, x interface{}) error { + b, err := json.Marshal(x) + if err != nil { + return err + } + if err := os.RemoveAll(f); err
!= nil { + return err } - llb.WriteTo(dt, os.Stdout) + return os.WriteFile(f, b, 0o644) } diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md index 66a36c102a9b..c8973dc5645d 100644 --- a/examples/kubernetes/README.md +++ b/examples/kubernetes/README.md @@ -7,8 +7,7 @@ This directory contains Kubernetes manifests for `Pod`, `Deployment` (with `Serv * `Job`: good if you don't want to have daemon pods Using Rootless mode (`*.rootless.yaml`) is recommended because Rootless mode image is executed as non-root user (UID 1000) and doesn't need `securityContext.privileged`. - -:warning: Rootless mode may not work on some host kernels. See [`../../docs/rootless.md`](../../docs/rootless.md). +See [`../../docs/rootless.md`](../../docs/rootless.md). See also ["Building Images Efficiently And Securely On Kubernetes With BuildKit" (KubeCon EU 2019)](https://kccnceu19.sched.com/event/MPX5). diff --git a/examples/kubernetes/consistenthash/main.go b/examples/kubernetes/consistenthash/main.go index ebe5f64f2840..4b100777ce3c 100644 --- a/examples/kubernetes/consistenthash/main.go +++ b/examples/kubernetes/consistenthash/main.go @@ -16,7 +16,7 @@ package main import ( "fmt" - "io/ioutil" + "io" "os" "strings" @@ -38,7 +38,7 @@ func xmain() error { return errors.New("should not reach here") } key := os.Args[1] - stdin, err := ioutil.ReadAll(os.Stdin) + stdin, err := io.ReadAll(os.Stdin) if err != nil { return err } diff --git a/examples/kubernetes/deployment+service.rootless.yaml b/examples/kubernetes/deployment+service.rootless.yaml index 00f89f7342f9..0b554096fde6 100644 --- a/examples/kubernetes/deployment+service.rootless.yaml +++ b/examples/kubernetes/deployment+service.rootless.yaml @@ -15,7 +15,6 @@ spec: app: buildkitd annotations: container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined - container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined # see buildkit/docs/rootless.md for caveats of rootless mode spec: containers: @@ -52,6 +51,9 @@ spec: initialDelaySeconds: 5 periodSeconds: 30 securityContext: + # Needs Kubernetes >= 1.19 + seccompProfile: + type: Unconfined # To change UID/GID, you need to rebuild the image runAsUser: 1000 runAsGroup: 1000 @@ -61,11 +63,19 @@ spec: - name: certs readOnly: true mountPath: /certs + # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too, + # but the default VOLUME does not work with rootless on Google's Container-Optimized OS + # as it is mounted with `nosuid,nodev`. 
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038 + - mountPath: /home/user/.local/share/buildkit + name: buildkitd volumes: # buildkit-daemon-certs must contain ca.pem, cert.pem, and key.pem - name: certs secret: secretName: buildkit-daemon-certs + - name: buildkitd + emptyDir: {} --- apiVersion: v1 kind: Service diff --git a/examples/kubernetes/job.rootless.yaml b/examples/kubernetes/job.rootless.yaml index 7c9941d05434..06e608c6ab35 100644 --- a/examples/kubernetes/job.rootless.yaml +++ b/examples/kubernetes/job.rootless.yaml @@ -7,7 +7,6 @@ spec: metadata: annotations: container.apparmor.security.beta.kubernetes.io/buildkit: unconfined - container.seccomp.security.alpha.kubernetes.io/buildkit: unconfined # see buildkit/docs/rootless.md for caveats of rootless mode spec: restartPolicy: Never @@ -43,6 +42,9 @@ spec: # To push the image to a registry, add # `--output type=image,name=docker.io/username/image,push=true` securityContext: + # Needs Kubernetes >= 1.19 + seccompProfile: + type: Unconfined # To change UID/GID, you need to rebuild the image runAsUser: 1000 runAsGroup: 1000 @@ -50,8 +52,16 @@ spec: - name: workspace readOnly: true mountPath: /workspace + # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too, + # but the default VOLUME does not work with rootless on Google's Container-Optimized OS + # as it is mounted with `nosuid,nodev`. + # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038 + - mountPath: /home/user/.local/share/buildkit + name: buildkitd # To push the image, you also need to create `~/.docker/config.json` secret # and set $DOCKER_CONFIG to `/path/to/.docker` directory. volumes: - name: workspace emptyDir: {} + - name: buildkitd + emptyDir: {} diff --git a/examples/kubernetes/pod.rootless.yaml b/examples/kubernetes/pod.rootless.yaml index ea63b35d177a..130ea43633fe 100644 --- a/examples/kubernetes/pod.rootless.yaml +++ b/examples/kubernetes/pod.rootless.yaml @@ -4,7 +4,6 @@ metadata: name: buildkitd annotations: container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined - container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined # see buildkit/docs/rootless.md for caveats of rootless mode spec: containers: @@ -29,6 +28,19 @@ spec: initialDelaySeconds: 5 periodSeconds: 30 securityContext: + # Needs Kubernetes >= 1.19 + seccompProfile: + type: Unconfined # To change UID/GID, you need to rebuild the image runAsUser: 1000 runAsGroup: 1000 + volumeMounts: + # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too, + # but the default VOLUME does not work with rootless on Google's Container-Optimized OS + # as it is mounted with `nosuid,nodev`. 
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038 + - mountPath: /home/user/.local/share/buildkit + name: buildkitd + volumes: + - name: buildkitd + emptyDir: {} diff --git a/examples/kubernetes/statefulset.rootless.yaml b/examples/kubernetes/statefulset.rootless.yaml index e67c5a0bf246..0533d2a1004f 100644 --- a/examples/kubernetes/statefulset.rootless.yaml +++ b/examples/kubernetes/statefulset.rootless.yaml @@ -17,7 +17,6 @@ spec: app: buildkitd annotations: container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined - container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined # see buildkit/docs/rootless.md for caveats of rootless mode spec: containers: @@ -42,6 +41,19 @@ spec: initialDelaySeconds: 5 periodSeconds: 30 securityContext: + # Needs Kubernetes >= 1.19 + seccompProfile: + type: Unconfined # To change UID/GID, you need to rebuild the image runAsUser: 1000 runAsGroup: 1000 + volumeMounts: + # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too, + # but the default VOLUME does not work with rootless on Google's Container-Optimized OS + # as it is mounted with `nosuid,nodev`. + # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038 + - mountPath: /home/user/.local/share/buildkit + name: buildkitd + volumes: + - name: buildkitd + emptyDir: {} diff --git a/examples/nested-llb/main.go b/examples/nested-llb/main.go index 74eb58864af1..93f9d41c2dcc 100644 --- a/examples/nested-llb/main.go +++ b/examples/nested-llb/main.go @@ -32,7 +32,7 @@ func main() { } func goBuildBase() llb.State { - goAlpine := llb.Image("docker.io/library/golang:1.17-alpine") + goAlpine := llb.Image("docker.io/library/golang:1.19-alpine") return goAlpine. AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix). AddEnv("GOPATH", "/go"). diff --git a/executor/containerdexecutor/executor.go b/executor/containerdexecutor/executor.go index 43a05cccefe9..ac195c431588 100644 --- a/executor/containerdexecutor/executor.go +++ b/executor/containerdexecutor/executor.go @@ -3,7 +3,6 @@ package containerdexecutor import ( "context" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -41,12 +40,26 @@ type containerdExecutor struct { running map[string]chan error mu sync.Mutex apparmorProfile string + selinux bool traceSocket string rootless bool } +// OnCreateRuntimer provides an alternative to OCI hooks for applying network +// configuration to a container. If the [network.Provider] returns a +// [network.Namespace] which also implements this interface, the containerd +// executor will run the callback at the appropriate point in the container +// lifecycle. +type OnCreateRuntimer interface { + // OnCreateRuntime is analogous to the createRuntime OCI hook. The + // function is called after the container is created, before the user + // process has been executed. The argument is the container PID in the + // runtime namespace. + OnCreateRuntime(pid uint32) error +} + // New creates a new executor backed by connection to containerd API -func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, traceSocket string, rootless bool) executor.Executor { +func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool) executor.Executor { // clean up old hosts/resolv.conf file. 
ignore errors os.RemoveAll(filepath.Join(root, "hosts")) os.RemoveAll(filepath.Join(root, "resolv.conf")) @@ -59,6 +72,7 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb dnsConfig: dnsConfig, running: make(map[string]chan error), apparmorProfile: apparmorProfile, + selinux: selinux, traceSocket: traceSocket, rootless: rootless, } @@ -121,7 +135,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M return err } defer lm.Unmount() - defer executor.MountStubsCleaner(rootfsPath, mounts)() + defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User) if err != nil { @@ -147,7 +161,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M if !ok { return errors.Errorf("unknown network mode %s", meta.NetMode) } - namespace, err := provider.New() + namespace, err := provider.New(ctx, meta.Hostname) if err != nil { return err } @@ -163,7 +177,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M } processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.traceSocket, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...) if err != nil { return err } @@ -204,11 +218,17 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M } defer func() { - if _, err1 := task.Delete(context.TODO()); err == nil && err1 != nil { + if _, err1 := task.Delete(context.TODO(), containerd.WithProcessKill); err == nil && err1 != nil { err = errors.Wrapf(err1, "failed to delete task %s", id) } }() + if nn, ok := namespace.(OnCreateRuntimer); ok { + if err := nn.OnCreateRuntime(task.Pid()); err != nil { + return err + } + } + trace.SpanFromContext(ctx).AddEvent("Container created") err = w.runProcess(ctx, task, process.Resize, process.Signal, func() { startedOnce.Do(func() { @@ -315,10 +335,10 @@ func fixProcessOutput(process *executor.ProcessInfo) { // failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown // So just stub out any missing output if process.Stdout == nil { - process.Stdout = &nopCloser{ioutil.Discard} + process.Stdout = &nopCloser{io.Discard} } if process.Stderr == nil { - process.Stderr = &nopCloser{ioutil.Discard} + process.Stderr = &nopCloser{io.Discard} } } diff --git a/executor/executor.go b/executor/executor.go index 4727af4b03ef..a323bcc9cc94 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -23,6 +23,8 @@ type Meta struct { CgroupParent string NetMode pb.NetMode SecurityMode pb.SecurityMode + + RemoveMountStubsRecursive bool } type Mountable interface { diff --git a/executor/oci/hosts.go b/executor/oci/hosts.go index d0505c28ccd9..0d193555c941 100644 --- a/executor/oci/hosts.go +++ b/executor/oci/hosts.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "os" "path/filepath" @@ -56,7 +55,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools } tmpPath := p + ".tmp" - if err := ioutil.WriteFile(tmpPath, b.Bytes(), 0644); err != nil { + if err := os.WriteFile(tmpPath, b.Bytes(), 0644); err != nil { return "", nil, err } diff --git 
a/executor/oci/resolvconf.go b/executor/oci/resolvconf.go index c510a1a1bc18..3ac0feda7aea 100644 --- a/executor/oci/resolvconf.go +++ b/executor/oci/resolvconf.go @@ -2,12 +2,10 @@ package oci import ( "context" - "io/ioutil" "os" "path/filepath" "github.com/docker/docker/libnetwork/resolvconf" - "github.com/docker/docker/libnetwork/types" "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/util/flightcontrol" "github.com/pkg/errors" @@ -74,7 +72,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity if dns != nil { var ( - dnsNameservers = resolvconf.GetNameservers(dt, types.IP) + dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP) dnsSearchDomains = resolvconf.GetSearchDomains(dt) dnsOptions = resolvconf.GetOptions(dt) ) @@ -101,7 +99,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity } tmpPath := p + ".tmp" - if err := ioutil.WriteFile(tmpPath, f.Content, 0644); err != nil { + if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil { return "", err } diff --git a/executor/oci/resolvconf_test.go b/executor/oci/resolvconf_test.go index 316bcefda283..ec073885eb34 100644 --- a/executor/oci/resolvconf_test.go +++ b/executor/oci/resolvconf_test.go @@ -2,7 +2,6 @@ package oci import ( "context" - "io/ioutil" "os" "testing" @@ -27,13 +26,10 @@ nameserver 8.8.4.4 nameserver 2001:4860:4860::8888 nameserver 2001:4860:4860::8844` - dir, err := ioutil.TempDir("", "buildkit-test") - require.NoError(t, err) - defer os.RemoveAll(dir) ctx := context.Background() - p, err := GetResolvConf(ctx, dir, nil, nil) + p, err := GetResolvConf(ctx, t.TempDir(), nil, nil) require.NoError(t, err) - b, err := ioutil.ReadFile(p) + b, err := os.ReadFile(p) require.NoError(t, err) require.Equal(t, string(b), defaultResolvConf) } diff --git a/executor/oci/spec.go b/executor/oci/spec.go index ea8741995a11..94b48a7aa9ff 100644 --- a/executor/oci/spec.go +++ b/executor/oci/spec.go @@ -50,7 +50,7 @@ func (pm ProcessMode) String() string { // GenerateSpec generates spec using containerd functionality. // opts are ignored for s.Process, s.Hostname, and s.Mounts . -func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { +func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, selinuxB bool, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { c := &containers.Container{ ID: id, } @@ -81,7 +81,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } - if securityOpts, err := generateSecurityOpts(meta.SecurityMode, apparmorProfile); err == nil { + if securityOpts, err := generateSecurityOpts(meta.SecurityMode, apparmorProfile, selinuxB); err == nil { opts = append(opts, securityOpts...) 
} else { return nil, nil, err diff --git a/executor/oci/spec_unix.go b/executor/oci/spec_unix.go index 5f4908ca6b6c..f906f79b6bac 100644 --- a/executor/oci/spec_unix.go +++ b/executor/oci/spec_unix.go @@ -16,7 +16,9 @@ import ( "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/entitlements/security" specs "github.com/opencontainers/runtime-spec/specs-go" + selinux "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { @@ -30,7 +32,10 @@ func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { } // generateSecurityOpts may affect mounts, so must be called after generateMountOpts -func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts []oci.SpecOpts, _ error) { +func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) { + if selinuxB && !selinux.GetEnabled() { + return nil, errors.New("selinux is not available") + } switch mode { case pb.SecurityMode_INSECURE: return []oci.SpecOpts{ @@ -39,7 +44,9 @@ func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts [] oci.WithWriteableSysfs, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error { var err error - s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"}) + if selinuxB { + s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"}) + } return err }, }, nil @@ -52,7 +59,9 @@ func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts [] } opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error { var err error - s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil) + if selinuxB { + s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil) + } return err }) return opts, nil @@ -77,7 +86,7 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { return nil, nil } return []oci.SpecOpts{ - oci.WithUserNamespace(specMapping(idmap.UIDs()), specMapping(idmap.GIDs())), + oci.WithUserNamespace(specMapping(idmap.UIDMaps), specMapping(idmap.GIDMaps)), }, nil } diff --git a/executor/oci/spec_windows.go b/executor/oci/spec_windows.go index bc1a6261e284..48b0969e3922 100644 --- a/executor/oci/spec_windows.go +++ b/executor/oci/spec_windows.go @@ -15,7 +15,7 @@ func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { } // generateSecurityOpts may affect mounts, so must be called after generateMountOpts -func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) ([]oci.SpecOpts, error) { +func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) ([]oci.SpecOpts, error) { if mode == pb.SecurityMode_INSECURE { return nil, errors.New("no support for running in insecure mode on Windows") } diff --git a/executor/oci/user.go b/executor/oci/user.go index eb459f391fbe..bb58e834f634 100644 --- a/executor/oci/user.go +++ b/executor/oci/user.go @@ -91,6 +91,7 @@ func parseUID(str string) (uint32, error) { // once the PR in containerd is merged we should remove this function. 
func WithUIDGID(uid, gid uint32, sgids []uint32) containerdoci.SpecOpts { return func(_ context.Context, _ containerdoci.Client, _ *containers.Container, s *containerdoci.Spec) error { + defer ensureAdditionalGids(s) setProcess(s) s.Process.User.UID = uid s.Process.User.GID = gid @@ -106,3 +107,15 @@ func setProcess(s *containerdoci.Spec) { s.Process = &specs.Process{} } } + +// ensureAdditionalGids ensures that the primary GID is also included in the additional GID list. +// From https://github.com/containerd/containerd/blob/v1.7.0-beta.4/oci/spec_opts.go#L124-L133 +func ensureAdditionalGids(s *containerdoci.Spec) { + setProcess(s) + for _, f := range s.Process.User.AdditionalGids { + if f == s.Process.User.GID { + return + } + } + s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...) +} diff --git a/executor/runcexecutor/executor.go b/executor/runcexecutor/executor.go index 702d513102b2..213ebb73665a 100644 --- a/executor/runcexecutor/executor.go +++ b/executor/runcexecutor/executor.go @@ -48,6 +48,7 @@ type Opt struct { DNS *oci.DNSConfig OOMScoreAdj *int ApparmorProfile string + SELinux bool TracingSocket string } @@ -67,6 +68,7 @@ type runcExecutor struct { running map[string]chan error mu sync.Mutex apparmorProfile string + selinux bool tracingSocket string } @@ -131,6 +133,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex oomScoreAdj: opt.OOMScoreAdj, running: make(map[string]chan error), apparmorProfile: opt.ApparmorProfile, + selinux: opt.SELinux, tracingSocket: opt.TracingSocket, } return w, nil @@ -161,7 +164,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, if !ok { return errors.Errorf("unknown network mode %s", meta.NetMode) } - namespace, err := provider.New() + namespace, err := provider.New(ctx, meta.Hostname) if err != nil { return err } @@ -221,7 +224,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } defer mount.Unmount(rootFSPath, 0) - defer executor.MountStubsCleaner(rootFSPath, mounts)() + defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User) if err != nil { @@ -251,7 +254,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.tracingSocket, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, opts...) 
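The `ensureAdditionalGids` helper added to `executor/oci/user.go` above mirrors containerd's behaviour: the primary GID must also appear in the supplementary group list. A minimal standalone sketch of that guarantee (re-implemented here against the runtime-spec types purely for illustration; it is not part of the patch):

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// ensureAdditionalGids mirrors the helper in executor/oci/user.go above:
// the primary GID is prepended to AdditionalGids unless already listed.
func ensureAdditionalGids(s *specs.Spec) {
	for _, g := range s.Process.User.AdditionalGids {
		if g == s.Process.User.GID {
			return
		}
	}
	s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...)
}

func main() {
	s := &specs.Spec{Process: &specs.Process{}}
	s.Process.User.GID = 1000
	s.Process.User.AdditionalGids = []uint32{2000}
	ensureAdditionalGids(s)
	fmt.Println(s.Process.User.AdditionalGids) // [1000 2000]
}
```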
if err != nil { return err } diff --git a/executor/stubs.go b/executor/stubs.go index 2c13b13053a4..22a8ac1310c4 100644 --- a/executor/stubs.go +++ b/executor/stubs.go @@ -7,9 +7,11 @@ import ( "syscall" "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/util/system" + "github.com/sirupsen/logrus" ) -func MountStubsCleaner(dir string, mounts []Mount) func() { +func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { names := []string{"/etc/resolv.conf", "/etc/hosts"} for _, m := range mounts { @@ -28,9 +30,22 @@ func MountStubsCleaner(dir string, mounts []Mount) func() { continue } - _, err = os.Lstat(realPath) - if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) { + for { + _, err = os.Lstat(realPath) + if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR)) { + break + } paths = append(paths, realPath) + + if !recursive { + break + } + + realPathNext := filepath.Dir(realPath) + if realPath == realPathNext { + break + } + realPath = realPathNext } } @@ -40,10 +55,41 @@ func MountStubsCleaner(dir string, mounts []Mount) func() { if err != nil { continue } - if st.Size() != 0 { + if st.IsDir() { + entries, err := os.ReadDir(p) + if err != nil { + continue + } + if len(entries) != 0 { + continue + } + } else if st.Size() != 0 { continue } - os.Remove(p) + + // Back up the timestamps of the dir for reproducible builds + // https://github.com/moby/buildkit/issues/3148 + dir := filepath.Dir(p) + dirSt, err := os.Stat(dir) + if err != nil { + logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) + continue + } + mtime := dirSt.ModTime() + atime, err := system.Atime(dirSt) + if err != nil { + logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) + atime = mtime + } + + if err := os.Remove(p); err != nil { + logrus.WithError(err).Warnf("Failed to remove mount stub %q", p) + } + + // Restore the timestamps of the dir + if err := os.Chtimes(dir, atime, mtime); err != nil { + logrus.WithError(err).Warnf("Failed to restore mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) + } } } } diff --git a/exporter/attestation/filter.go b/exporter/attestation/filter.go new file mode 100644 index 000000000000..5abc234b875e --- /dev/null +++ b/exporter/attestation/filter.go @@ -0,0 +1,45 @@ +package attestation + +import ( + "bytes" + + "github.com/moby/buildkit/exporter" +) + +func Filter(attestations []exporter.Attestation, include map[string][]byte, exclude map[string][]byte) []exporter.Attestation { + if len(include) == 0 && len(exclude) == 0 { + return attestations + } + + result := []exporter.Attestation{} + for _, att := range attestations { + meta := att.Metadata + if meta == nil { + meta = map[string][]byte{} + } + + match := true + for k, v := range include { + if !bytes.Equal(meta[k], v) { + match = false + break + } + } + if !match { + continue + } + + for k, v := range exclude { + if bytes.Equal(meta[k], v) { + match = false + break + } + } + if !match { + continue + } + + result = append(result, att) + } + return result +} diff --git a/exporter/attestation/make.go b/exporter/attestation/make.go new file mode 100644 index 000000000000..8ed910c1e8d3 --- /dev/null +++ b/exporter/attestation/make.go @@ -0,0 +1,138 @@ +package attestation + +import ( + "context" + "encoding/json" + "os" + + "github.com/containerd/continuity/fs" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/exporter" + gatewaypb
"github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// ReadAll reads the content of an attestation. +func ReadAll(ctx context.Context, s session.Group, att exporter.Attestation) ([]byte, error) { + var content []byte + if att.ContentFunc != nil { + data, err := att.ContentFunc() + if err != nil { + return nil, err + } + content = data + } else if att.Ref != nil { + mount, err := att.Ref.Mount(ctx, true, s) + if err != nil { + return nil, err + } + lm := snapshot.LocalMounter(mount) + src, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + p, err := fs.RootPath(src, att.Path) + if err != nil { + return nil, err + } + content, err = os.ReadFile(p) + if err != nil { + return nil, errors.Wrap(err, "cannot read in-toto attestation") + } + } else { + return nil, errors.New("no available content for attestation") + } + if len(content) == 0 { + content = nil + } + return content, nil +} + +// MakeInTotoStatements iterates over all provided result attestations and +// generates intoto attestation statements. +func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []exporter.Attestation, defaultSubjects []intoto.Subject) ([]intoto.Statement, error) { + eg, ctx := errgroup.WithContext(ctx) + statements := make([]intoto.Statement, len(attestations)) + + for i, att := range attestations { + i, att := i, att + eg.Go(func() error { + content, err := ReadAll(ctx, s, att) + if err != nil { + return err + } + + switch att.Kind { + case gatewaypb.AttestationKindInToto: + stmt, err := makeInTotoStatement(ctx, content, att, defaultSubjects) + if err != nil { + return err + } + statements[i] = *stmt + case gatewaypb.AttestationKindBundle: + return errors.New("bundle attestation kind must be un-bundled first") + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + return statements, nil +} + +func makeInTotoStatement(ctx context.Context, content []byte, attestation exporter.Attestation, defaultSubjects []intoto.Subject) (*intoto.Statement, error) { + if len(attestation.InToto.Subjects) == 0 { + attestation.InToto.Subjects = []result.InTotoSubject{{ + Kind: gatewaypb.InTotoSubjectKindSelf, + }} + } + subjects := []intoto.Subject{} + for _, subject := range attestation.InToto.Subjects { + subjectName := "_" + if subject.Name != "" { + subjectName = subject.Name + } + + switch subject.Kind { + case gatewaypb.InTotoSubjectKindSelf: + for _, defaultSubject := range defaultSubjects { + subjectNames := []string{} + subjectNames = append(subjectNames, defaultSubject.Name) + if subjectName != "_" { + subjectNames = append(subjectNames, subjectName) + } + + for _, name := range subjectNames { + subjects = append(subjects, intoto.Subject{ + Name: name, + Digest: defaultSubject.Digest, + }) + } + } + case gatewaypb.InTotoSubjectKindRaw: + subjects = append(subjects, intoto.Subject{ + Name: subjectName, + Digest: result.ToDigestMap(subject.Digest...), + }) + default: + return nil, errors.Errorf("unknown attestation subject type %T", subject) + } + } + + stmt := intoto.Statement{ + StatementHeader: intoto.StatementHeader{ + Type: intoto.StatementInTotoV01, + PredicateType: attestation.InToto.PredicateType, + Subject: subjects, + }, + Predicate: json.RawMessage(content), + } + return &stmt, nil +} diff --git a/exporter/attestation/unbundle.go 
b/exporter/attestation/unbundle.go new file mode 100644 index 000000000000..a2120d7975e1 --- /dev/null +++ b/exporter/attestation/unbundle.go @@ -0,0 +1,192 @@ +package attestation + +import ( + "context" + "encoding/json" + "os" + "path" + "strings" + + "github.com/containerd/continuity/fs" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/exporter" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// Unbundle iterates over all provided result attestations and un-bundles any +// bundled attestations by loading them from the provided refs map. +func Unbundle(ctx context.Context, s session.Group, bundled []exporter.Attestation) ([]exporter.Attestation, error) { + if err := Validate(bundled); err != nil { + return nil, err + } + + eg, ctx := errgroup.WithContext(ctx) + unbundled := make([][]exporter.Attestation, len(bundled)) + + for i, att := range bundled { + i, att := i, att + eg.Go(func() error { + switch att.Kind { + case gatewaypb.AttestationKindInToto: + if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") { + if att.ContentFunc == nil { + // provenance may only be set buildkit-side using ContentFunc + return errors.New("frontend may not set provenance attestations") + } + } + unbundled[i] = append(unbundled[i], att) + case gatewaypb.AttestationKindBundle: + if att.ContentFunc != nil { + return errors.New("attestation bundle cannot have callback") + } + if att.Ref == nil { + return errors.Errorf("no ref provided for attestation bundle") + } + + mount, err := att.Ref.Mount(ctx, true, s) + if err != nil { + return err + } + lm := snapshot.LocalMounter(mount) + src, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + atts, err := unbundle(ctx, src, att) + if err != nil { + return err + } + for _, att := range atts { + if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") { + return errors.New("frontend may not bundle provenance attestations") + } + } + unbundled[i] = append(unbundled[i], atts...) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + + var joined []exporter.Attestation + for _, atts := range unbundled { + joined = append(joined, atts...) 
+ } + joined = sort(joined) + + if err := Validate(joined); err != nil { + return nil, err + } + return joined, nil +} + +func sort(atts []exporter.Attestation) []exporter.Attestation { + isCore := make([]bool, len(atts)) + for i, att := range atts { + name, ok := att.Metadata[result.AttestationSBOMCore] + if !ok { + continue + } + if n, _, _ := strings.Cut(att.Path, "."); n != string(name) { + continue + } + isCore[i] = true + } + + result := make([]exporter.Attestation, 0, len(atts)) + for i, att := range atts { + if isCore[i] { + result = append(result, att) + } + } + for i, att := range atts { + if !isCore[i] { + result = append(result, att) + } + } + return result +} + +func unbundle(ctx context.Context, root string, bundle exporter.Attestation) ([]exporter.Attestation, error) { + dir, err := fs.RootPath(root, bundle.Path) + if err != nil { + return nil, err + } + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + var unbundled []exporter.Attestation + for _, entry := range entries { + p, err := fs.RootPath(dir, entry.Name()) + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + dec := json.NewDecoder(f) + var stmt intoto.Statement + if err := dec.Decode(&stmt); err != nil { + return nil, errors.Wrap(err, "cannot decode in-toto statement") + } + if bundle.InToto.PredicateType != "" && stmt.PredicateType != bundle.InToto.PredicateType { + return nil, errors.Errorf("bundle entry %s does not match required predicate type %s", stmt.PredicateType, bundle.InToto.PredicateType) + } + + predicate, err := json.Marshal(stmt.Predicate) + if err != nil { + return nil, err + } + + subjects := make([]result.InTotoSubject, len(stmt.Subject)) + for i, subject := range stmt.Subject { + subjects[i] = result.InTotoSubject{ + Kind: gatewaypb.InTotoSubjectKindRaw, + Name: subject.Name, + Digest: result.FromDigestMap(subject.Digest), + } + } + unbundled = append(unbundled, exporter.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Metadata: bundle.Metadata, + Path: path.Join(bundle.Path, entry.Name()), + ContentFunc: func() ([]byte, error) { return predicate, nil }, + InToto: result.InTotoAttestation{ + PredicateType: stmt.PredicateType, + Subjects: subjects, + }, + }) + } + return unbundled, nil +} + +func Validate(atts []exporter.Attestation) error { + for _, att := range atts { + if err := validate(att); err != nil { + return err + } + } + return nil +} + +func validate(att exporter.Attestation) error { + if att.Kind != gatewaypb.AttestationKindBundle && att.Path == "" { + return errors.New("attestation does not have set path") + } + if att.Ref == nil && att.ContentFunc == nil { + return errors.New("attestation does not have available content") + } + return nil +} diff --git a/exporter/containerimage/annotations.go b/exporter/containerimage/annotations.go new file mode 100644 index 000000000000..cdb5e945096c --- /dev/null +++ b/exporter/containerimage/annotations.go @@ -0,0 +1,139 @@ +package containerimage + +import ( + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/exporter/containerimage/exptypes" +) + +type Annotations struct { + Index map[string]string + IndexDescriptor map[string]string + Manifest map[string]string + ManifestDescriptor map[string]string +} + +// AnnotationsGroup is a map of annotations keyed by the reference key +type AnnotationsGroup map[string]*Annotations + +func 
ParseAnnotations(data map[string][]byte) (AnnotationsGroup, map[string][]byte, error) { + ag := make(AnnotationsGroup) + rest := make(map[string][]byte) + + for k, v := range data { + a, ok, err := exptypes.ParseAnnotationKey(k) + if !ok { + rest[k] = v + continue + } + if err != nil { + return nil, nil, err + } + + p := a.PlatformString() + + if ag[p] == nil { + ag[p] = &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + } + + switch a.Type { + case exptypes.AnnotationIndex: + ag[p].Index[a.Key] = string(v) + case exptypes.AnnotationIndexDescriptor: + ag[p].IndexDescriptor[a.Key] = string(v) + case exptypes.AnnotationManifest: + ag[p].Manifest[a.Key] = string(v) + case exptypes.AnnotationManifestDescriptor: + ag[p].ManifestDescriptor[a.Key] = string(v) + default: + return nil, nil, errors.Errorf("unrecognized annotation type %s", a.Type) + } + } + return ag, rest, nil +} + +func (ag AnnotationsGroup) Platform(p *ocispecs.Platform) *Annotations { + res := &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + + ps := []string{""} + if p != nil { + ps = append(ps, platforms.Format(*p)) + } + + for _, a := range ag { + for k, v := range a.Index { + res.Index[k] = v + } + for k, v := range a.IndexDescriptor { + res.IndexDescriptor[k] = v + } + } + for _, pk := range ps { + if _, ok := ag[pk]; !ok { + continue + } + + for k, v := range ag[pk].Manifest { + res.Manifest[k] = v + } + for k, v := range ag[pk].ManifestDescriptor { + res.ManifestDescriptor[k] = v + } + } + return res +} + +func (ag AnnotationsGroup) Merge(other AnnotationsGroup) AnnotationsGroup { + if other == nil { + return ag + } + if ag == nil { + ag = make(AnnotationsGroup) + } + + for k, v := range other { + ag[k] = ag[k].merge(v) + } + return ag +} + +func (a *Annotations) merge(other *Annotations) *Annotations { + if other == nil { + return a + } + if a == nil { + a = &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + } + + for k, v := range other.Index { + a.Index[k] = v + } + for k, v := range other.IndexDescriptor { + a.IndexDescriptor[k] = v + } + for k, v := range other.Manifest { + a.Manifest[k] = v + } + for k, v := range other.ManifestDescriptor { + a.ManifestDescriptor[k] = v + } + + return a +} diff --git a/exporter/containerimage/attestations.go b/exporter/containerimage/attestations.go new file mode 100644 index 000000000000..a41c6039f0ba --- /dev/null +++ b/exporter/containerimage/attestations.go @@ -0,0 +1,220 @@ +package containerimage + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "path/filepath" + "strings" + + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/version" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + spdx_json "github.com/spdx/tools-golang/json" + "github.com/spdx/tools-golang/spdx/common" + spdx "github.com/spdx/tools-golang/spdx/v2_3" +) + 
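`ParseAnnotations` above groups exporter attributes by annotation type and target platform using the `annotation-<type>[<platform>].<key>` grammar defined in `exptypes`. A hedged usage sketch (the keys and values here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage"
)

func main() {
	// Keys matching the annotation grammar are grouped by platform;
	// anything else is passed through untouched in rest.
	ag, rest, err := containerimage.ParseAnnotations(map[string][]byte{
		"annotation.org.opencontainers.image.title":         []byte("demo"),  // manifest scope by default
		"annotation-index.org.example.created":              []byte("2023"),  // applies to the image index
		"annotation-manifest[linux/amd64].org.example.arch": []byte("amd64"), // only the linux/amd64 manifest
		"name": []byte("docker.io/library/demo:latest"), // not an annotation key
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ag[""].Manifest, ag["linux/amd64"].Manifest, rest)
}
```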
+var intotoPlatform ocispecs.Platform = ocispecs.Platform{ + Architecture: "unknown", + OS: "unknown", +} + +// supplementSBOM modifies SPDX attestations to include the file layers +func supplementSBOM(ctx context.Context, s session.Group, target cache.ImmutableRef, targetRemote *solver.Remote, att exporter.Attestation) (exporter.Attestation, error) { + if target == nil { + return att, nil + } + if att.Kind != gatewaypb.AttestationKindInToto { + return att, nil + } + if att.InToto.PredicateType != intoto.PredicateSPDX { + return att, nil + } + name, ok := att.Metadata[result.AttestationSBOMCore] + if !ok { + return att, nil + } + if n, _, _ := strings.Cut(filepath.Base(att.Path), "."); n != string(name) { + return att, nil + } + + content, err := attestation.ReadAll(ctx, s, att) + if err != nil { + return att, err + } + + doc, err := decodeSPDX(content) + if err != nil { + // ignore decoding error + return att, nil + } + + layers, err := newFileLayerFinder(target, targetRemote) + if err != nil { + return att, err + } + modifyFile := func(f *spdx.File) error { + if f == nil { + // Skip over nil entries - this is likely a bug in the SPDX parser, + // but we shouldn't accidentally panic if we encounter it. + return nil + } + + if f.FileComment != "" { + // Skip over files that already have a comment - since the data is + // unstructured, we can't correctly overwrite this field without + // possibly breaking some scanner functionality. + return nil + } + + _, desc, err := layers.find(ctx, s, f.FileName) + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil + } + f.FileComment = fmt.Sprintf("layerID: %s", desc.Digest.String()) + return nil + } + for _, f := range doc.Files { + if err := modifyFile(f); err != nil { + return att, err + } + } + for _, p := range doc.Packages { + for _, f := range p.Files { + if err := modifyFile(f); err != nil { + return att, err + } + } + } + + if doc.CreationInfo == nil { + doc.CreationInfo = &spdx.CreationInfo{} + } + doc.CreationInfo.Creators = append(doc.CreationInfo.Creators, common.Creator{ + CreatorType: "Tool", + Creator: "buildkit-" + version.Version, + }) + + content, err = encodeSPDX(doc) + if err != nil { + return att, err + } + + return exporter.Attestation{ + Kind: att.Kind, + Path: att.Path, + ContentFunc: func() ([]byte, error) { return content, nil }, + InToto: att.InToto, + }, nil +} + +func decodeSPDX(dt []byte) (s *spdx.Document, err error) { + doc, err := spdx_json.Load2_3(bytes.NewReader(dt)) + if err != nil { + return nil, errors.Wrap(err, "unable to decode spdx") + } + if doc == nil { + return nil, errors.New("decoding produced empty spdx document") + } + return doc, nil +} + +func encodeSPDX(s *spdx.Document) (dt []byte, err error) { + w := bytes.NewBuffer(nil) + err = spdx_json.Save2_3(s, w) + if err != nil { + return nil, errors.Wrap(err, "unable to encode spdx") + } + return w.Bytes(), nil +} + +// fileLayerFinder finds the layer that contains a file, with caching to avoid +// repeated FileList lookups. 
+type fileLayerFinder struct { + pending []fileLayerEntry + cache map[string]fileLayerEntry +} + +type fileLayerEntry struct { + ref cache.ImmutableRef + desc ocispecs.Descriptor +} + +func newFileLayerFinder(target cache.ImmutableRef, remote *solver.Remote) (fileLayerFinder, error) { + chain := target.LayerChain() + descs := remote.Descriptors + if len(chain) != len(descs) { + return fileLayerFinder{}, errors.New("layer chain and descriptor list are not the same length") + } + + pending := make([]fileLayerEntry, len(chain)) + for i, ref := range chain { + pending[i] = fileLayerEntry{ref: ref, desc: descs[i]} + } + return fileLayerFinder{ + pending: pending, + cache: map[string]fileLayerEntry{}, + }, nil +} + +// find finds the layer that contains the file, returning the ImmutableRef and +// descriptor for the layer. If the file searched for was deleted, find returns +// the layer that created the file, not the one that deleted it. +// +// find is not concurrency-safe. +func (c *fileLayerFinder) find(ctx context.Context, s session.Group, filename string) (cache.ImmutableRef, *ocispecs.Descriptor, error) { + filename = filepath.Join("/", filename) + + // return immediately if we've already found the layer containing filename + if cache, ok := c.cache[filename]; ok { + return cache.ref, &cache.desc, nil + } + + for len(c.pending) > 0 { + // pop the last entry off the pending list (we traverse the layers backwards) + pending := c.pending[len(c.pending)-1] + files, err := pending.ref.FileList(ctx, s) + if err != nil { + return nil, nil, err + } + c.pending = c.pending[:len(c.pending)-1] + + found := false + for _, f := range files { + f = filepath.Join("/", f) + + if strings.HasPrefix(filepath.Base(f), ".wh.") { + // skip whiteout files, we only care about file creations + continue + } + + // add all files in this layer to the cache + if _, ok := c.cache[f]; ok { + continue + } + c.cache[f] = pending + + // if we found the file, return the layer (but finish populating the cache first) + if f == filename { + found = true + } + } + if found { + return pending.ref, &pending.desc, nil + } + } + return nil, nil, fs.ErrNotExist +} diff --git a/exporter/containerimage/export.go b/exporter/containerimage/export.go index 429a3ce6df3b..55eaf3ff5803 100644 --- a/exporter/containerimage/export.go +++ b/exporter/containerimage/export.go @@ -14,8 +14,10 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/rootfs" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -25,32 +27,27 @@ import ( "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/push" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( - keyImageName = "name" - keyPush = "push" - keyPushByDigest = "push-by-digest" - keyInsecure = "registry.insecure" - keyUnpack = "unpack" - keyDanglingPrefix = "dangling-name-prefix" - keyNameCanonical = "name-canonical" - keyLayerCompression = "compression" - 
keyForceCompression = "force-compression" - keyCompressionLevel = "compression-level" - keyBuildInfo = "buildinfo" - keyBuildInfoAttrs = "buildinfo-attrs" - ociTypes = "oci-mediatypes" - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. - // When this option is not set, the exporter will change the media type of the layer to a distributable one. - preferNondistLayersKey = "prefer-nondist-layers" + keyPush = "push" + keyPushByDigest = "push-by-digest" + keyInsecure = "registry.insecure" + keyUnpack = "unpack" + keyDanglingPrefix = "dangling-name-prefix" + keyNameCanonical = "name-canonical" + keyStore = "store" + + // keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting an image to the image store + // even when some blobs are missing from the content store. Some integration tests for lazyref behaviour depend on this option. + // Ignored when store=false. + keyUnsafeInternalStoreAllowIncomplete = "unsafe-internal-store-allow-incomplete" ) type Opt struct { @@ -76,16 +73,24 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { i := &imageExporterInstance{ - imageExporter: e, - layerCompression: compression.Default, - buildInfo: true, + imageExporter: e, + opts: ImageCommitOpts{ + RefCfg: cacheconfig.RefConfig{ + Compression: compression.New(compression.Default), + }, + BuildInfo: true, + ForceInlineAttestations: true, + }, + store: true, + } + + opt, err := i.opts.Load(opt) + if err != nil { + return nil, err } - var esgz bool for k, v := range opt { switch k { - case keyImageName: - i.targetName = v case keyPush: if v == "" { i.push = true @@ -126,85 +131,38 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.unpack = b - case ociTypes: + case keyStore: if v == "" { - i.ociTypes = true + i.store = true continue } b, err := strconv.ParseBool(v) if err != nil { return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } - i.ociTypes = b - case keyDanglingPrefix: - i.danglingPrefix = v - case keyNameCanonical: + i.store = b + case keyUnsafeInternalStoreAllowIncomplete: if v == "" { - i.nameCanonical = true + i.storeAllowIncomplete = true continue } b, err := strconv.ParseBool(v) if err != nil { return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } - i.nameCanonical = b - case keyLayerCompression: - switch v { - case "gzip": - i.layerCompression = compression.Gzip - case "estargz": - i.layerCompression = compression.EStargz - esgz = true - case "zstd": - i.layerCompression = compression.Zstd - case "uncompressed": - i.layerCompression = compression.Uncompressed - default: - return nil, errors.Errorf("unsupported layer compression type: %v", v) - } - case keyForceCompression: - if v == "" { - i.forceCompression = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k) - } - i.forceCompression = b - case keyCompressionLevel: - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, k) - } - v := int(ii) - i.compressionLevel = &v - case keyBuildInfo: - if v == "" { - i.buildInfo = true - continue - } - b, err := 
strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.buildInfo = b - case keyBuildInfoAttrs: + i.storeAllowIncomplete = b + case keyDanglingPrefix: + i.danglingPrefix = v + case keyNameCanonical: if v == "" { - i.buildInfoAttrs = false + i.nameCanonical = true continue } b, err := strconv.ParseBool(v) if err != nil { return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } - i.buildInfoAttrs = b - case preferNondistLayersKey: - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k) - } - i.preferNondistLayers = b + i.nameCanonical = b default: if i.meta == nil { i.meta = make(map[string][]byte) @@ -212,51 +170,32 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp i.meta[k] = []byte(v) } } - if esgz && !i.ociTypes { - logrus.Warn("forcibly turning on oci-mediatype mode for estargz") - i.ociTypes = true - } return i, nil } type imageExporterInstance struct { *imageExporter - targetName string - push bool - pushByDigest bool - unpack bool - insecure bool - ociTypes bool - nameCanonical bool - danglingPrefix string - layerCompression compression.Type - forceCompression bool - compressionLevel *int - buildInfo bool - buildInfoAttrs bool - meta map[string][]byte - preferNondistLayers bool + opts ImageCommitOpts + push bool + pushByDigest bool + unpack bool + store bool + storeAllowIncomplete bool + insecure bool + nameCanonical bool + danglingPrefix string + meta map[string][]byte } func (e *imageExporterInstance) Name() string { return "exporting to image" } -func (e *imageExporterInstance) Config() exporter.Config { - return exporter.Config{ - Compression: e.compression(), - } -} - -func (e *imageExporterInstance) compression() compression.Config { - c := compression.New(e.layerCompression).SetForce(e.forceCompression) - if e.compressionLevel != nil { - c = c.SetLevel(*e.compressionLevel) - } - return c +func (e *imageExporterInstance) Config() *exporter.Config { + return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression) } -func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) { +func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) { if src.Metadata == nil { src.Metadata = make(map[string][]byte) } @@ -264,39 +203,50 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, src.Metadata[k] = v } - ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + opts := e.opts + as, _, err := ParseAnnotations(src.Metadata) if err != nil { - return nil, err + return nil, nil, err } - defer done(context.TODO()) + opts.Annotations = opts.Annotations.Merge(as) - refCfg := e.refCfg() - desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, refCfg, e.buildInfo, e.buildInfoAttrs, sessionID) + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) if err != nil { - return nil, err + return nil, nil, err } + defer func() { + if descref == nil { + done(context.TODO()) + } + }() + desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts) + if err != nil { + return nil, nil, err + } defer func() { - e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + if err == nil { + descref = NewDescriptorReference(*desc, done) + } 
}() resp := make(map[string]string) - if n, ok := src.Metadata["image.name"]; e.targetName == "*" && ok { - e.targetName = string(n) + if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok { + e.opts.ImageName = string(n) } nameCanonical := e.nameCanonical - if e.targetName == "" && e.danglingPrefix != "" { - e.targetName = e.danglingPrefix + "@" + desc.Digest.String() + if e.opts.ImageName == "" && e.danglingPrefix != "" { + e.opts.ImageName = e.danglingPrefix + "@" + desc.Digest.String() nameCanonical = false } - if e.targetName != "" { - targetNames := strings.Split(e.targetName, ",") + if e.opts.ImageName != "" { + targetNames := strings.Split(e.opts.ImageName, ",") for _, targetName := range targetNames { - if e.opt.Images != nil { - tagDone := oneOffProgress(ctx, "naming to "+targetName) + if e.opt.Images != nil && e.store { + tagDone := progress.OneOff(ctx, "naming to "+targetName) img := images.Image{ Target: *desc, CreatedAt: time.Now(), @@ -309,56 +259,59 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, img.Name = targetName + sfx if _, err := e.opt.Images.Update(ctx, img); err != nil { if !errors.Is(err, errdefs.ErrNotFound) { - return nil, tagDone(err) + return nil, nil, tagDone(err) } if _, err := e.opt.Images.Create(ctx, img); err != nil { - return nil, tagDone(err) + return nil, nil, tagDone(err) } } } tagDone(nil) - if e.unpack { + if src.Ref != nil && e.unpack { if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { - return nil, err - } - } - } - if e.push { - annotations := map[digest.Digest]map[string]string{} - mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) - if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID)) - if err != nil { - return nil, err - } - remote := remotes[0] - for _, desc := range remote.Descriptors { - mprovider.Add(desc.Digest, remote.Provider) - addAnnotations(annotations, desc) + return nil, nil, err } } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID)) + + if !e.storeAllowIncomplete { + if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, err + return nil, nil, err } remote := remotes[0] - for _, desc := range remote.Descriptors { - mprovider.Add(desc.Digest, remote.Provider) - addAnnotations(annotations, desc) + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, nil, err + } + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return nil, nil, err + } + remote := remotes[0] + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, nil, err + } + } } } } - - if err := push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations); err != nil { - return nil, err + } + if e.push { + err := e.pushImage(ctx, src, sessionID, targetName, desc.Digest) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to push %v", targetName) } } } - resp["image.name"] = e.targetName + resp["image.name"] = e.opts.ImageName } 
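The naming rules in `Export` above are easy to miss, so here is a standalone restatement (a sketch, not the exporter's actual code path): `name=*` defers to the frontend-provided `image.name` metadata, an empty name combined with a dangling prefix produces an untagged `<prefix>@<digest>` reference, and a non-empty name may be a comma-separated list with every entry tagged.

```go
package main

import "fmt"

// resolveName restates the rules visible in Export above: "*" defers to the
// frontend-provided "image.name" metadata, and an empty name falls back to
// danglingPrefix + "@" + digest, producing an untagged dangling reference.
func resolveName(name, metaImageName, danglingPrefix, dgst string) string {
	if name == "*" && metaImageName != "" {
		return metaImageName
	}
	if name == "" && danglingPrefix != "" {
		return danglingPrefix + "@" + dgst
	}
	return name // may be a comma-separated list; each entry is tagged
}

func main() {
	fmt.Println(resolveName("*", "docker.io/user/app:latest", "", "sha256:abc"))
	fmt.Println(resolveName("", "", "moby-dangling", "sha256:abc"))
}
```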
resp[exptypes.ExporterImageDigestKey] = desc.Digest.String() @@ -369,22 +322,47 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, dtdesc, err := json.Marshal(desc) if err != nil { - return nil, err + return nil, nil, err } resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc) - return resp, nil + return resp, nil, nil } -func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig { - return cacheconfig.RefConfig{ - Compression: e.compression(), - PreferNonDistributable: e.preferNondistLayers, +func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error { + annotations := map[digest.Digest]map[string]string{} + mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } } + + ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") + return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations) } -func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src exporter.Source, s session.Group) (err0 error) { - unpackDone := oneOffProgress(ctx, "unpacking to "+img.Name) +func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) { + unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name) defer func() { unpackDone(err0) }() @@ -403,14 +381,14 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag topLayerRef := src.Ref if len(src.Refs) > 0 { - if r, ok := src.Refs[platforms.DefaultString()]; ok { + if r, ok := src.Refs[defaultPlatform()]; ok { topLayerRef = r } else { - return errors.Errorf("no reference for default platform %s", platforms.DefaultString()) + return errors.Errorf("no reference for default platform %s", defaultPlatform()) } } - remotes, err := topLayerRef.GetRemotes(ctx, true, e.refCfg(), false, s) + remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s) if err != nil { return err } @@ -482,3 +460,29 @@ func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descrip a[k] = v } } + +func defaultPlatform() string { + // Use normalized platform string to avoid the mismatch with platform options which + // are normalized using platforms.Normalize() + return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) +} + +func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference { + return &descriptorReference{ + desc: desc, + release: release, + } +} + +type descriptorReference struct { + desc ocispecs.Descriptor + release func(context.Context) error +} + +func (d *descriptorReference) Descriptor() 
ocispecs.Descriptor { + return d.desc +} + +func (d *descriptorReference) Release() error { + return d.release(context.TODO()) +} diff --git a/exporter/containerimage/exptypes/annotations.go b/exporter/containerimage/exptypes/annotations.go new file mode 100644 index 000000000000..e7697d916ad2 --- /dev/null +++ b/exporter/containerimage/exptypes/annotations.go @@ -0,0 +1,115 @@ +package exptypes + +import ( + "fmt" + "regexp" + + "github.com/containerd/containerd/platforms" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + AnnotationIndex = "index" + AnnotationIndexDescriptor = "index-descriptor" + AnnotationManifest = "manifest" + AnnotationManifestDescriptor = "manifest-descriptor" +) + +var ( + keyAnnotationRegexp = regexp.MustCompile(`^annotation(?:-([a-z-]+))?(?:\[([A-Za-z0-9_/-]+)\])?\.(\S+)$`) +) + +type AnnotationKey struct { + Type string + Platform *ocispecs.Platform + Key string +} + +func (k AnnotationKey) String() string { + prefix := "annotation" + + switch k.Type { + case "": + case AnnotationManifest, AnnotationManifestDescriptor: + prefix += fmt.Sprintf("-%s", k.Type) + if p := k.PlatformString(); p != "" { + prefix += fmt.Sprintf("[%s]", p) + } + case AnnotationIndex, AnnotationIndexDescriptor: + prefix += "-" + k.Type + default: + panic("unknown annotation type") + } + + return fmt.Sprintf("%s.%s", prefix, k.Key) +} + +func (k AnnotationKey) PlatformString() string { + if k.Platform == nil { + return "" + } + return platforms.Format(*k.Platform) +} + +func AnnotationIndexKey(key string) string { + return AnnotationKey{ + Type: AnnotationIndex, + Key: key, + }.String() +} + +func AnnotationIndexDescriptorKey(key string) string { + return AnnotationKey{ + Type: AnnotationIndexDescriptor, + Key: key, + }.String() +} + +func AnnotationManifestKey(p *ocispecs.Platform, key string) string { + return AnnotationKey{ + Type: AnnotationManifest, + Platform: p, + Key: key, + }.String() +} + +func AnnotationManifestDescriptorKey(p *ocispecs.Platform, key string) string { + return AnnotationKey{ + Type: AnnotationManifestDescriptor, + Platform: p, + Key: key, + }.String() +} + +func ParseAnnotationKey(result string) (AnnotationKey, bool, error) { + groups := keyAnnotationRegexp.FindStringSubmatch(result) + if groups == nil { + return AnnotationKey{}, false, nil + } + + tp, platform, key := groups[1], groups[2], groups[3] + switch tp { + case AnnotationIndex, AnnotationIndexDescriptor, AnnotationManifest, AnnotationManifestDescriptor: + case "": + tp = AnnotationManifest + default: + return AnnotationKey{}, true, errors.Errorf("unrecognized annotation type %s", tp) + } + + var ociPlatform *ocispecs.Platform + if platform != "" { + p, err := platforms.Parse(platform) + if err != nil { + return AnnotationKey{}, true, err + } + ociPlatform = &p + } + + annotation := AnnotationKey{ + Type: tp, + Platform: ociPlatform, + Key: key, + } + return annotation, true, nil +} diff --git a/exporter/containerimage/exptypes/parse.go b/exporter/containerimage/exptypes/parse.go new file mode 100644 index 000000000000..f77cd3f52565 --- /dev/null +++ b/exporter/containerimage/exptypes/parse.go @@ -0,0 +1,56 @@ +package exptypes + +import ( + "encoding/json" + "fmt" + + "github.com/containerd/containerd/platforms" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func ParsePlatforms(meta map[string][]byte) (Platforms, error) { + if platformsBytes, ok := meta[ExporterPlatformsKey]; ok { + var ps Platforms + if 
len(platformsBytes) > 0 { + if err := json.Unmarshal(platformsBytes, &ps); err != nil { + return Platforms{}, errors.Wrapf(err, "failed to parse platforms passed to provenance processor") + } + } + return ps, nil + } + + p := platforms.DefaultSpec() + if imgConfig, ok := meta[ExporterImageConfigKey]; ok { + var img ocispecs.Image + err := json.Unmarshal(imgConfig, &img) + if err != nil { + return Platforms{}, err + } + + if img.OS != "" && img.Architecture != "" { + p = ocispecs.Platform{ + Architecture: img.Architecture, + OS: img.OS, + OSVersion: img.OSVersion, + OSFeatures: img.OSFeatures, + Variant: img.Variant, + } + } + } + p = platforms.Normalize(p) + pk := platforms.Format(p) + ps := Platforms{ + Platforms: []Platform{{ID: pk, Platform: p}}, + } + return ps, nil +} + +func ParseKey(meta map[string][]byte, key string, p Platform) []byte { + if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok { + return v + } else if v, ok := meta[key]; ok { + return v + } + return nil +} diff --git a/exporter/containerimage/exptypes/types.go b/exporter/containerimage/exptypes/types.go index a18d660a5c4a..4531360afa80 100644 --- a/exporter/containerimage/exptypes/types.go +++ b/exporter/containerimage/exptypes/types.go @@ -11,10 +11,19 @@ const ( ExporterImageConfigDigestKey = "containerimage.config.digest" ExporterImageDescriptorKey = "containerimage.descriptor" ExporterInlineCache = "containerimage.inlinecache" - ExporterBuildInfo = "containerimage.buildinfo" + ExporterBuildInfo = "containerimage.buildinfo" // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md ExporterPlatformsKey = "refs.platforms" + ExporterEpochKey = "source.date.epoch" ) +// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by +// a platform to become platform specific +var KnownRefMetadataKeys = []string{ + ExporterImageConfigKey, + ExporterInlineCache, + ExporterBuildInfo, +} + type Platforms struct { Platforms []Platform } diff --git a/exporter/containerimage/image/docker_image.go b/exporter/containerimage/image/docker_image.go new file mode 100644 index 000000000000..a35d811d55cf --- /dev/null +++ b/exporter/containerimage/image/docker_image.go @@ -0,0 +1,52 @@ +package image + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. 
+ Retries int `json:",omitempty"` +} + +// ImageConfig is a docker compatible config for an image +type ImageConfig struct { + ocispecs.ImageConfig + + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + + // NetworkDisabled bool `json:",omitempty"` // Is network disabled + // MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + ocispecs.Image + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` +} diff --git a/exporter/containerimage/opts.go b/exporter/containerimage/opts.go new file mode 100644 index 000000000000..4948eaad2431 --- /dev/null +++ b/exporter/containerimage/opts.go @@ -0,0 +1,161 @@ +package containerimage + +import ( + "strconv" + "time" + + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter/util/epoch" + "github.com/moby/buildkit/util/compression" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + keyImageName = "name" + keyLayerCompression = "compression" + keyCompressionLevel = "compression-level" + keyForceCompression = "force-compression" + keyOCITypes = "oci-mediatypes" + keyBuildInfo = "buildinfo" + keyBuildInfoAttrs = "buildinfo-attrs" + keyForceInlineAttestations = "attestation-inline" + + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was + // already found to use a non-distributable media type. + // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
+ keyPreferNondistLayers = "prefer-nondist-layers" +) + +type ImageCommitOpts struct { + ImageName string + RefCfg cacheconfig.RefConfig + OCITypes bool + Annotations AnnotationsGroup + Epoch *time.Time + + ForceInlineAttestations bool // force inline attestations to be attached + + BuildInfo bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md + BuildInfoAttrs bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md +} + +func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) { + rest := make(map[string]string) + + as, optb, err := ParseAnnotations(toBytesMap(opt)) + if err != nil { + return nil, err + } + opt = toStringMap(optb) + + c.Epoch, opt, err = epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + + for k, v := range opt { + var err error + switch k { + case keyImageName: + c.ImageName = v + case keyLayerCompression: + c.RefCfg.Compression.Type, err = compression.Parse(v) + case keyCompressionLevel: + ii, err2 := strconv.ParseInt(v, 10, 64) + if err2 != nil { + err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k) + break + } + v := int(ii) + c.RefCfg.Compression.Level = &v + case keyForceCompression: + err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true) + case keyOCITypes: + err = parseBoolWithDefault(&c.OCITypes, k, v, true) + case keyBuildInfo: + err = parseBoolWithDefault(&c.BuildInfo, k, v, true) + case keyBuildInfoAttrs: + err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false) + case keyForceInlineAttestations: + err = parseBool(&c.ForceInlineAttestations, k, v) + case keyPreferNondistLayers: + err = parseBool(&c.RefCfg.PreferNonDistributable, k, v) + default: + rest[k] = v + } + + if err != nil { + return nil, err + } + } + + if c.RefCfg.Compression.Type.OnlySupportOCITypes() { + c.EnableOCITypes(c.RefCfg.Compression.Type.String()) + } + + if c.RefCfg.Compression.Type.NeedsForceCompression() { + c.EnableForceCompression(c.RefCfg.Compression.Type.String()) + } + + c.Annotations = c.Annotations.Merge(as) + + return rest, nil +} + +func (c *ImageCommitOpts) EnableOCITypes(reason string) { + if !c.OCITypes { + message := "forcibly turning on oci-mediatype mode" + if reason != "" { + message += " for " + reason + } + logrus.Warn(message) + + c.OCITypes = true + } +} + +func (c *ImageCommitOpts) EnableForceCompression(reason string) { + if !c.RefCfg.Compression.Force { + message := "forcibly turning on force-compression mode" + if reason != "" { + message += " for " + reason + } + logrus.Warn(message) + + c.RefCfg.Compression.Force = true + } +} + +func parseBool(dest *bool, key string, value string) error { + b, err := strconv.ParseBool(value) + if err != nil { + return errors.Wrapf(err, "non-bool value specified for %s", key) + } + *dest = b + return nil +} + +func parseBoolWithDefault(dest *bool, key string, value string, defaultValue bool) error { + if value == "" { + *dest = defaultValue + return nil + } + return parseBool(dest, key, value) +} + +func toBytesMap(m map[string]string) map[string][]byte { + result := make(map[string][]byte) + for k, v := range m { + result[k] = []byte(v) + } + return result +} + +func toStringMap(m map[string][]byte) map[string]string { + result := make(map[string]string) + for k, v := range m { + result[k] = string(v) + } + return result +} diff --git a/exporter/containerimage/patch.go b/exporter/containerimage/patch.go new file mode 100644
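These option keys are the ones users pass through an exporter, e.g. `--output type=image,name=...,compression=zstd,compression-level=3,oci-mediatypes=true`; `Load` consumes the keys it recognizes and hands everything else back to the caller. A sketch of that flow, using the same `RefCfg` initialization the oci exporter applies below (values are illustrative):

```go
package main

import (
	"fmt"

	cacheconfig "github.com/moby/buildkit/cache/config"
	"github.com/moby/buildkit/exporter/containerimage"
	"github.com/moby/buildkit/util/compression"
)

func main() {
	opts := containerimage.ImageCommitOpts{
		RefCfg: cacheconfig.RefConfig{
			Compression: compression.New(compression.Default),
		},
	}
	rest, err := opts.Load(map[string]string{
		"name":              "docker.io/library/demo:latest",
		"compression":       "zstd",
		"compression-level": "3",
		"oci-mediatypes":    "true",
		"registry.insecure": "true", // not an image-commit key, returned in rest
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(opts.ImageName, opts.RefCfg.Compression.Type, *opts.RefCfg.Compression.Level, opts.OCITypes)
	fmt.Println(rest) // map[registry.insecure:true]
}
```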
index 000000000000..93866b018bda --- /dev/null +++ b/exporter/containerimage/patch.go @@ -0,0 +1,18 @@ +//go:build !nydus +// +build !nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil +} diff --git a/exporter/containerimage/patch_nydus.go b/exporter/containerimage/patch_nydus.go new file mode 100644 index 000000000000..3a9336a66f64 --- /dev/null +++ b/exporter/containerimage/patch_nydus.go @@ -0,0 +1,35 @@ +//go:build nydus +// +build nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// patchImageLayers appends an extra nydus bootstrap layer +// to the manifest of nydus image, normalizes layers and +// history. The nydus bootstrap layer represents the whole +// metadata of filesystem view for the entire image. +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + if opts.RefCfg.Compression.Type != compression.Nydus { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil + } + + desc, err := cache.MergeNydus(ctx, ref, opts.RefCfg.Compression, sg) + if err != nil { + return nil, nil, errors.Wrap(err, "merge nydus layer") + } + remote.Descriptors = append(remote.Descriptors, *desc) + + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil +} diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go index e5ec1519803d..068d86958f8f 100644 --- a/exporter/containerimage/writer.go +++ b/exporter/containerimage/writer.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" "strings" "time" @@ -12,18 +13,24 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/result" + attestationTypes "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/buildinfo" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/purl" "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/util/tracing" digest 
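The two `patchImageLayers` implementations above are selected at compile time by the `nydus` build constraint, so default builds carry none of the nydus code path. The same pattern in miniature, with a hypothetical `patch` package built with and without `go build -tags nydus`:

```go
// variant_default.go, compiled unless the nydus tag is set
//go:build !nydus

package patch

// Variant reports which implementation was linked in.
func Variant() string { return "default" }
```

```go
// variant_nydus.go, compiled only with `go build -tags nydus`
//go:build nydus

package patch

// Variant reports which implementation was linked in.
func Variant() string { return "nydus" }
```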
"github.com/opencontainers/go-digest" @@ -50,57 +57,123 @@ type ImageWriter struct { opt WriterOpt } -func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, refCfg cacheconfig.RefConfig, buildInfo bool, buildInfoAttrs bool, sessionID string) (*ocispecs.Descriptor, error) { - platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey] - - if len(inp.Refs) > 0 && !ok { +func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, sessionID string, opts *ImageCommitOpts) (*ocispecs.Descriptor, error) { + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; len(inp.Refs) > 0 && !ok { return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") } - if len(inp.Refs) == 0 { - remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), inp.Ref) + isMap := len(inp.Refs) > 0 + + ps, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, err + } + + if !isMap { + // enable index if we need to include attestations + for _, p := range ps.Platforms { + if atts, ok := inp.Attestations[p.ID]; ok { + if !opts.ForceInlineAttestations { + // if we don't need force inline attestations (for oci + // exporter), filter them out + atts = attestation.Filter(atts, nil, map[string][]byte{ + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)), + }) + } + if len(atts) > 0 { + isMap = true + break + } + } + } + } + if opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, err + } else if ok { + opts.Epoch = tm + } + } + + for pk, a := range opts.Annotations { + if pk != "" { + if _, ok := inp.FindRef(pk); !ok { + return nil, errors.Errorf("invalid annotation: no platform %s found in source", pk) + } + } + if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 { + opts.EnableOCITypes("annotations") + } + } + + if !isMap { + if len(ps.Platforms) > 1 { + return nil, errors.Errorf("cannot export multiple platforms without multi-platform enabled") + } + + var ref cache.ImmutableRef + var p exptypes.Platform + if len(ps.Platforms) > 0 { + p = ps.Platforms[0] + if r, ok := inp.FindRef(p.ID); ok { + ref = r + } + } else { + ref = inp.Ref + } + + remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref) if err != nil { return nil, err } var dtbi []byte - if buildInfo { - if dtbi, err = buildinfo.Format(inp.Metadata[exptypes.ExporterBuildInfo], buildinfo.FormatOpts{ - RemoveAttrs: !buildInfoAttrs, + if opts.BuildInfo { + if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ + RemoveAttrs: !opts.BuildInfoAttrs, }); err != nil { return nil, err } } - mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], &remotes[0], oci, inp.Metadata[exptypes.ExporterInlineCache], dtbi) + annotations := opts.Annotations.Platform(nil) + if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 { + return nil, errors.Errorf("index annotations not supported for single platform export") + } + + config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) + inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) + mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } if mfstDesc.Annotations == nil { mfstDesc.Annotations = 
make(map[string]string) } + if len(ps.Platforms) == 1 { + mfstDesc.Platform = &ps.Platforms[0].Platform + } mfstDesc.Annotations[exptypes.ExporterConfigDigestKey] = configDesc.Digest.String() return mfstDesc, nil } - var p exptypes.Platforms - if err := json.Unmarshal(platformsBytes, &p); err != nil { - return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") - } - - if len(p.Platforms) != len(inp.Refs) { - return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) + if len(inp.Attestations) > 0 { + opts.EnableOCITypes("attestations") } refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) remotesMap := make(map[string]int, len(inp.Refs)) - for id, r := range inp.Refs { - remotesMap[id] = len(refs) + for _, p := range ps.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + remotesMap[p.ID] = len(refs) refs = append(refs, r) } - remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), refs...) + remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), refs...) if err != nil { return nil, err } @@ -114,36 +187,46 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool }{ MediaType: ocispecs.MediaTypeImageIndex, Index: ocispecs.Index{ + Annotations: opts.Annotations.Platform(nil).Index, Versioned: specs.Versioned{ SchemaVersion: 2, }, }, } - if !oci { + if !opts.OCITypes { idx.MediaType = images.MediaTypeDockerSchema2ManifestList } labels := map[string]string{} - for i, p := range p.Platforms { - r, ok := inp.Refs[p.ID] + var attestationManifests []ocispecs.Descriptor + + for i, p := range ps.Platforms { + r, ok := inp.FindRef(p.ID) if !ok { return nil, errors.Errorf("failed to find ref for ID %s", p.ID) } - config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)] - inlineCache := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, p.ID)] + config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) + inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) var dtbi []byte - if buildInfo { - if dtbi, err = buildinfo.Format(inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.ID)], buildinfo.FormatOpts{ - RemoveAttrs: !buildInfoAttrs, + if opts.BuildInfo { + if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ + RemoveAttrs: !opts.BuildInfoAttrs, }); err != nil { return nil, err } } - desc, _, err := ic.commitDistributionManifest(ctx, r, config, &remotes[remotesMap[p.ID]], oci, inlineCache, dtbi) + remote := &remotes[remotesMap[p.ID]] + if remote == nil { + remote = &solver.Remote{ + Provider: ic.opt.ContentStore, + } + } + + desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -152,20 +235,75 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool idx.Manifests = append(idx.Manifests, *desc) labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String() + + if attestations, ok := inp.Attestations[p.ID]; ok { + attestations, err := attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations) + if err != nil { + return nil, err + } + + eg, ctx2 := errgroup.WithContext(ctx) + for i, att := range attestations { + i, att := 
i, att + eg.Go(func() error { + att, err := supplementSBOM(ctx2, session.NewGroup(sessionID), r, remote, att) + if err != nil { + return err + } + attestations[i] = att + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + + var defaultSubjects []intoto.Subject + for _, name := range strings.Split(opts.ImageName, ",") { + if name == "" { + continue + } + pl, err := purl.RefToPURL(name, &p.Platform) + if err != nil { + return nil, err + } + defaultSubjects = append(defaultSubjects, intoto.Subject{ + Name: pl, + Digest: result.ToDigestMap(desc.Digest), + }) + } + stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, defaultSubjects) + if err != nil { + return nil, err + } + + desc, err := ic.commitAttestationsManifest(ctx, opts, p, desc.Digest.String(), stmts) + if err != nil { + return nil, err + } + desc.Platform = &intotoPlatform + attestationManifests = append(attestationManifests, *desc) + } + } + + for i, mfst := range attestationManifests { + idx.Manifests = append(idx.Manifests, mfst) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", len(ps.Platforms)+i)] = mfst.Digest.String() } - idxBytes, err := json.MarshalIndent(idx, "", " ") + idxBytes, err := json.MarshalIndent(idx, "", " ") if err != nil { return nil, errors.Wrap(err, "failed to marshal index") } idxDigest := digest.FromBytes(idxBytes) idxDesc := ocispecs.Descriptor{ - Digest: idxDigest, - Size: int64(len(idxBytes)), - MediaType: idx.MediaType, + Digest: idxDigest, + Size: int64(len(idxBytes)), + MediaType: idx.MediaType, + Annotations: opts.Annotations.Platform(nil).IndexDescriptor, } - idxDone := oneOffProgress(ctx, "exporting manifest list "+idxDigest.String()) + idxDone := progress.OneOff(ctx, "exporting manifest list "+idxDigest.String()) if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil { return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest)) @@ -186,7 +324,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(attr...)) eg, ctx := errgroup.WithContext(ctx) - layersDone := oneOffProgress(ctx, "exporting layers") + layersDone := progress.OneOff(ctx, "exporting layers") out := make([]solver.Remote, len(refs)) @@ -212,29 +350,26 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC return out, err } -func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, remote *solver.Remote, oci bool, inlineCache []byte, buildInfo []byte) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { if len(config) == 0 { var err error - config, err = emptyImageConfig() + config, err = defaultImageConfig() if err != nil { return nil, nil, err } } - if remote == nil { - remote = &solver.Remote{ - Provider: ic.opt.ContentStore, - } - } - history, err := parseHistoryFromConfig(config) if err != nil { return nil, nil, err } - remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, oci) + remote, history, err = patchImageLayers(ctx, remote, 
history, ref, opts, sg) + if err != nil { + return nil, nil, err + } - config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo) + config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch) if err != nil { return nil, nil, err } @@ -246,7 +381,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache ) // Use docker media types for older Docker versions and registries - if !oci { + if !opts.OCITypes { manifestType = images.MediaTypeDockerSchema2Manifest configType = images.MediaTypeDockerSchema2Config } @@ -260,6 +395,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache }{ MediaType: manifestType, Manifest: ocispecs.Manifest{ + Annotations: annotations.Manifest, Versioned: specs.Versioned{ SchemaVersion: 2, }, @@ -275,25 +411,12 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache "containerd.io/gc.ref.content.0": configDigest.String(), } - for i, desc := range remote.Descriptors { - // oci supports annotations but don't export internal annotations - if oci { - delete(desc.Annotations, "containerd.io/uncompressed") - delete(desc.Annotations, "buildkit/createdat") - for k := range desc.Annotations { - if strings.HasPrefix(k, "containerd.io/distribution.source.") { - delete(desc.Annotations, k) - } - } - } else { - desc.Annotations = nil - } - + for _, desc := range remote.Descriptors { + desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) mfst.Layers = append(mfst.Layers, desc) - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() } - mfstJSON, err := json.MarshalIndent(mfst, "", " ") + mfstJSON, err := json.MarshalIndent(mfst, "", " ") if err != nil { return nil, nil, errors.Wrap(err, "failed to marshal manifest") } @@ -303,7 +426,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache Digest: mfstDigest, Size: int64(len(mfstJSON)), } - mfstDone := oneOffProgress(ctx, "exporting manifest "+mfstDigest.String()) + mfstDone := progress.OneOff(ctx, "exporting manifest "+mfstDigest.String()) if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { return nil, nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) @@ -315,18 +438,125 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache Size: int64(len(config)), MediaType: configType, } - configDone := oneOffProgress(ctx, "exporting config "+configDigest.String()) + configDone := progress.OneOff(ctx, "exporting config "+configDigest.String()) if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { return nil, nil, configDone(errors.Wrap(err, "error writing config blob")) } configDone(nil) + return &ocispecs.Descriptor{ + Annotations: annotations.ManifestDescriptor, + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + MediaType: manifestType, + }, &configDesc, nil +} + +func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *ImageCommitOpts, p exptypes.Platform, target string, statements []intoto.Statement) (*ocispecs.Descriptor, error) { + var ( + manifestType = ocispecs.MediaTypeImageManifest + configType = ocispecs.MediaTypeImageConfig + ) + if !opts.OCITypes { + manifestType = images.MediaTypeDockerSchema2Manifest + configType = 
images.MediaTypeDockerSchema2Config + } + + layers := make([]ocispecs.Descriptor, len(statements)) + for i, statement := range statements { + i, statement := i, statement + + data, err := json.Marshal(statement) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal attestation") + } + digest := digest.FromBytes(data) + desc := ocispecs.Descriptor{ + MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType, + Digest: digest, + Size: int64(len(data)), + Annotations: map[string]string{ + "containerd.io/uncompressed": digest.String(), + "in-toto.io/predicate-type": statement.PredicateType, + }, + } + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, digest.String(), bytes.NewReader(data), desc); err != nil { + return nil, errors.Wrapf(err, "error writing data blob %s", digest) + } + layers[i] = desc + } + + config, err := attestationsConfig(layers) + if err != nil { + return nil, err + } + configDigest := digest.FromBytes(config) + configDesc := ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. + MediaType string `json:"mediaType,omitempty"` + + ocispecs.Manifest + }{ + MediaType: manifestType, + Manifest: ocispecs.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + for i, desc := range layers { + desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) + mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + + done := progress.OneOff(ctx, "exporting attestation manifest "+mfstDigest.String()) + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, done(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { + return nil, done(errors.Wrap(err, "error writing config blob")) + } + done(nil) + return &ocispecs.Descriptor{ Digest: mfstDigest, Size: int64(len(mfstJSON)), MediaType: manifestType, - }, &configDesc, nil + Annotations: map[string]string{ + attestationTypes.DockerAnnotationReferenceType: attestationTypes.DockerAnnotationReferenceTypeDefault, + attestationTypes.DockerAnnotationReferenceDigest: target, + }, + }, nil } func (ic *ImageWriter) ContentStore() content.Store { @@ -341,22 +571,13 @@ func (ic *ImageWriter) Applier() diff.Applier { return ic.opt.Applier } -func emptyImageConfig() ([]byte, error) { +func defaultImageConfig() ([]byte, error) { pl := platforms.Normalize(platforms.DefaultSpec()) - type image struct { - ocispecs.Image - - // Variant defines platform variant. To be added to OCI. 
- Variant string `json:"variant,omitempty"` - } - - img := image{ - Image: ocispecs.Image{ - Architecture: pl.Architecture, - OS: pl.OS, - }, - Variant: pl.Variant, + img := ocispecs.Image{ + Architecture: pl.Architecture, + OS: pl.OS, + Variant: pl.Variant, } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" @@ -365,6 +586,22 @@ func emptyImageConfig() ([]byte, error) { return dt, errors.Wrap(err, "failed to create empty image config") } +func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) { + img := ocispecs.Image{ + Architecture: intotoPlatform.Architecture, + OS: intotoPlatform.OS, + OSVersion: intotoPlatform.OSVersion, + OSFeatures: intotoPlatform.OSFeatures, + Variant: intotoPlatform.Variant, + } + img.RootFS.Type = "layers" + for _, layer := range layers { + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"])) + } + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create attestations image config") +} + func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { var config struct { History []ocispecs.History @@ -375,7 +612,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { return config.History, nil } -func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte) ([]byte, error) { +func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) { m := map[string]json.RawMessage{} if err := json.Unmarshal(dt, &m); err != nil { return nil, errors.Wrap(err, "failed to parse image config for patch") @@ -392,12 +629,35 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs } m["rootfs"] = dt + if epoch != nil { + for i, h := range history { + if h.Created == nil || h.Created.After(*epoch) { + history[i].Created = epoch + } + } + } + dt, err = json.Marshal(history) if err != nil { return nil, errors.Wrap(err, "failed to marshal history") } m["history"] = dt + // if epoch is set then clamp creation time + if v, ok := m["created"]; ok && epoch != nil { + var tm time.Time + if err := json.Unmarshal(v, &tm); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal creation time %q", m["created"]) + } + if tm.After(*epoch) { + dt, err = json.Marshal(&epoch) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + } + if _, ok := m["created"]; !ok { var tm *time.Time for _, h := range history { @@ -426,7 +686,7 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs return nil, err } m[binfotypes.ImageConfigField] = dt - } else if _, ok := m[binfotypes.ImageConfigField]; ok { + } else { delete(m, binfotypes.ImageConfigField) } @@ -521,6 +781,26 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo return remote, history } +func RemoveInternalLayerAnnotations(in map[string]string, oci bool) map[string]string { + if len(in) == 0 || !oci { + return nil + } + m := make(map[string]string, len(in)) + for k, v := range in { + // oci supports annotations but don't export internal annotations + switch k { + case "containerd.io/uncompressed", "buildkit/createdat": + continue + default: + if strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + m[k] = v + } + } + return m +} + type refMetadata struct { description string createdAt *time.Time @@ 
-553,20 +833,3 @@ func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata { } return metas } - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} diff --git a/exporter/exporter.go b/exporter/exporter.go index 610481b710f6..0e7d8d14f280 100644 --- a/exporter/exporter.go +++ b/exporter/exporter.go @@ -4,25 +4,49 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/compression" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) +type Source = result.Result[cache.ImmutableRef] + +type Attestation = result.Attestation[cache.ImmutableRef] + type Exporter interface { Resolve(context.Context, map[string]string) (ExporterInstance, error) } type ExporterInstance interface { Name() string - Config() Config - Export(ctx context.Context, src Source, sessionID string) (map[string]string, error) + Config() *Config + Export(ctx context.Context, src *Source, sessionID string) (map[string]string, DescriptorReference, error) } -type Source struct { - Ref cache.ImmutableRef - Refs map[string]cache.ImmutableRef - Metadata map[string][]byte +type DescriptorReference interface { + Release() error + Descriptor() ocispecs.Descriptor } type Config struct { - Compression compression.Config + // Make the field private in case it is initialized with nil compression.Type + compression compression.Config +} + +func NewConfig() *Config { + return &Config{ + compression: compression.Config{ + Type: compression.Default, + }, + } +} + +func NewConfigWithCompression(comp compression.Config) *Config { + return &Config{ + compression: comp, + } +} + +func (c *Config) Compression() compression.Config { + return c.compression } diff --git a/exporter/local/export.go b/exporter/local/export.go index 5daa4aa4268c..7d08b172e019 100644 --- a/exporter/local/export.go +++ b/exporter/local/export.go @@ -2,24 +2,28 @@ package local import ( "context" - "io/ioutil" "os" "strings" "time" - "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" ) +const ( + keyAttestationPrefix = "attestation-prefix" +) + type Opt struct { SessionManager *session.Manager } @@ -35,93 +39,103 @@ func New(opt Opt) (exporter.Exporter, error) { } func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - return &localExporterInstance{localExporter: e}, nil + tm, _, err := epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + + i := &localExporterInstance{ + localExporter: e, + opts: CreateFSOpts{ + Epoch: tm, + }, + } + + for k, v := range opt { + switch k { + case keyAttestationPrefix: + i.opts.AttestationPrefix = v + } + } + + return i, nil } type localExporterInstance struct { *localExporter + opts CreateFSOpts 
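Making the `compression` field private means an `exporter.Config` can only be built through the two constructors, so a nil `compression.Type` can no longer escape through `Config()`. A short sketch of the intended usage, relying on the `compression` helpers used elsewhere in this diff:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/exporter"
	"github.com/moby/buildkit/util/compression"
)

func main() {
	// Exporters with no compression preference get compression.Default.
	c := exporter.NewConfig()
	fmt.Println(c.Compression().Type)

	// Exporters that parsed user options pass an explicit config.
	c = exporter.NewConfigWithCompression(compression.New(compression.Zstd).SetForce(true))
	fmt.Println(c.Compression().Type, c.Compression().Force)
}
```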
} func (e *localExporterInstance) Name() string { - return "exporting to client" + return "exporting to client directory" } -func (e *localExporter) Config() exporter.Config { - return exporter.Config{} +func (e *localExporter) Config() *exporter.Config { + return exporter.NewConfig() } -func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { +func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() + if e.opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, nil, err + } else if ok { + e.opts.Epoch = tm + } + } + caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { - return nil, err + return nil, nil, err } isMap := len(inp.Refs) > 0 - export := func(ctx context.Context, k string, ref cache.ImmutableRef) func() error { - return func() error { - var src string - var err error - var idmap *idtools.IdentityMapping - if ref == nil { - src, err = ioutil.TempDir("", "buildkit") - if err != nil { - return err - } - defer os.RemoveAll(src) - } else { - mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) - if err != nil { - return err - } - - lm := snapshot.LocalMounter(mount) + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok { + return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + p, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, nil, err + } - src, err = lm.Mount() - if err != nil { - return err - } + if !isMap && len(p.Platforms) > 1 { + return nil, nil, errors.Errorf("unable to export multiple platforms without map") + } - idmap = mount.IdentityMapping() + now := time.Now().Truncate(time.Second) - defer lm.Unmount() + export := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error { + return func() error { + outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) + if err != nil { + return err } - - walkOpt := &fsutil.WalkOpt{} - - if idmap != nil { - walkOpt.Map = func(p string, st *fstypes.Stat) bool { - uid, gid, err := idmap.ToContainer(idtools.Identity{ - UID: int(st.Uid), - GID: int(st.Gid), - }) - if err != nil { - return false - } - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } + if cleanup != nil { + defer cleanup() } - fs := fsutil.NewFS(src, walkOpt) lbl := "copying files" if isMap { lbl += " " + k - fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{ + st := fstypes.Stat{ Mode: uint32(os.ModeDir | 0755), Path: strings.Replace(k, "/", "_", -1), - }}}) + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() + } + + outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) if err != nil { return err } } - progress := newProgressHandler(ctx, lbl) - if err := filesync.CopyToCaller(ctx, fs, caller, progress); err != nil { + progress := NewProgressHandler(ctx, lbl) + if err := filesync.CopyToCaller(ctx, outputFS, caller, progress); err != nil { return err } return nil @@ -130,21 +144,25 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, eg, ctx := errgroup.WithContext(ctx) - if isMap { - for k, ref := range inp.Refs { - eg.Go(export(ctx, k, ref)) + if len(p.Platforms) > 0 { + for _, p := 
range p.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + eg.Go(export(ctx, p.ID, r, inp.Attestations[p.ID])) } } else { - eg.Go(export(ctx, "", inp.Ref)) + eg.Go(export(ctx, "", inp.Ref, nil)) } if err := eg.Wait(); err != nil { - return nil, err + return nil, nil, err } - return nil, nil + return nil, nil, nil } -func newProgressHandler(ctx context.Context, id string) func(int, bool) { +func NewProgressHandler(ctx context.Context, id string) func(int, bool) { limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) pw, _, _ := progress.NewFromContext(ctx) now := time.Now() diff --git a/exporter/local/fs.go b/exporter/local/fs.go new file mode 100644 index 000000000000..c5a524aae32f --- /dev/null +++ b/exporter/local/fs.go @@ -0,0 +1,161 @@ +package local + +import ( + "context" + "encoding/json" + "io" + "io/fs" + "os" + "path" + "strconv" + "time" + + "github.com/docker/docker/pkg/idtools" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/util/staticfs" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" +) + +type CreateFSOpts struct { + Epoch *time.Time + AttestationPrefix string +} + +func CreateFS(ctx context.Context, sessionID string, k string, ref cache.ImmutableRef, attestations []exporter.Attestation, defaultTime time.Time, opt CreateFSOpts) (fsutil.FS, func() error, error) { + var cleanup func() error + var src string + var err error + var idmap *idtools.IdentityMapping + if ref == nil { + src, err = os.MkdirTemp("", "buildkit") + if err != nil { + return nil, nil, err + } + cleanup = func() error { return os.RemoveAll(src) } + } else { + mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) + if err != nil { + return nil, nil, err + } + + lm := snapshot.LocalMounter(mount) + + src, err = lm.Mount() + if err != nil { + return nil, nil, err + } + + idmap = mount.IdentityMapping() + + cleanup = lm.Unmount + } + + walkOpt := &fsutil.WalkOpt{} + var idMapFunc func(p string, st *fstypes.Stat) fsutil.MapResult + + if idmap != nil { + idMapFunc = func(p string, st *fstypes.Stat) fsutil.MapResult { + uid, gid, err := idmap.ToContainer(idtools.Identity{ + UID: int(st.Uid), + GID: int(st.Gid), + }) + if err != nil { + return fsutil.MapResultExclude + } + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return fsutil.MapResultKeep + } + } + + walkOpt.Map = func(p string, st *fstypes.Stat) fsutil.MapResult { + res := fsutil.MapResultKeep + if idMapFunc != nil { + res = idMapFunc(p, st) + } + if opt.Epoch != nil { + st.ModTime = opt.Epoch.UnixNano() + } + return res + } + + outputFS := fsutil.NewFS(src, walkOpt) + attestations = attestation.Filter(attestations, nil, map[string][]byte{ + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)), + }) + attestations, err = attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations) + if err != nil { + return nil, nil, err + } + if len(attestations) > 0 { + subjects := []intoto.Subject{} + err = outputFS.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + f, err := 
outputFS.Open(path) + if err != nil { + return err + } + defer f.Close() + d := digest.Canonical.Digester() + if _, err := io.Copy(d.Hash(), f); err != nil { + return err + } + subjects = append(subjects, intoto.Subject{ + Name: path, + Digest: result.ToDigestMap(d.Digest()), + }) + return nil + }) + if err != nil { + return nil, nil, err + } + + stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, subjects) + if err != nil { + return nil, nil, err + } + stmtFS := staticfs.NewFS() + + names := map[string]struct{}{} + for i, stmt := range stmts { + dt, err := json.MarshalIndent(stmt, "", " ") + if err != nil { + return nil, nil, errors.Wrap(err, "failed to marshal attestation") + } + + name := opt.AttestationPrefix + path.Base(attestations[i].Path) + if _, ok := names[name]; ok { + return nil, nil, errors.Errorf("duplicate attestation path name %s", name) + } + names[name] = struct{}{} + + st := fstypes.Stat{ + Mode: 0600, + Path: name, + ModTime: defaultTime.UnixNano(), + } + if opt.Epoch != nil { + st.ModTime = opt.Epoch.UnixNano() + } + stmtFS.Add(name, st, dt) + } + + outputFS = staticfs.NewMergeFS(outputFS, stmtFS) + } + + return outputFS, cleanup, nil +} diff --git a/exporter/oci/export.go b/exporter/oci/export.go index 153211c9b709..60982f4daf3c 100644 --- a/exporter/oci/export.go +++ b/exporter/oci/export.go @@ -4,19 +4,23 @@ import ( "context" "encoding/base64" "encoding/json" + "fmt" "strconv" "strings" "time" archiveexporter "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/containerimage" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/session" + sessioncontent "github.com/moby/buildkit/session/content" "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" @@ -25,26 +29,18 @@ import ( "github.com/moby/buildkit/util/progress" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" ) type ExporterVariant string const ( - keyImageName = "name" - keyLayerCompression = "compression" - VariantOCI = "oci" - VariantDocker = "docker" - ociTypes = "oci-mediatypes" - keyForceCompression = "force-compression" - keyCompressionLevel = "compression-level" - keyBuildInfo = "buildinfo" - keyBuildInfoAttrs = "buildinfo-attrs" - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. - // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
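Each unbundled attestation statement is written next to the exported files under `AttestationPrefix` plus the base name of its path, with duplicate names rejected rather than silently overwritten. A standalone helper mirroring just that naming rule (hypothetical, for illustration only):

```go
package main

import (
	"fmt"
	"path"
)

// attestationFileName mirrors the naming used in CreateFS: prefix plus the
// base name of the attestation path, rejecting duplicates.
func attestationFileName(prefix, attPath string, seen map[string]struct{}) (string, error) {
	name := prefix + path.Base(attPath)
	if _, ok := seen[name]; ok {
		return "", fmt.Errorf("duplicate attestation path name %s", name)
	}
	seen[name] = struct{}{}
	return name, nil
}

func main() {
	seen := map[string]struct{}{}
	n, _ := attestationFileName("", "/run/a/sbom.spdx.json", seen)
	fmt.Println(n) // sbom.spdx.json
	if _, err := attestationFileName("", "/run/b/sbom.spdx.json", seen); err != nil {
		fmt.Println(err) // duplicate attestation path name sbom.spdx.json
	}
}
```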
- preferNondistLayersKey = "prefer-nondist-layers" + VariantOCI = "oci" + VariantDocker = "docker" +) + +const ( + keyTar = "tar" ) type Opt struct { @@ -64,85 +60,35 @@ func New(opt Opt) (exporter.Exporter, error) { } func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - var ot *bool i := &imageExporterInstance{ - imageExporter: e, - layerCompression: compression.Default, - buildInfo: true, + imageExporter: e, + tar: true, + opts: containerimage.ImageCommitOpts{ + RefCfg: cacheconfig.RefConfig{ + Compression: compression.New(compression.Default), + }, + BuildInfo: true, + OCITypes: e.opt.Variant == VariantOCI, + }, + } + + opt, err := i.opts.Load(opt) + if err != nil { + return nil, err } - var esgz bool + for k, v := range opt { switch k { - case keyImageName: - i.name = v - case keyLayerCompression: - switch v { - case "gzip": - i.layerCompression = compression.Gzip - case "estargz": - i.layerCompression = compression.EStargz - esgz = true - case "zstd": - i.layerCompression = compression.Zstd - case "uncompressed": - i.layerCompression = compression.Uncompressed - default: - return nil, errors.Errorf("unsupported layer compression type: %v", v) - } - case keyForceCompression: - if v == "" { - i.forceCompression = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %v specified for %s", v, k) - } - i.forceCompression = b - case keyCompressionLevel: - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-int value %s specified for %s", v, k) - } - v := int(ii) - i.compressionLevel = &v - case ociTypes: - ot = new(bool) - if v == "" { - *ot = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - *ot = b - case keyBuildInfo: - if v == "" { - i.buildInfo = true - continue - } - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.buildInfo = b - case keyBuildInfoAttrs: + case keyTar: if v == "" { - i.buildInfoAttrs = false + i.tar = true continue } b, err := strconv.ParseBool(v) if err != nil { return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } - i.buildInfoAttrs = b - case preferNondistLayersKey: - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value specified for %s", k) - } - i.preferNonDist = b + i.tar = b default: if i.meta == nil { i.meta = make(map[string][]byte) @@ -150,59 +96,27 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp i.meta[k] = []byte(v) } } - if ot == nil { - i.ociTypes = e.opt.Variant == VariantOCI - } else { - i.ociTypes = *ot - } - if esgz && !i.ociTypes { - logrus.Warn("forcibly turning on oci-mediatype mode for estargz") - i.ociTypes = true - } return i, nil } type imageExporterInstance struct { *imageExporter - meta map[string][]byte - name string - ociTypes bool - layerCompression compression.Type - forceCompression bool - compressionLevel *int - buildInfo bool - buildInfoAttrs bool - preferNonDist bool + opts containerimage.ImageCommitOpts + tar bool + meta map[string][]byte } func (e *imageExporterInstance) Name() string { - return "exporting to oci image format" + return fmt.Sprintf("exporting to %s image format", e.opt.Variant) } -func (e *imageExporterInstance) Config() exporter.Config { - return exporter.Config{ - Compression: 
e.compression(), - } +func (e *imageExporterInstance) Config() *exporter.Config { + return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression) } -func (e *imageExporterInstance) compression() compression.Config { - c := compression.New(e.layerCompression).SetForce(e.forceCompression) - if e.compressionLevel != nil { - c = c.SetLevel(*e.compressionLevel) - } - return c -} - -func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig { - return cacheconfig.RefConfig{ - Compression: e.compression(), - PreferNonDistributable: e.preferNonDist, - } -} - -func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) { +func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) { if e.opt.Variant == VariantDocker && len(src.Refs) > 0 { - return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") + return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") } if src.Metadata == nil { @@ -212,24 +126,43 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, src.Metadata[k] = v } + opts := e.opts + as, _, err := containerimage.ParseAnnotations(src.Metadata) + if err != nil { + return nil, nil, err + } + opts.Annotations = opts.Annotations.Merge(as) + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) if err != nil { - return nil, err + return nil, nil, err } - defer done(context.TODO()) + defer func() { + if descref == nil { + done(context.TODO()) + } + }() - desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.refCfg(), e.buildInfo, e.buildInfoAttrs, sessionID) + desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts) if err != nil { - return nil, err + return nil, nil, err } defer func() { - e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + if err == nil { + descref = containerimage.NewDescriptorReference(*desc, done) + } }() if desc.Annotations == nil { desc.Annotations = map[string]string{} } - desc.Annotations[ocispecs.AnnotationCreated] = time.Now().UTC().Format(time.RFC3339) + if _, ok := desc.Annotations[ocispecs.AnnotationCreated]; !ok { + tm := time.Now() + if opts.Epoch != nil { + tm = *opts.Epoch + } + desc.Annotations[ocispecs.AnnotationCreated] = tm.UTC().Format(time.RFC3339) + } resp := make(map[string]string) @@ -241,17 +174,17 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, dtdesc, err := json.Marshal(desc) if err != nil { - return nil, err + return nil, nil, err } resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc) - if n, ok := src.Metadata["image.name"]; e.name == "*" && ok { - e.name = string(n) + if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok { + e.opts.ImageName = string(n) } - names, err := normalizedNames(e.name) + names, err := normalizedNames(e.opts.ImageName) if err != nil { - return nil, err + return nil, nil, err } if len(names) != 0 { @@ -264,7 +197,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, expOpts = append(expOpts, archiveexporter.WithAllPlatforms(), archiveexporter.WithSkipDockerManifest()) case VariantDocker: default: - return nil, errors.Errorf("invalid variant %q", e.opt.Variant) + return nil, nil, errors.Errorf("invalid variant %q", e.opt.Variant) } timeoutCtx, 
cancel := context.WithTimeout(ctx, 5*time.Second) @@ -272,26 +205,21 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { - return nil, err - } - - w, err := filesync.CopyFileWriter(ctx, resp, caller) - if err != nil { - return nil, err + return nil, nil, err } mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID)) + remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, err + return nil, nil, err } remote := remotes[0] // unlazy before tar export as the tar writer does not handle // layer blobs in parallel (whereas unlazy does) if unlazier, ok := remote.Provider.(cache.Unlazier); ok { if err := unlazier.Unlazy(ctx); err != nil { - return nil, err + return nil, nil, err } } for _, desc := range remote.Descriptors { @@ -300,14 +228,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, } if len(src.Refs) > 0 { for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID)) + remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, err + return nil, nil, err } remote := remotes[0] if unlazier, ok := remote.Provider.(cache.Unlazier); ok { if err := unlazier.Unlazy(ctx); err != nil { - return nil, err + return nil, nil, err } } for _, desc := range remote.Descriptors { @@ -316,36 +244,41 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, } } - report := oneOffProgress(ctx, "sending tarball") - if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil { - w.Close() + if e.tar { + w, err := filesync.CopyFileWriter(ctx, resp, caller) + if err != nil { + return nil, nil, err + } + + report := progress.OneOff(ctx, "sending tarball") + if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil { + w.Close() + if grpcerrors.Code(err) == codes.AlreadyExists { + return resp, nil, report(nil) + } + return nil, nil, report(err) + } + err = w.Close() if grpcerrors.Code(err) == codes.AlreadyExists { - return resp, report(nil) + return resp, nil, report(nil) + } + if err != nil { + return nil, nil, report(err) + } + report(nil) + } else { + ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") + store := sessioncontent.NewCallerStore(caller, "export") + if err != nil { + return nil, nil, err + } + err := contentutil.CopyChain(ctx, store, mprovider, *desc) + if err != nil { + return nil, nil, err } - return nil, report(err) - } - err = w.Close() - if grpcerrors.Code(err) == codes.AlreadyExists { - return resp, report(nil) } - return resp, report(err) -} -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } + return resp, nil, nil } func normalizedNames(name string) ([]string, error) { diff --git a/exporter/tar/export.go b/exporter/tar/export.go index 0febefd0b023..4d136c89c1ca 100644 --- a/exporter/tar/export.go +++ b/exporter/tar/export.go @@ -2,18 
+2,18 @@ package local import ( "context" - "io/ioutil" "os" "strconv" "strings" "time" - "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/local" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/progress" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -21,6 +21,8 @@ import ( ) const ( + attestationPrefixKey = "attestation-prefix" + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was // already found to use a non-distributable media type. // When this option is not set, the exporter will change the media type of the layer to a distributable one. @@ -44,13 +46,23 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { li := &localExporterInstance{localExporter: e} - v, ok := opt[preferNondistLayersKey] - if ok { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) + tm, opt, err := epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + li.opts.Epoch = tm + + for k, v := range opt { + switch k { + case preferNondistLayersKey: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) + } + li.preferNonDist = b + case attestationPrefixKey: + li.opts.AttestationPrefix = v } - li.preferNonDist = b } return li, nil @@ -58,19 +70,20 @@ func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exp type localExporterInstance struct { *localExporter + opts local.CreateFSOpts preferNonDist bool } func (e *localExporterInstance) Name() string { - return "exporting to client" + return "exporting to client tarball" } -func (e *localExporterInstance) Config() exporter.Config { - return exporter.Config{} +func (e *localExporterInstance) Config() *exporter.Config { + return exporter.NewConfig() } -func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { - var defers []func() +func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { + var defers []func() error defer func() { for i := len(defers) - 1; i >= 0; i-- { @@ -78,80 +91,79 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, } }() - getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) { - var src string - var err error - var idmap *idtools.IdentityMapping - if ref == nil { - src, err = ioutil.TempDir("", "buildkit") - if err != nil { - return nil, err - } - defers = append(defers, func() { os.RemoveAll(src) }) - } else { - mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) - if err != nil { - return nil, err - } - - lm := snapshot.LocalMounter(mount) - - src, err = lm.Mount() - if err != nil { - return nil, err - } + if e.opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, nil, err + } else if ok { + e.opts.Epoch = tm + } + } - idmap = mount.IdentityMapping() + now := 
time.Now().Truncate(time.Second) - defers = append(defers, func() { lm.Unmount() }) + getDir := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) (*fsutil.Dir, error) { + outputFS, cleanup, err := local.CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) + if err != nil { + return nil, err + } + if cleanup != nil { + defers = append(defers, cleanup) } - walkOpt := &fsutil.WalkOpt{} - - if idmap != nil { - walkOpt.Map = func(p string, st *fstypes.Stat) bool { - uid, gid, err := idmap.ToContainer(idtools.Identity{ - UID: int(st.Uid), - GID: int(st.Gid), - }) - if err != nil { - return false - } - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } + st := fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() } return &fsutil.Dir{ - FS: fsutil.NewFS(src, walkOpt), - Stat: fstypes.Stat{ - Mode: uint32(os.ModeDir | 0755), - Path: strings.Replace(k, "/", "_", -1), - }, + FS: outputFS, + Stat: st, }, nil } + isMap := len(inp.Refs) > 0 + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok { + return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + p, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, nil, err + } + if !isMap && len(p.Platforms) > 1 { + return nil, nil, errors.Errorf("unable to export multiple platforms without map") + } + var fs fsutil.FS - if len(inp.Refs) > 0 { - dirs := make([]fsutil.Dir, 0, len(inp.Refs)) - for k, ref := range inp.Refs { - d, err := getDir(ctx, k, ref) + if len(p.Platforms) > 0 { + dirs := make([]fsutil.Dir, 0, len(p.Platforms)) + for _, p := range p.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + d, err := getDir(ctx, p.ID, r, inp.Attestations[p.ID]) if err != nil { - return nil, err + return nil, nil, err } dirs = append(dirs, *d) } - var err error - fs, err = fsutil.SubDirFS(dirs) - if err != nil { - return nil, err + if isMap { + var err error + fs, err = fsutil.SubDirFS(dirs) + if err != nil { + return nil, nil, err + } + } else { + fs = dirs[0].FS } } else { - d, err := getDir(ctx, "", inp.Ref) + d, err := getDir(ctx, "", inp.Ref, nil) if err != nil { - return nil, err + return nil, nil, err } fs = d.FS } @@ -161,34 +173,17 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { - return nil, err + return nil, nil, err } w, err := filesync.CopyFileWriter(ctx, nil, caller) if err != nil { - return nil, err + return nil, nil, err } - report := oneOffProgress(ctx, "sending tarball") + report := progress.OneOff(ctx, "sending tarball") if err := fsutil.WriteTar(ctx, fs, w); err != nil { w.Close() - return nil, report(err) - } - return nil, report(w.Close()) -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err + return nil, nil, report(err) } + return nil, nil, report(w.Close()) } diff --git a/exporter/util/epoch/parse.go b/exporter/util/epoch/parse.go new file mode 100644 index 000000000000..63f806e1b76a --- 
/dev/null
+++ b/exporter/util/epoch/parse.go
@@ -0,0 +1,65 @@
+package epoch
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
+	"github.com/pkg/errors"
+)
+
+const (
+	frontendSourceDateEpochArg = "build-arg:SOURCE_DATE_EPOCH"
+
+	KeySourceDateEpoch = "source-date-epoch"
+)
+
+func ParseBuildArgs(opt map[string]string) (string, bool) {
+	v, ok := opt[frontendSourceDateEpochArg]
+	return v, ok
+}
+
+func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, error) {
+	rest := make(map[string]string, len(opt))
+
+	var tm *time.Time
+
+	for k, v := range opt {
+		switch k {
+		case KeySourceDateEpoch:
+			var err error
+			tm, err = parseTime(k, v)
+			if err != nil {
+				return nil, nil, err
+			}
+		default:
+			rest[k] = v
+		}
+	}
+
+	return tm, rest, nil
+}
+
+func ParseSource(inp *exporter.Source) (*time.Time, bool, error) {
+	if v, ok := inp.Metadata[exptypes.ExporterEpochKey]; ok {
+		epoch, err := parseTime("", string(v))
+		if err != nil {
+			return nil, false, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH from frontend: %q", v)
+		}
+		return epoch, true, nil
+	}
+	return nil, false, nil
+}
+
+func parseTime(key, value string) (*time.Time, error) {
+	if value == "" {
+		return nil, nil
+	}
+	sde, err := strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid %s: %s", key, err)
+	}
+	tm := time.Unix(sde, 0).UTC()
+	return &tm, nil
+}
diff --git a/frontend/attestations/parse.go b/frontend/attestations/parse.go
new file mode 100644
index 000000000000..00de649fdefe
--- /dev/null
+++ b/frontend/attestations/parse.go
@@ -0,0 +1,81 @@
+package attestations
+
+import (
+	"encoding/csv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	KeyTypeSbom       = "sbom"
+	KeyTypeProvenance = "provenance"
+)
+
+const (
+	defaultSBOMGenerator = "docker/buildkit-syft-scanner:stable-1"
+)
+
+func Filter(v map[string]string) map[string]string {
+	attests := make(map[string]string)
+	for k, v := range v {
+		if strings.HasPrefix(k, "attest:") {
+			attests[k] = v
+			continue
+		}
+		if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
+			attests[k] = v
+			continue
+		}
+	}
+	return attests
+}
+
+func Validate(values map[string]map[string]string) (map[string]map[string]string, error) {
+	for k := range values {
+		if k != KeyTypeSbom && k != KeyTypeProvenance {
+			return nil, errors.Errorf("unknown attestation type %q", k)
+		}
+	}
+	return values, nil
+}
+
+func Parse(values map[string]string) (map[string]map[string]string, error) {
+	attests := make(map[string]string)
+	for k, v := range values {
+		if strings.HasPrefix(k, "attest:") {
+			attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v
+			continue
+		}
+		if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
+			attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v
+			continue
+		}
+	}
+
+	out := make(map[string]map[string]string)
+	for k, v := range attests {
+		attrs := make(map[string]string)
+		out[k] = attrs
+		if k == KeyTypeSbom {
+			attrs["generator"] = defaultSBOMGenerator
+		}
+		if v == "" {
+			continue
+		}
+		csvReader := csv.NewReader(strings.NewReader(v))
+		fields, err := csvReader.Read()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse %s", k)
+		}
+		for _, field := range fields {
+			parts := strings.SplitN(field, "=", 2)
+			if len(parts) != 2 {
+				parts = append(parts, "")
+			}
+			attrs[parts[0]] = parts[1]
+		}
+	}
+
+	return Validate(out)
+}
diff --git a/frontend/attestations/sbom/sbom.go
b/frontend/attestations/sbom/sbom.go
new file mode 100644
index 000000000000..113797b2139c
--- /dev/null
+++ b/frontend/attestations/sbom/sbom.go
@@ -0,0 +1,112 @@
+package sbom
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"path"
+	"strings"
+
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	"github.com/moby/buildkit/client/llb"
+	gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+	"github.com/moby/buildkit/solver/result"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const (
+	CoreSBOMName    = "sbom"
+	ExtraSBOMPrefix = CoreSBOMName + "-"
+
+	srcDir = "/run/src/"
+	outDir = "/run/out/"
+)
+
+// Scanner is a function type for scanning the contents of a state and
+// returning a new attestation and state representing the scan results.
+//
+// A scanner is designed to scan a single state; however, additional states
+// can also be attached to provide additional information, such as scans of
+// build-contexts or multi-stage builds. Handling these separately allows the
+// scanner to optionally ignore these or to mark them as such in the
+// attestation.
+type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error)
+
+func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string) (Scanner, error) {
+	if scanner == "" {
+		return nil, nil
+	}
+
+	_, dt, err := resolver.ResolveImageConfig(ctx, scanner, llb.ResolveImageConfigOpt{})
+	if err != nil {
+		return nil, err
+	}
+
+	var cfg ocispecs.Image
+	if err := json.Unmarshal(dt, &cfg); err != nil {
+		return nil, err
+	}
+
+	var args []string
+	args = append(args, cfg.Config.Entrypoint...)
+	args = append(args, cfg.Config.Cmd...)
+	if len(args) == 0 {
+		return nil, errors.Errorf("scanner %s does not have cmd", scanner)
+	}
+
+	return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) {
+		var env []string
+		env = append(env, cfg.Config.Env...)
+		env = append(env, "BUILDKIT_SCAN_DESTINATION="+outDir)
+		env = append(env, "BUILDKIT_SCAN_SOURCE="+path.Join(srcDir, "core", CoreSBOMName))
+		if len(extras) > 0 {
+			env = append(env, "BUILDKIT_SCAN_SOURCE_EXTRAS="+path.Join(srcDir, "extras/"))
+		}
+
+		runOpts := []llb.RunOption{
+			llb.WithCustomName(fmt.Sprintf("[%s] generating sbom using %s", name, scanner)),
+		}
+		for _, opt := range opts {
+			runOpts = append(runOpts, opt)
+		}
+		runOpts = append(runOpts, llb.Dir(cfg.Config.WorkingDir))
+		runOpts = append(runOpts, llb.Args(args))
+		for _, e := range env {
+			k, v, _ := strings.Cut(e, "=")
+			runOpts = append(runOpts, llb.AddEnv(k, v))
+		}
+
+		runscan := llb.Image(scanner).Run(runOpts...)
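(An aside on the execution contract assembled in this closure, completed by the mounts added just below: the state under scan is mounted read-only beneath /run/src, the scanner image's own entrypoint/cmd runs with the BUILDKIT_SCAN_* variables above, and anything written to /run/out becomes the attestation payload. A minimal scanner entrypoint honoring that contract might look like the following sketch; the output file naming and the SPDX stub are assumptions for illustration, and real generators such as docker/buildkit-syft-scanner emit complete documents.)

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	src := os.Getenv("BUILDKIT_SCAN_SOURCE")      // single rootfs to scan
	dst := os.Getenv("BUILDKIT_SCAN_DESTINATION") // directory collected as the attestation

	roots := []string{src}
	// Extra roots (build contexts, other stages) are optional subdirectories.
	if extras := os.Getenv("BUILDKIT_SCAN_SOURCE_EXTRAS"); extras != "" {
		entries, err := os.ReadDir(extras)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		for _, e := range entries {
			roots = append(roots, filepath.Join(extras, e.Name()))
		}
	}

	for _, root := range roots {
		// A real scanner would walk root and emit a full SPDX document;
		// this stub only shows where output is expected to land.
		out := filepath.Join(dst, filepath.Base(root)+".spdx.json")
		doc := []byte(`{"spdxVersion":"SPDX-2.3","name":"` + filepath.Base(root) + `"}`)
		if err := os.WriteFile(out, doc, 0o600); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}
```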
+ runscan.AddMount("/tmp", llb.Scratch(), llb.Tmpfs()) + + runscan.AddMount(path.Join(srcDir, "core", CoreSBOMName), ref, llb.Readonly) + for k, extra := range extras { + runscan.AddMount(path.Join(srcDir, "extras", ExtraSBOMPrefix+k), extra, llb.Readonly) + } + + stsbom := runscan.AddMount(outDir, llb.Scratch()) + return result.Attestation[llb.State]{ + Kind: gatewaypb.AttestationKindBundle, + Ref: stsbom, + Metadata: map[string][]byte{ + result.AttestationReasonKey: []byte(result.AttestationReasonSBOM), + result.AttestationSBOMCore: []byte(CoreSBOMName), + }, + InToto: result.InTotoAttestation{ + PredicateType: intoto.PredicateSPDX, + }, + }, nil + }, nil +} + +func HasSBOM[T any](res *result.Result[T]) bool { + for _, as := range res.Attestations { + for _, a := range as { + if a.InToto.PredicateType == intoto.PredicateSPDX { + return true + } + } + } + return false +} diff --git a/frontend/dockerfile/builder/build.go b/frontend/dockerfile/builder/build.go index e0a1806901bc..aafd9c9a73f3 100644 --- a/frontend/dockerfile/builder/build.go +++ b/frontend/dockerfile/builder/build.go @@ -12,6 +12,7 @@ import ( "regexp" "strconv" "strings" + "time" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" @@ -19,15 +20,20 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations" + "github.com/moby/buildkit/frontend/attestations/sbom" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/util/gitutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -39,45 +45,59 @@ const ( defaultDockerfileName = "Dockerfile" dockerignoreFilename = ".dockerignore" - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCgroupParent = "cgroup-parent" - keyContextSubDir = "contextsubdir" - keyForceNetwork = "force-network-mode" - keyGlobalAddHosts = "add-hosts" - keyHostname = "hostname" - keyImageResolveMode = "image-resolve-mode" - keyMultiPlatform = "multi-platform" - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyNoCache = "no-cache" - keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented - keyShmSize = "shm-size" - keyTargetPlatform = "platform" - keyUlimit = "ulimit" + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + contextPrefix = "context:" + inputMetadataPrefix = "input-metadata:" + + keyTarget = "target" + keyFilename = "filename" + keyCacheFrom = "cache-from" // for registry only. 
deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + keyCgroupParent = "cgroup-parent" + keyContextSubDir = "contextsubdir" + keyForceNetwork = "force-network-mode" + keyGlobalAddHosts = "add-hosts" + keyHostname = "hostname" + keyImageResolveMode = "image-resolve-mode" + keyMultiPlatform = "multi-platform" + keyNameContext = "contextkey" + keyNameDockerfile = "dockerfilekey" + keyNoCache = "no-cache" + keyShmSize = "shm-size" + keyTargetPlatform = "platform" + keyUlimit = "ulimit" + keyRequestID = "requestid" // Don't forget to update frontend documentation if you add - // a new build-arg: frontend/dockerfile/docs/syntax.md + // a new build-arg: frontend/dockerfile/docs/reference.md keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" + keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" ) var httpPrefix = regexp.MustCompile(`^https?://`) -var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) -func Build(ctx context.Context, c client.Client) (*client.Result, error) { +func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { opts := c.BuildOpts().Opts caps := c.BuildOpts().LLBCaps gwcaps := c.BuildOpts().Caps + if err := caps.Supports(pb.CapFileBase); err != nil { + return nil, errors.Wrap(err, "needs BuildKit 0.5 or later") + } + if opts["override-copy-image"] != "" { + return nil, errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") + } + if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil && b { + return nil, errors.New("support for \"build-arg:BUILDKIT_DISABLE_FILEOP\" was removed in BuildKit 0.11") + } + } + allowForward, capsError := validateCaps(opts["frontend.caps"]) if !allowForward && capsError != nil { return nil, capsError @@ -168,11 +188,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { llb.Differ(llb.DiffNone, false), ) - fileop := useFileOp(opts, &caps) - var buildContext *llb.State isNotLocalContext := false - if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDirArg]); ok { + keepGit := false + if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { + keepGit = v + } + if st, ok := detectGitContext(opts[localNameContext], keepGit); ok { if !forceLocalDockerfile { src = *st } @@ -205,28 +227,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrapf(err, "failed to read downloaded context") } if isArchive(dt) { - if fileop { - bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ - AttemptUnpack: true, - })) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } else { - copyImage := opts[keyOverrideCopyImage] - if copyImage == "" { - copyImage = dockerfile2llb.DefaultCopyImage - } - unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")). 
- Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context")) - unpack.AddMount("/src", httpContext, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc + bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ + AttemptUnpack: true, + })) + if !forceLocalDockerfile { + src = bc } + buildContext = &bc } else { filename = "context" if !forceLocalDockerfile { @@ -257,7 +264,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { if buildContext != nil { if sub, ok := opts[keyContextSubDir]; ok { - buildContext = scopeToSubDir(buildContext, fileop, sub) + buildContext = scopeToSubDir(buildContext, sub) } } @@ -380,7 +387,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline) } return res, err - } else if ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)); ok { + } else if ref, cmdline, loc, ok := parser.DetectSyntax(dtDockerfile); ok { res, err := forwardGateway(ctx, c, ref, cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { return nil, wrapSource(err, sourceMap, loc) @@ -408,7 +415,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Errorf("invalid boolean value %s", v) } if !b && exportMap { - return nil, errors.Errorf("returning multiple target plaforms is not allowed") + return nil, errors.Errorf("returning multiple target platforms is not allowed") } exportMap = b } @@ -422,55 +429,107 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { opts[keyHostname] = v } - eg, ctx = errgroup.WithContext(ctx) + epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) + if err != nil { + return nil, err + } + + target := opts[keyTarget] + convertOpt := dockerfile2llb.ConvertOpt{ + Target: target, + MetaResolver: c, + BuildArgs: filter(opts, buildArgPrefix), + Labels: filter(opts, labelPrefix), + CacheIDNamespace: opts[keyCacheNSArg], + SessionID: c.BuildOpts().SessionID, + BuildContext: buildContext, + Excludes: excludes, + IgnoreCache: ignoreCache, + TargetPlatform: targetPlatforms[0], + BuildPlatforms: buildPlatforms, + ImageResolveMode: resolveMode, + PrefixPlatform: exportMap, + ExtraHosts: extraHosts, + ShmSize: shmSize, + Ulimit: ulimit, + CgroupParent: opts[keyCgroupParent], + ForceNetMode: defaultNetMode, + LLBCaps: &caps, + SourceMap: sourceMap, + Hostname: opts[keyHostname], + SourceDateEpoch: epoch, + Warn: func(msg, url string, detail [][]byte, location *parser.Range) { + c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url)) + }, + ContextByName: contextByNameFunc(c, c.BuildOpts().SessionID), + } + + defer func() { + var el *parser.ErrorLocation + if errors.As(err, &el) { + err = wrapSource(err, sourceMap, el.Location) + } + }() + + if req, ok := opts[keyRequestID]; ok { + switch req { + case outline.SubrequestsOutlineDefinition.Name: + o, err := dockerfile2llb.Dockefile2Outline(ctx, dtDockerfile, convertOpt) + if err != nil { + return nil, err + } + return o.ToResult() + case targets.SubrequestsTargetsDefinition.Name: + targets, err := dockerfile2llb.ListTargets(ctx, dtDockerfile) + if err != nil { + return nil, err + } + return targets.ToResult() + default: + return nil, errdefs.NewUnsupportedSubrequestError(req) + } + } + + var scanner sbom.Scanner + attests, err 
:= attestations.Parse(opts)
+	if err != nil {
+		return nil, err
+	}
+	if attrs, ok := attests[attestations.KeyTypeSbom]; ok {
+		src, ok := attrs["generator"]
+		if !ok {
+			return nil, errors.Errorf("sbom scanner cannot be empty")
+		}
+		ref, err := reference.ParseNormalizedNamed(src)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse sbom scanner %s", src)
+		}
+		ref = reference.TagNameOnly(ref)
+
+		scanner, err = sbom.CreateSBOMScanner(ctx, c, ref.String())
+		if err != nil {
+			return nil, err
+		}
+	}
+	scanTargets := make([]*dockerfile2llb.SBOMTargets, len(targetPlatforms))
+
+	eg, ctx2 := errgroup.WithContext(ctx)
 	for i, tp := range targetPlatforms {
 		func(i int, tp *ocispecs.Platform) {
 			eg.Go(func() (err error) {
-				defer func() {
-					var el *parser.ErrorLocation
-					if errors.As(err, &el) {
-						err = wrapSource(err, sourceMap, el.Location)
-					}
-				}()
-
-				st, img, bi, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
-					Target:            opts[keyTarget],
-					MetaResolver:      c,
-					BuildArgs:         filter(opts, buildArgPrefix),
-					Labels:            filter(opts, labelPrefix),
-					CacheIDNamespace:  opts[keyCacheNSArg],
-					SessionID:         c.BuildOpts().SessionID,
-					BuildContext:      buildContext,
-					Excludes:          excludes,
-					IgnoreCache:       ignoreCache,
-					TargetPlatform:    tp,
-					BuildPlatforms:    buildPlatforms,
-					ImageResolveMode:  resolveMode,
-					PrefixPlatform:    exportMap,
-					ExtraHosts:        extraHosts,
-					ShmSize:           shmSize,
-					Ulimit:            ulimit,
-					CgroupParent:      opts[keyCgroupParent],
-					ForceNetMode:      defaultNetMode,
-					OverrideCopyImage: opts[keyOverrideCopyImage],
-					LLBCaps:           &caps,
-					SourceMap:         sourceMap,
-					Hostname:          opts[keyHostname],
-					Warn: func(msg, url string, detail [][]byte, location *parser.Range) {
-						if i != 0 {
-							return
-						}
-						c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url))
-					},
-					ContextByName: contextByNameFunc(c, tp),
-				})
-
+				opt := convertOpt
+				opt.TargetPlatform = tp
+				if i != 0 {
+					opt.Warn = nil
+				}
+				opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID)
+				st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt)
 				if err != nil {
 					return err
 				}
-				def, err := st.Marshal(ctx)
+				def, err := st.Marshal(ctx2)
 				if err != nil {
 					return errors.Wrapf(err, "failed to marshal LLB definition")
 				}
@@ -506,7 +565,7 @@
 				}
 			}
-			r, err := c.Solve(ctx, client.SolveRequest{
+			r, err := c.Solve(ctx2, client.SolveRequest{
 				Definition:   def.ToPB(),
 				CacheImports: cacheImports,
 			})
@@ -519,30 +578,30 @@
 				return err
 			}
-			buildinfo, err := json.Marshal(bi)
-			if err != nil {
-				return errors.Wrapf(err, "failed to marshal build info")
+			p := platforms.DefaultSpec()
+			if tp != nil {
+				p = *tp
 			}
+			p = platforms.Normalize(p)
+			k := platforms.Format(p)
 			if !exportMap {
 				res.AddMeta(exptypes.ExporterImageConfigKey, config)
-				res.AddMeta(exptypes.ExporterBuildInfo, buildinfo)
 				res.SetRef(ref)
-			} else {
-				p := platforms.DefaultSpec()
-				if tp != nil {
-					p = *tp
-				}
-				k := platforms.Format(p)
+				expPlatforms.Platforms[i] = exptypes.Platform{
+					ID:       k,
+					Platform: p,
+				}
+			} else {
 				res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config)
-				res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), buildinfo)
 				res.AddRef(k, ref)
 				expPlatforms.Platforms[i] = exptypes.Platform{
 					ID:       k,
 					Platform: p,
 				}
 			}
+			scanTargets[i] = scanTarget
 			return nil
 		})
 	}(i, tp)
@@ -552,14 +611,45 @@ func Build(ctx context.Context, c client.Client) 
(*client.Result, error) { return nil, err } - if exportMap { - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err + if scanner != nil { + for i, p := range expPlatforms.Platforms { + target := scanTargets[i] + + var opts []llb.ConstraintsOpt + if target.IgnoreCache { + opts = append(opts, llb.IgnoreCache) + } + att, err := scanner(ctx, p.ID, target.Core, target.Extras, opts...) + if err != nil { + return nil, err + } + + attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (client.Reference, error) { + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, frontend.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + return r.Ref, nil + }) + if err != nil { + return nil, err + } + res.AddAttestation(p.ID, *attSolve) } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + return res, nil } @@ -606,40 +696,21 @@ func filter(opt map[string]string, key string) map[string]string { return m } -func detectGitContext(ref, gitContext string) (*llb.State, bool) { - found := false - if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) { - found = true - } - - keepGit := false - if gitContext != "" { - if v, err := strconv.ParseBool(gitContext); err == nil { - keepGit = v - } - } - - for _, prefix := range []string{"git://", "github.com/", "git@"} { - if strings.HasPrefix(ref, prefix) { - found = true - break - } - } - if !found { +func detectGitContext(ref string, keepGit bool) (*llb.State, bool) { + g, err := gitutil.ParseGitRef(ref) + if err != nil { return nil, false } - - parts := strings.SplitN(ref, "#", 2) - branch := "" - if len(parts) > 1 { - branch = parts[1] + commit := g.Commit + if g.SubDir != "" { + commit += ":" + g.SubDir } gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} if keepGit { gitOpts = append(gitOpts, llb.KeepGitDir()) } - st := llb.Git(parts[0], branch, gitOpts...) + st := llb.Git(g.Remote, commit, gitOpts...) return &st, true } @@ -765,27 +836,10 @@ func parseNetMode(v string) (pb.NetMode, error) { } } -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} - -func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State { - if fileop { - bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ - CopyDirContentsOnly: true, - })) - return &bc - } - unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlexf("copy %s/. 
/out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context")) - unpack.AddMount("/src", *c, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) +func scopeToSubDir(c *llb.State, dir string) *llb.State { + bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ + CopyDirContentsOnly: true, + })) return &bc } @@ -812,11 +866,11 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c return opts } -func contextByNameFunc(c client.Client, p *ocispecs.Platform) func(context.Context, string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { - return func(ctx context.Context, name string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { + return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { named, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, nil, nil, errors.Wrapf(err, "invalid context name %s", name) + return nil, nil, errors.Wrapf(err, "invalid context name %s", name) } name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") @@ -825,77 +879,174 @@ func contextByNameFunc(c client.Client, p *ocispecs.Platform) func(context.Conte p = &pp } if p != nil { - name := name + "::" + platforms.Format(platforms.Normalize(*p)) - st, img, bi, err := contextByName(ctx, c, name, p) + pname := name + "::" + platforms.Format(platforms.Normalize(*p)) + st, img, err := contextByName(ctx, c, sessionID, name, pname, p, resolveMode) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if st != nil { - return st, img, bi, nil + return st, img, nil } } - return contextByName(ctx, c, name, p) + return contextByName(ctx, c, sessionID, name, name, p, resolveMode) } } -func contextByName(ctx context.Context, c client.Client, name string, platform *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByName(ctx context.Context, c client.Client, sessionID, name string, pname string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) { opts := c.BuildOpts().Opts - v, ok := opts["context:"+name] + v, ok := opts[contextPrefix+pname] if !ok { - return nil, nil, nil, nil + return nil, nil, nil } vv := strings.SplitN(v, ":", 2) if len(vv) != 2 { - return nil, nil, nil, errors.Errorf("invalid context specifier %s for %s", v, name) + return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, pname) + } + // allow git@ without protocol for SSH URLs for backwards compatibility + if strings.HasPrefix(vv[0], "git@") { + vv[0] = "git" } switch vv[0] { case "docker-image": ref := strings.TrimPrefix(vv[1], "//") + if ref == "scratch" { + st := llb.Scratch() + return &st, nil, nil + } + imgOpt := []llb.ImageOption{ - llb.WithCustomName("[context " + name + "] " + ref), - llb.WithMetaResolver(c), + llb.WithCustomName("[context " + pname + "] " + ref), } if platform != nil { imgOpt = append(imgOpt, llb.Platform(*platform)) } + + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, nil, err + } + + named = reference.TagNameOnly(named) + + _, data, err := c.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: resolveMode, + LogName: 
fmt.Sprintf("[context %s] load metadata for %s", pname, ref), + ResolverType: llb.ResolverTypeRegistry, + }) + if err != nil { + return nil, nil, err + } + + var img dockerfile2llb.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, err + } + img.Created = nil + st := llb.Image(ref, imgOpt...) - return &st, nil, nil, nil + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + return &st, &img, nil case "git": - st, ok := detectGitContext(v, "1") + st, ok := detectGitContext(v, true) if !ok { - return nil, nil, nil, errors.Errorf("invalid git context %s", v) + return nil, nil, errors.Errorf("invalid git context %s", v) } - return st, nil, nil, nil + return st, nil, nil case "http", "https": - st, ok := detectGitContext(v, "1") + st, ok := detectGitContext(v, true) if !ok { - httpst := llb.HTTP(v, llb.WithCustomName("[context "+name+"] "+v)) + httpst := llb.HTTP(v, llb.WithCustomName("[context "+pname+"] "+v)) st = &httpst } - return st, nil, nil, nil + return st, nil, nil + case "oci-layout": + refSpec := strings.TrimPrefix(vv[1], "//") + ref, err := reference.Parse(refSpec) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) + } + named, ok := ref.(reference.Named) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) + } + dgstd, ok := named.(reference.Digested) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) + } + + // for the dummy ref primarily used in log messages, we can use the + // original name, since the store key may not be significant + dummyRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) + } + dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) + } + + _, data, err := c.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: resolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, dummyRef.String()), + ResolverType: llb.ResolverTypeOCILayout, + Store: llb.ResolveImageConfigOptStore{ + SessionID: sessionID, + StoreID: named.Name(), + }, + }) + if err != nil { + return nil, nil, err + } + + var img dockerfile2llb.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") + } + + ociOpt := []llb.OCILayoutOption{ + llb.WithCustomName("[context " + pname + "] OCI load from client"), + llb.OCIStore(c.BuildOpts().SessionID, named.Name()), + } + if platform != nil { + ociOpt = append(ociOpt, llb.Platform(*platform)) + } + st := llb.OCILayout( + dummyRef.String(), + ociOpt..., + ) + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + return &st, &img, nil case "local": st := llb.Local(vv[1], llb.SessionID(c.BuildOpts().SessionID), llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint("context:"+name+"-"+dockerignoreFilename), - llb.WithCustomName("[context "+name+"] load "+dockerignoreFilename), + llb.SharedKeyHint("context:"+pname+"-"+dockerignoreFilename), + llb.WithCustomName("[context "+pname+"] load "+dockerignoreFilename), llb.Differ(llb.DiffNone, false), ) def, err := st.Marshal(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } res, err := c.Solve(ctx, 
client.SolveRequest{ Evaluate: true, Definition: def.ToPB(), }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } ref, err := res.SingleRef() if err != nil { - return nil, nil, nil, err + return nil, nil, err } dt, _ := ref.ReadFile(ctx, client.ReadRequest{ Filename: dockerignoreFilename, @@ -904,58 +1055,46 @@ func contextByName(ctx context.Context, c client.Client, name string, platform * if len(dt) != 0 { excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) if err != nil { - return nil, nil, nil, err + return nil, nil, err } } st = llb.Local(vv[1], - llb.WithCustomName("[context "+name+"] load from client"), + llb.WithCustomName("[context "+pname+"] load from client"), llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint("context:"+name), + llb.SharedKeyHint("context:"+pname), llb.ExcludePatterns(excludes), ) - return &st, nil, nil, nil + return &st, nil, nil case "input": inputs, err := c.Inputs(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } st, ok := inputs[vv[1]] if !ok { - return nil, nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name) + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], pname) } - md, ok := opts["input-metadata:"+vv[1]] + md, ok := opts[inputMetadataPrefix+vv[1]] if ok { m := make(map[string][]byte) if err := json.Unmarshal([]byte(md), &m); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) - } - var bi *binfotypes.BuildInfo - if dtbi, ok := m[exptypes.ExporterBuildInfo]; ok { - var depbi binfotypes.BuildInfo - if err := json.Unmarshal(dtbi, &depbi); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse buildinfo for %s", name) - } - bi = &binfotypes.BuildInfo{ - Deps: map[string]binfotypes.BuildInfo{ - strings.SplitN(vv[1], "::", 2)[0]: depbi, - }, - } + return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) } var img *dockerfile2llb.Image if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { st, err = st.WithImageConfig(dtic) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name) + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", pname) } } - return &st, img, bi, nil + return &st, img, nil } - return &st, nil, nil, nil + return &st, nil, nil default: - return nil, nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name) + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], pname) } } @@ -985,3 +1124,15 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { } return errdefs.WithSource(err, s) } + +func parseSourceDateEpoch(v string) (*time.Time, error) { + if v == "" { + return nil, nil + } + sde, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) + } + tm := time.Unix(sde, 0).UTC() + return &tm, nil +} diff --git a/frontend/dockerfile/builder/subrequests.go b/frontend/dockerfile/builder/subrequests.go index 6d30b7b8cc1c..844953023822 100644 --- a/frontend/dockerfile/builder/subrequests.go +++ b/frontend/dockerfile/builder/subrequests.go @@ -1,16 +1,19 @@ package builder import ( + "bytes" "context" "encoding/json" "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/outline" + 
"github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/solver/errdefs" ) func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) { - req, ok := opts["requestid"] + req, ok := opts[keyRequestID] if !ok { return nil, false, nil } @@ -18,6 +21,8 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul case subrequests.RequestSubrequestsDescribe: res, err := describe() return res, true, err + case outline.RequestSubrequestsOutline, targets.RequestTargets: // handled later + return nil, false, nil default: return nil, true, errdefs.NewUnsupportedSubrequestError(req) } @@ -25,15 +30,25 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul func describe() (*client.Result, error) { all := []subrequests.Request{ + outline.SubrequestsOutlineDefinition, + targets.SubrequestsTargetsDefinition, subrequests.SubrequestsDescribeDefinition, } - dt, err := json.MarshalIndent(all, " ", "") + dt, err := json.MarshalIndent(all, "", " ") if err != nil { return nil, err } + + b := bytes.NewBuffer(nil) + if err := subrequests.PrintDescribe(dt, b); err != nil { + return nil, err + } + res := client.NewResult() res.Metadata = map[string][]byte{ "result.json": dt, + "result.txt": b.Bytes(), + "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), } return res, nil } diff --git a/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile b/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile index 7f181948816b..23b1fd288fa0 100644 --- a/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile +++ b/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile @@ -1,9 +1,9 @@ # syntax=docker/dockerfile-upstream:master # xx is a helper for cross-compilation -FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:810dc54d5144f133a218e88e319184bf8b9ce01d37d46ddb37573e90decd9eef AS xx +FROM --platform=$BUILDPLATFORM tonistiigi/xx:master@sha256:d4254d9739ce2de9fb88e09bdc716aa0c65f0446a2a2143399f991d71136a3d4 AS xx -FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS base +FROM --platform=$BUILDPLATFORM golang:1.19-alpine AS base RUN apk add git bash COPY --from=xx / / WORKDIR /src diff --git a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release index b572bda1756b..7a7909c5fca3 100755 --- a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release +++ b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release @@ -1,10 +1,11 @@ #!/usr/bin/env bash . $(dirname $0)/../../../../../hack/util -set -e +set -eu -: ${PLATFORMS=} -: ${DAILY_TARGETS=} +: "${RELEASE=false}" +: "${PLATFORMS=}" +: "${DAILY_TARGETS=}" usage() { echo "$0 (master|tag|daily) (tag|channel) [push]" @@ -23,7 +24,7 @@ parseTag() { fi local suffix=$(echo $1 | awk -F- '{print $NF}') local tagf=./frontend/dockerfile/release/$suffix/tags - if [ "$sufffix" == "$1" ] || [ ! -f $tagf ]; then + if [ "$suffix" == "$1" ] || [ ! 
-f $tagf ]; then suffix="mainline" fi @@ -70,6 +71,11 @@ if [ "$PUSH" = "push" ]; then pushFlag="push=true" fi +nocacheFilterFlag="" +if [[ "$RELEASE" = "true" ]] && [[ "$GITHUB_ACTIONS" = "true" ]]; then + nocacheFilterFlag="--no-cache-filter=base" +fi + case $TYP in "master") tagf=./frontend/dockerfile/release/$TAG/tags @@ -84,10 +90,10 @@ case $TYP in pushTag=${pushTag}-$TAG fi - buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \ + buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ - --output "type=image,name=$REPO:$pushTag,buildinfo-attrs=true,$pushFlag" \ + --output "type=image,name=$REPO:$pushTag,$pushFlag" \ --file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \ $currentcontext ;; @@ -101,10 +107,10 @@ case $TYP in fi buildTags=$(cat $tagf) - buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \ + buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ - --output "type=image,\"name=$publishedNames\",buildinfo-attrs=true,$pushFlag" \ + --output "type=image,\"name=$publishedNames\",$pushFlag" \ --file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \ $currentcontext ;; @@ -127,7 +133,7 @@ case $TYP in tmp=$(mktemp -d -t buildid.XXXXXXXXXX) dt=$(date +%Y%m%d) - buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \ + buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag \ --target "buildid" \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ @@ -141,10 +147,10 @@ case $TYP in buildid=$(cat $tmp/buildid) echo "buildid: $buildid" - buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \ + buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \ --build-arg "CHANNEL=$TAG" \ --build-arg "BUILDTAGS=$buildTags" \ - --output "type=image,name=$REPO:$dt-$TAG,buildinfo-attrs=true,$pushFlag" \ + --output "type=image,name=$REPO:$dt-$TAG,$pushFlag" \ --file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \ $currentcontext rm $tmp/buildid diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index 7ac6b9bdd774..6476267e2d32 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -13,22 +13,28 @@ import ( "sort" "strconv" "strings" + "time" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/go-connections/nat" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb/imagemetaresolver" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/system" "github.com/moby/sys/signal" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" 
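(Worth noting while convert.go gains its SourceDateEpoch support below: the exporter/util/epoch package introduced earlier is small enough to exercise on its own. A rough usage sketch, assuming a moby/buildkit revision that ships the package:)

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/exporter/util/epoch"
)

func main() {
	// "source-date-epoch" is consumed; unrelated attrs are passed through.
	tm, rest, err := epoch.ParseExporterAttrs(map[string]string{
		"source-date-epoch": "1672531200",
		"name":              "docker.io/library/test:latest",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(tm.UTC()) // 2023-01-01 00:00:00 +0000 UTC
	fmt.Println(rest)     // map[name:docker.io/library/test:latest]
}
```

The same integer-seconds format is what parseSourceDateEpoch accepts from build-arg:SOURCE_DATE_EPOCH on the frontend side.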
"github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -39,9 +45,15 @@ const ( defaultContextLocalName = "context" historyComment = "buildkit.dockerfile.v0" - DefaultCopyImage = "docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061" + sbomScanContext = "BUILDKIT_SBOM_SCAN_CONTEXT" + sbomScanStage = "BUILDKIT_SBOM_SCAN_STAGE" ) +var nonEnvArgs = map[string]struct{}{ + sbomScanContext: {}, + sbomScanStage: {}, +} + type ConvertOpt struct { Target string MetaResolver llb.ImageMetaResolver @@ -54,57 +66,127 @@ type ConvertOpt struct { // Empty slice means ignore cache for all stages. Nil doesn't disable cache. IgnoreCache []string // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *ocispecs.Platform - BuildPlatforms []ocispecs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ShmSize int64 - Ulimit []pb.Ulimit - CgroupParent string - ForceNetMode pb.NetMode - OverrideCopyImage string - LLBCaps *apicaps.CapSet - ContextLocalName string - SourceMap *llb.SourceMap - Hostname string - Warn func(short, url string, detail [][]byte, location *parser.Range) - ContextByName func(context.Context, string) (*llb.State, *Image, *binfotypes.BuildInfo, error) -} - -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *binfotypes.BuildInfo, error) { - buildInfo := &binfotypes.BuildInfo{} + CacheIDNamespace string + ImageResolveMode llb.ResolveMode + TargetPlatform *ocispecs.Platform + BuildPlatforms []ocispecs.Platform + PrefixPlatform bool + ExtraHosts []llb.HostIP + ShmSize int64 + Ulimit []pb.Ulimit + CgroupParent string + ForceNetMode pb.NetMode + LLBCaps *apicaps.CapSet + ContextLocalName string + SourceMap *llb.SourceMap + Hostname string + SourceDateEpoch *time.Time + Warn func(short, url string, detail [][]byte, location *parser.Range) + ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) +} + +type SBOMTargets struct { + Core llb.State + Extras map[string]llb.State + + IgnoreCache bool +} + +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *SBOMTargets, error) { + ds, err := toDispatchState(ctx, dt, opt) + if err != nil { + return nil, nil, nil, err + } + + sbom := SBOMTargets{ + Core: ds.state, + Extras: map[string]llb.State{}, + } + if ds.scanContext { + sbom.Extras["context"] = ds.opt.buildContext + } + if ds.ignoreCache { + sbom.IgnoreCache = true + } + for _, dsi := range findReachable(ds) { + if ds != dsi && dsi.scanStage { + sbom.Extras[dsi.stageName] = dsi.state + if dsi.ignoreCache { + sbom.IgnoreCache = true + } + } + } + + return &ds.state, &ds.image, &sbom, nil +} + +func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) { + ds, err := toDispatchState(ctx, dt, opt) + if err != nil { + return nil, err + } + o := ds.Outline(dt) + return &o, nil +} + +func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) { + dockerfile, err := parser.Parse(bytes.NewReader(dt)) + if err != nil { + return nil, err + } + stages, _, err := instructions.Parse(dockerfile.AST) + if err != nil { + return nil, err + } + + l := &targets.List{ + Sources: [][]byte{dt}, + } + + for i, s := range stages { + t := targets.Target{ + Name: s.Name, + Description: s.Comment, + Default: i == len(stages)-1, + Base: s.BaseName, + Platform: s.Platform, + Location: 
toSourceLocation(s.Location), + } + l.Targets = append(l.Targets, t) + } + return l, nil +} + +func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, error) { contextByName := opt.ContextByName - opt.ContextByName = func(ctx context.Context, name string) (*llb.State, *Image, *binfotypes.BuildInfo, error) { + opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) { if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { if contextByName != nil { - st, img, bi, err := contextByName(ctx, name) - if err != nil { - return nil, nil, nil, err + if p == nil { + p = opt.TargetPlatform } - if bi != nil && bi.Deps != nil { - for k := range bi.Deps { - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - buildInfo.Deps[k] = bi.Deps[k] - } + st, img, err := contextByName(ctx, name, resolveMode, p) + if err != nil { + return nil, nil, err } - return st, img, bi, nil + return st, img, nil } } - return nil, nil, nil, nil + return nil, nil, nil } if len(dt) == 0 { - return nil, nil, nil, errors.Errorf("the Dockerfile cannot be empty") + return nil, errors.Errorf("the Dockerfile cannot be empty") } if opt.ContextLocalName == "" { opt.ContextLocalName = defaultContextLocalName } + if opt.Warn == nil { + opt.Warn = func(string, string, [][]byte, *parser.Range) {} + } + platformOpt := buildPlatformOpt(&opt) optMetaArgs := getPlatformArgs(platformOpt) @@ -114,7 +196,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, dockerfile, err := parser.Parse(bytes.NewReader(dt)) if err != nil { - return nil, nil, nil, err + return nil, err } for _, w := range dockerfile.Warnings { @@ -125,17 +207,27 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, stages, metaArgs, err := instructions.Parse(dockerfile.AST) if err != nil { - return nil, nil, nil, err + return nil, err } shlex := shell.NewLex(dockerfile.EscapeToken) + outline := newOutlineCapture() for _, cmd := range metaArgs { for _, metaArg := range cmd.Args { + info := argInfo{definition: metaArg, location: cmd.Location()} + if v, ok := opt.BuildArgs[metaArg.Key]; !ok { + if metaArg.Value != nil { + *metaArg.Value, info.deps, _ = shlex.ProcessWordWithMatches(*metaArg.Value, metaArgsToMap(optMetaArgs)) + } + } else { + metaArg.Value = &v + } + optMetaArgs = append(optMetaArgs, metaArg) if metaArg.Value != nil { - *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) + info.value = *metaArg.Value } - optMetaArgs = append(optMetaArgs, setKVValue(metaArg, opt.BuildArgs)) + outline.allArgs[metaArg.Key] = info } } @@ -148,60 +240,73 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, // set base state for every image for i, st := range stages { - name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) + name, used, err := shlex.ProcessWordWithMatches(st.BaseName, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, nil, parser.WithLocation(err, st.Location) + return nil, parser.WithLocation(err, st.Location) } if name == "" { - return nil, nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) + return nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) } st.BaseName = name ds := &dispatchState{ + stage: st, deps: make(map[*dispatchState]struct{}), 
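(For context on the ListTargets helper defined just above: it backs the new targets subrequest and can be driven directly. A sketch, assuming a module version that exposes it:)

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
)

func main() {
	df := []byte(`FROM alpine AS base
FROM base AS build
FROM scratch AS release
`)
	l, err := dockerfile2llb.ListTargets(context.TODO(), df)
	if err != nil {
		panic(err)
	}
	for _, t := range l.Targets {
		// Only the last stage of the Dockerfile is reported as the default
		// target, mirroring the i == len(stages)-1 check above.
		fmt.Printf("%-8s base=%-8s default=%v\n", t.Name, t.Base, t.Default)
	}
}
```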
ctxPaths: make(map[string]struct{}), stageName: st.Name, prefixPlatform: opt.PrefixPlatform, + outline: outline.clone(), + epoch: opt.SourceDateEpoch, + } + + if v := st.Platform; v != "" { + v, u, err := shlex.ProcessWordWithMatches(v, metaArgsToMap(optMetaArgs)) + if err != nil { + return nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) + } + + p, err := platforms.Parse(v) + if err != nil { + return nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) + } + for k := range u { + used[k] = struct{}{} + } + ds.platform = &p } if st.Name != "" { - s, img, bi, err := opt.ContextByName(ctx, st.Name) + s, img, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) if err != nil { - return nil, nil, nil, err + return nil, err } if s != nil { ds.noinit = true ds.state = *s if img != nil { - ds.image = *img - } - if bi != nil { - ds.buildInfo = *bi + ds.image = clampTimes(*img, opt.SourceDateEpoch) + if img.Architecture != "" && img.OS != "" { + ds.platform = &ocispecs.Platform{ + OS: img.OS, + Architecture: img.Architecture, + Variant: img.Variant, + } + } } allDispatchStates.addState(ds) continue } } - ds.stage = st - if st.Name == "" { ds.stageName = fmt.Sprintf("stage-%d", i) } - if v := st.Platform; v != "" { - v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) - if err != nil { - return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) - } + allDispatchStates.addState(ds) - p, err := platforms.Parse(v) - if err != nil { - return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) - } - ds.platform = &p + for k := range used { + ds.outline.usedArgs[k] = struct{}{} } - allDispatchStates.addState(ds) total := 0 if ds.stage.BaseName != emptyImageName && ds.base == nil { @@ -212,9 +317,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: total++ case *instructions.WorkdirCommand: - if useFileOp(opt.BuildArgs, opt.LLBCaps) { - total++ - } + total++ } } ds.cmdTotal = total @@ -239,7 +342,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, var ok bool target, ok = allDispatchStates.findStateByName(opt.Target) if !ok { - return nil, nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + return nil, errors.Errorf("target stage %s could not be found", opt.Target) } } @@ -249,7 +352,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, for i, cmd := range d.stage.Commands { newCmd, err := toCommand(cmd, allDispatchStates) if err != nil { - return nil, nil, nil, err + return nil, err } d.commands[i] = newCmd for _, src := range newCmd.sources { @@ -264,7 +367,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if has, state := hasCircularDependency(allDispatchStates.states); has { - return nil, nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) + return nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) } if len(allDispatchStates.states) == 1 { @@ -307,7 +410,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, bi, err := 
opt.ContextByName(ctx, d.stage.BaseName) + st, img, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform) if err != nil { return err } @@ -317,10 +420,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } else { d.image = emptyImage(platformOpt.targetPlatform) } - if bi != nil { - d.buildInfo = *bi - } - d.state = *st + d.state = st.Platform(*platform) d.platform = platform return nil } @@ -331,9 +431,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } prefix += "internal]" dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + Platform: platform, + ResolveMode: opt.ImageResolveMode.String(), + LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + ResolverType: llb.ResolverTypeRegistry, }) if err != nil { return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) @@ -396,30 +497,17 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if err := eg.Wait(); err != nil { - return nil, nil, nil, err + return nil, err } buildContext := &mutableOutput{} ctxPaths := map[string]struct{}{} for _, d := range allDispatchStates.states { - if !isReachable(target, d) { + if !isReachable(target, d) || d.noinit { continue } - // collect build sources and dependencies - if len(d.buildInfo.Sources) > 0 { - buildInfo.Sources = append(buildInfo.Sources, d.buildInfo.Sources...) - } - if d.buildInfo.Deps != nil { - for name, bi := range d.buildInfo.Deps { - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - buildInfo.Deps[name] = bi - } - } - if d.base != nil { d.state = d.base.state d.platform = d.base.platform @@ -428,11 +516,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - var os string + var pathOS string if d.platform != nil { - os = d.platform.OS + pathOS = d.platform.OS } - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(os)) + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS)) } // initialize base metadata from image conf @@ -445,12 +533,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if d.image.Config.WorkingDir != "" { if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } } if d.image.Config.User != "" { if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } } d.state = d.state.Network(opt.ForceNetMode) @@ -470,35 +558,37 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, shmSize: opt.ShmSize, ulimit: opt.Ulimit, cgroupParent: opt.CgroupParent, - copyImage: opt.OverrideCopyImage, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, } - if opt.copyImage == "" { - opt.copyImage = DefaultCopyImage - } if err = dispatchOnBuildTriggers(d, 
d.image.Config.OnBuild, opt); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } d.image.Config.OnBuild = nil for _, cmd := range d.commands { if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, nil, parser.WithLocation(err, cmd.Location()) + return nil, parser.WithLocation(err, cmd.Location()) } } + d.opt = opt for p := range d.ctxPaths { ctxPaths[p] = struct{}{} } - } - // sort build sources - if len(buildInfo.Sources) > 0 { - sort.Slice(buildInfo.Sources, func(i, j int) bool { - return buildInfo.Sources[i].Ref < buildInfo.Sources[j].Ref - }) + locals := []instructions.KeyValuePairOptional{} + locals = append(locals, d.opt.metaArgs...) + locals = append(locals, d.buildArgs...) + for _, a := range locals { + switch a.Key { + case sbomScanStage: + d.scanStage = isEnabledForStage(d.stageName, a.ValueString()) + case sbomScanContext: + d.scanContext = isEnabledForStage(d.stageName, a.ValueString()) + } + } } if len(opt.Labels) != 0 && target.image.Config.Labels == nil { @@ -530,7 +620,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, if opt.LLBCaps != nil { defaults = append(defaults, llb.WithCaps(*opt.LLBCaps)) } - st := target.state.SetMarshalDefaults(defaults...) + target.state = target.state.SetMarshalDefaults(defaults...) if !platformOpt.implicitTarget { target.image.OS = platformOpt.targetPlatform.OS @@ -538,7 +628,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, target.image.Variant = platformOpt.targetPlatform.Variant } - return &st, &target.image, buildInfo, nil + return target, nil } func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { @@ -598,7 +688,6 @@ type dispatchOpt struct { shmSize int64 ulimit []pb.Ulimit cgroupParent string - copyImage string llbCaps *apicaps.CapSet sourceMap *llb.SourceMap } @@ -643,17 +732,25 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { case *instructions.WorkdirCommand: err = dispatchWorkdir(d, c, true, &opt) case *instructions.AddCommand: - err = dispatchCopy(d, copyConfig{ - params: c.SourcesAndDest, - source: opt.buildContext, - isAddCommand: true, - cmdToPrint: c, - chown: c.Chown, - chmod: c.Chmod, - link: c.Link, - location: c.Location(), - opt: opt, - }) + var checksum digest.Digest + if c.Checksum != "" { + checksum, err = digest.Parse(c.Checksum) + } + if err == nil { + err = dispatchCopy(d, copyConfig{ + params: c.SourcesAndDest, + source: opt.buildContext, + isAddCommand: true, + cmdToPrint: c, + chown: c.Chown, + chmod: c.Chmod, + link: c.Link, + keepGitDir: c.KeepGitDir, + checksum: checksum, + location: c.Location(), + opt: opt, + }) + } if err == nil { for _, src := range c.SourcePaths { if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { @@ -710,6 +807,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { } type dispatchState struct { + opt dispatchOpt state llb.State image Image platform *ocispecs.Platform @@ -728,6 +826,10 @@ type dispatchState struct { cmdTotal int prefixPlatform bool buildInfo binfotypes.BuildInfo + outline outlineCapture + epoch *time.Time + scanStage bool + scanContext bool } type dispatchStates struct { @@ -744,6 +846,7 @@ func (dss *dispatchStates) addState(ds *dispatchState) { if d, ok := dss.statesByName[ds.stage.BaseName]; ok { ds.base = d + ds.outline = d.outline.clone() } if ds.stage.Name != "" { 
dss.statesByName[strings.ToLower(ds.stage.Name)] = ds @@ -803,7 +906,7 @@ func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { d.state = d.state.AddEnv(e.Key, e.Value) d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value) } - return commitToHistory(&d.image, commitMessage.String(), false, nil) + return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch) } func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { @@ -814,7 +917,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE var args []string = c.CmdLine if len(c.Files) > 0 { if len(args) != 1 || !c.PrependShell { - return fmt.Errorf("parsing produced an invalid run command: %v", args) + return errors.Errorf("parsing produced an invalid run command: %v", args) } if heredoc := parser.MustParseHeredoc(args[0]); heredoc != nil { @@ -933,7 +1036,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } d.state = d.state.Run(opt...).Root() - return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) + return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state, d.epoch) } func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { @@ -945,7 +1048,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo d.image.Config.WorkingDir = wd if commit { withLayer := false - if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { + if wd != "/" { mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} if user := d.image.Config.User; user != "" { mkdirOpt = append(mkdirOpt, llb.WithUser(user)) @@ -964,12 +1067,12 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo ) withLayer = true } - return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil) + return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil, d.epoch) } return nil } -func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { +func dispatchCopy(d *dispatchState, cfg copyConfig) error { pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) if err != nil { return err @@ -994,6 +1097,21 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { } } + if cfg.checksum != "" { + if !cfg.isAddCommand { + return errors.New("checksum can't be specified for COPY") + } + if !addChecksumEnabled { + return errors.New("instruction 'ADD --checksum=' requires the labs channel") + } + if len(cfg.params.SourcePaths) != 1 { + return errors.New("checksum can't be specified for multiple sources") + } + if !isHTTPSource(cfg.params.SourcePaths[0]) { + return errors.New("checksum can't be specified for non-HTTP sources") + } + } + commitMessage := bytes.NewBufferString("") if cfg.isAddCommand { commitMessage.WriteString("ADD") @@ -1005,7 +1123,34 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { for _, src := range cfg.params.SourcePaths { commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + gitRef, gitRefErr := gitutil.ParseGitRef(src) + if gitRefErr == nil && !gitRef.IndistinguishableFromLocal { + if !cfg.isAddCommand { + return errors.New("source can't be a git ref for COPY") + } + if !addGitEnabled { + return errors.New("instruction ADD requires the labs channel") + } + // 
TODO: print a warning (not an error) if gitRef.UnencryptedTCP is true + commit := gitRef.Commit + if gitRef.SubDir != "" { + commit += ":" + gitRef.SubDir + } + var gitOptions []llb.GitOption + if cfg.keepGitDir { + gitOptions = append(gitOptions, llb.KeepGitDir()) + } + st := llb.Git(gitRef.Remote, commit, gitOptions...) + opts := append([]llb.CopyOption{&llb.CopyInfo{ + Mode: mode, + CreateDestPath: true, + }}, copyOpt...) + if a == nil { + a = llb.Copy(st, "/", dest, opts...) + } else { + a = a.Copy(st, "/", dest, opts...) + } + } else if isHTTPSource(src) { if !cfg.isAddCommand { return errors.New("source can't be a URL for COPY") } @@ -1023,7 +1168,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { } } - st := llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)) + st := llb.HTTP(src, llb.Filename(f), llb.Checksum(cfg.checksum), dfCmd(cfg.params)) opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, @@ -1097,7 +1242,8 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { fileOpt = append(fileOpt, llb.IgnoreCache) } - if cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" { + // cfg.opt.llbCaps can be nil in unit tests + if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" { pgID := identity.NewID() d.cmdIndex-- // prefixCommand increases it pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env) @@ -1116,7 +1262,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { d.state = d.state.File(a, fileOpt...) } - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) + return commitToHistory(&d.image, commitMessage.String(), true, &d.state, d.epoch) } type copyConfig struct { @@ -1127,136 +1273,15 @@ type copyConfig struct { chown string chmod string link bool + keepGitDir bool + checksum digest.Digest location []parser.Range opt dispatchOpt } -func dispatchCopy(d *dispatchState, cfg copyConfig) error { - if useFileOp(cfg.opt.buildArgValues, cfg.opt.llbCaps) { - return dispatchCopyFileOp(d, cfg) - } - - if len(cfg.params.SourceContents) > 0 { - return errors.New("inline content copy is not supported") - } - - if cfg.chmod != "" { - if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapFileBase) != nil { - return errors.Wrap(cfg.opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported") - } - return errors.New("chmod is not supported") - } - - img := llb.Image(cfg.opt.copyImage, llb.MarkImageInternal, llb.Platform(cfg.opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) - pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) - if err != nil { - return err - } - dest := path.Join(".", pp) - if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { - dest += string(filepath.Separator) - } - args := []string{"copy"} - unpack := cfg.isAddCommand - - mounts := make([]llb.RunOption, 0, len(cfg.params.SourcePaths)) - if cfg.chown != "" { - args = append(args, fmt.Sprintf("--chown=%s", cfg.chown)) - _, _, err := parseUser(cfg.chown) - if err != nil { - mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) - mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) - } - } - - commitMessage := bytes.NewBufferString("") - if cfg.isAddCommand { - commitMessage.WriteString("ADD") - } else { - commitMessage.WriteString("COPY") - } - - for i, src := range cfg.params.SourcePaths { - commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !cfg.isAddCommand { - return errors.New("source can't be a URL for COPY") - } - - // Resources from remote URLs are not decompressed. - // https://docs.docker.com/engine/reference/builder/#add - // - // Note: mixing up remote archives and local archives in a single ADD instruction - // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 - unpack = false - u, err := url.Parse(src) - f := "__unnamed__" - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - f = base - } - } - target := path.Join(fmt.Sprintf("/src-%d", i), f) - args = append(args, target) - mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)), llb.Readonly)) - } else { - d, f := splitWildcards(src) - targetCmd := fmt.Sprintf("/src-%d", i) - targetMount := targetCmd - if f == "" { - f = path.Base(src) - targetMount = path.Join(targetMount, f) - } - targetCmd = path.Join(targetCmd, f) - args = append(args, targetCmd) - mounts = append(mounts, llb.AddMount(targetMount, cfg.source, llb.SourcePath(d), llb.Readonly)) - } - } - - commitMessage.WriteString(" " + cfg.params.DestPath) - - args = append(args, dest) - if unpack { - args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) - } - - platform := cfg.opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - - env, err := d.state.Env(context.TODO()) - if err != nil { - return err - } - - runOpt := []llb.RunOption{ - llb.Args(args), - llb.Dir("/dest"), - llb.ReadonlyRootFS(), - dfCmd(cfg.cmdToPrint), - llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env)), d.prefixPlatform, &platform, env)), - location(cfg.opt.sourceMap, cfg.location), - } - if d.ignoreCache { - runOpt = append(runOpt, llb.IgnoreCache) - } - - if cfg.opt.llbCaps != nil { - if err := cfg.opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { - runOpt = append(runOpt, llb.Network(llb.NetModeNone)) - } - } - - run := img.Run(append(runOpt, mounts...)...) 
- d.state = run.AddMount("/dest", d.state).Platform(platform) - - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) -} - func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { d.image.Author = c.Maintainer - return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil, d.epoch) } func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { @@ -1268,7 +1293,7 @@ func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { d.image.Config.Labels[v.Key] = v.Value commitMessage.WriteString(" " + v.String()) } - return commitToHistory(&d.image, commitMessage.String(), false, nil) + return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch) } func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { @@ -1284,7 +1309,7 @@ func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { d.image.Config.Cmd = args d.image.Config.ArgsEscaped = true d.cmdSet = true - return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil, d.epoch) } func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { @@ -1296,18 +1321,18 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err if !d.cmdSet { d.image.Config.Cmd = nil } - return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil, d.epoch) } func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { - d.image.Config.Healthcheck = &HealthConfig{ + d.image.Config.Healthcheck = &image.HealthConfig{ Test: c.Health.Test, Interval: c.Health.Interval, Timeout: c.Health.Timeout, StartPeriod: c.Health.StartPeriod, Retries: c.Health.Retries, } - return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch) } func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { @@ -1337,14 +1362,14 @@ func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shel d.image.Config.ExposedPorts[string(p)] = struct{}{} } - return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil, d.epoch) } func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { d.state = d.state.User(c.User) d.image.Config.User = c.User if commit { - return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil, d.epoch) } return nil } @@ -1359,7 +1384,7 @@ func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { } d.image.Config.Volumes[v] = struct{}{} } - return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil, d.epoch) } func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { @@ -1367,12 +1392,12 @@ func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) err return err } d.image.Config.StopSignal = c.Signal - 
return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil, d.epoch) } func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { d.image.Config.Shell = c.Shell - return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil, d.epoch) } func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { @@ -1385,21 +1410,34 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru commitStr += "=" + *arg.Value } commitStrs = append(commitStrs, commitStr) + + skipArgInfo := false // skip the arg info if the arg is inherited from global scope if buildArg.Value == nil { for _, ma := range metaArgs { if ma.Key == buildArg.Key { buildArg.Value = ma.Value + skipArgInfo = true } } } + ai := argInfo{definition: arg, location: c.Location()} + if buildArg.Value != nil { - d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + if _, ok := nonEnvArgs[buildArg.Key]; !ok { + d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + } + ai.value = *buildArg.Value + } + + if !skipArgInfo { + d.outline.allArgs[arg.Key] = ai } + d.outline.usedArgs[arg.Key] = struct{}{} d.buildArgs = append(d.buildArgs, buildArg) } - return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil) + return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil, d.epoch) } func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { @@ -1413,27 +1451,6 @@ func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { return path.Join(dir, p), nil } -func splitWildcards(name string) (string, string) { - i := 0 - for ; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - break - } - } - if i == len(name) { - return name, "" - } - - base := path.Base(name[:i]) - if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { - base = "" - } - return path.Dir(name[:i]), base + name[i:] -} - func addEnv(env []string, k, v string) []string { gotOne := false for i, envVar := range env { @@ -1497,7 +1514,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption return strings.Join(append(tmpBuildEnv, args...), " ") } -func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { +func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { if st != nil { msg += " # buildkit" } @@ -1506,6 +1523,7 @@ func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) erro CreatedBy: msg, Comment: historyComment, EmptyLayer: !withLayer, + Created: tm, }) return nil } @@ -1525,6 +1543,20 @@ func isReachable(from, to *dispatchState) (ret bool) { return false } +func findReachable(from *dispatchState) (ret []*dispatchState) { + if from == nil { + return nil + } + ret = append(ret, from) + if from.base != nil { + ret = append(ret, findReachable(from.base)...) + } + for d := range from.deps { + ret = append(ret, findReachable(d)...) 
+ } + return ret +} + func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { var visit func(state *dispatchState) bool if states == nil { @@ -1560,42 +1592,6 @@ func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { return false, nil } -func parseUser(str string) (uid uint32, gid uint32, err error) { - if str == "" { - return 0, 0, nil - } - parts := strings.SplitN(str, ":", 2) - for i, v := range parts { - switch i { - case 0: - uid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - if len(parts) == 1 { - gid = uid - } - case 1: - gid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - } - } - return -} - -func parseUID(str string) (uint32, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(uid), nil -} - func normalizeContextPaths(paths map[string]struct{}) []string { pathSlice := make([]string, 0, len(paths)) for p := range paths { @@ -1760,16 +1756,6 @@ func platformFromEnv(env []string) *ocispecs.Platform { return &p } -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} - func location(sm *llb.SourceMap, locations []parser.Range) llb.ConstraintsOpt { loc := make([]*pb.Range, 0, len(locations)) for _, l := range locations { @@ -1807,3 +1793,36 @@ func commonImageNames() []string { } return out } + +func clampTimes(img Image, tm *time.Time) Image { + if tm == nil { + return img + } + for i, h := range img.History { + if h.Created == nil || h.Created.After(*tm) { + img.History[i].Created = tm + } + } + if img.Created != nil && img.Created.After(*tm) { + img.Created = tm + } + return img +} + +func isHTTPSource(src string) bool { + return strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") +} + +func isEnabledForStage(stage string, value string) bool { + if enabled, err := strconv.ParseBool(value); err == nil { + return enabled + } + + vv := strings.Split(value, ",") + for _, v := range vv { + if v == stage { + return true + } + } + return false +} diff --git a/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go b/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go new file mode 100644 index 000000000000..4506baeb8ba8 --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go @@ -0,0 +1,6 @@ +//go:build dfaddchecksum +// +build dfaddchecksum + +package dockerfile2llb + +const addChecksumEnabled = true diff --git a/frontend/dockerfile/dockerfile2llb/convert_addgit.go b/frontend/dockerfile/dockerfile2llb/convert_addgit.go new file mode 100644 index 000000000000..9ccb7a20e840 --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_addgit.go @@ -0,0 +1,6 @@ +//go:build dfaddgit +// +build dfaddgit + +package dockerfile2llb + +const addGitEnabled = true diff --git a/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go b/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go new file mode 100644 index 000000000000..8de035297c1b --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go @@ -0,0 +1,6 @@ +//go:build !dfaddchecksum +// +build !dfaddchecksum + +package dockerfile2llb + +const addChecksumEnabled = false diff --git a/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go 
b/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go new file mode 100644 index 000000000000..119bb32c8895 --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go @@ -0,0 +1,6 @@ +//go:build !dfaddgit +// +build !dfaddgit + +package dockerfile2llb + +const addGitEnabled = false diff --git a/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 7777fba91ac9..1015590a0dc6 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -2,12 +2,9 @@ package dockerfile2llb import ( "context" - "fmt" "os" "path" "path/filepath" - "strconv" - "strings" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -46,7 +43,7 @@ func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { return false } -func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State { +func setCacheUIDGID(m *instructions.Mount, st llb.State) llb.State { uid := 0 gid := 0 mode := os.FileMode(0755) @@ -62,24 +59,6 @@ func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State { return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] settings cache mount permissions")) } -func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State { - if fileop { - return setCacheUIDGIDFileOp(m, st) - } - - var b strings.Builder - if m.UID != nil { - b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID)) - } - if m.GID != nil { - b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID)) - } - if m.Mode != nil { - b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8))) - } - return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] settings cache mount permissions")).AddMount("/mnt", st) -} - func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { var out []llb.RunOption mounts := instructions.GetMounts(c) @@ -100,7 +79,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* )) } if mount.Type == instructions.MountTypeSecret { - secret, err := dispatchSecret(mount) + secret, err := dispatchSecret(d, mount, c.Location()) if err != nil { return nil, err } @@ -108,7 +87,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* continue } if mount.Type == instructions.MountTypeSSH { - ssh, err := dispatchSSH(mount) + ssh, err := dispatchSSH(d, mount, c.Location()) if err != nil { return nil, err } @@ -148,7 +127,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* mountOpts = append(mountOpts, llb.SourcePath(src)) } else { if mount.UID != nil || mount.GID != nil || mount.Mode != nil { - st = setCacheUIDGID(mount, st, useFileOp(opt.buildArgValues, opt.llbCaps)) + st = setCacheUIDGID(mount, st) mountOpts = append(mountOpts, llb.SourcePath("/cache")) } } diff --git a/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/frontend/dockerfile/dockerfile2llb/convert_secrets.go index 2c88a5e4f7e7..ced2bff1b070 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_secrets.go +++ b/frontend/dockerfile/dockerfile2llb/convert_secrets.go @@ -5,10 +5,11 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" + 
"github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/pkg/errors" ) -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { +func dispatchSecret(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) { id := m.CacheID if m.Source != "" { id = m.Source @@ -26,6 +27,13 @@ func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { target = "/run/secrets/" + path.Base(id) } + if _, ok := d.outline.secrets[id]; !ok { + d.outline.secrets[id] = secretInfo{ + location: loc, + required: m.Required, + } + } + opts := []llb.SecretOption{llb.SecretID(id)} if !m.Required { diff --git a/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/frontend/dockerfile/dockerfile2llb/convert_ssh.go index b55659d97883..ab7aaa60127f 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_ssh.go +++ b/frontend/dockerfile/dockerfile2llb/convert_ssh.go @@ -3,13 +3,26 @@ package dockerfile2llb import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/pkg/errors" ) -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { +func dispatchSSH(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) { if m.Source != "" { return nil, errors.Errorf("ssh does not support source") } + + id := m.CacheID + if id == "" { + id = "default" + } + if _, ok := d.outline.ssh[id]; !ok { + d.outline.ssh[id] = sshInfo{ + location: loc, + required: m.Required, + } + } + opts := []llb.SSHOption{llb.SSHID(m.CacheID)} if m.Target != "" { diff --git a/frontend/dockerfile/dockerfile2llb/convert_test.go b/frontend/dockerfile/dockerfile2llb/convert_test.go index 5c1817addf91..ffcfee86fc15 100644 --- a/frontend/dockerfile/dockerfile2llb/convert_test.go +++ b/frontend/dockerfile/dockerfile2llb/convert_test.go @@ -1,16 +1,12 @@ package dockerfile2llb import ( - "strings" "testing" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/shell" "github.com/moby/buildkit/util/appcontext" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string { @@ -192,30 +188,3 @@ COPY --from=stage1 f2 /sub/ _, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{}) assert.EqualError(t, err, "circular dependency detected on stage: stage0") } - -// moby/buildkit#2311 -func TestTargetBuildInfo(t *testing.T) { - df := ` -FROM busybox -ADD https://raw.githubusercontent.com/moby/buildkit/master/README.md / -` - _, _, bi, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{ - TargetPlatform: &ocispecs.Platform{ - Architecture: "amd64", - OS: "linux", - }, - BuildPlatforms: []ocispecs.Platform{ - { - Architecture: "amd64", - OS: "linux", - }, - }, - }) - require.NoError(t, err) - - require.Equal(t, 1, len(bi.Sources)) - assert.Equal(t, binfotypes.SourceTypeDockerImage, bi.Sources[0].Type) - assert.Equal(t, "busybox", bi.Sources[0].Ref) - assert.True(t, strings.HasPrefix(bi.Sources[0].Alias, "docker.io/library/busybox@")) - assert.NotEmpty(t, bi.Sources[0].Pin) -} diff --git a/frontend/dockerfile/dockerfile2llb/directives.go b/frontend/dockerfile/dockerfile2llb/directives.go deleted file mode 100644 index 3cf982b9a9b3..000000000000 --- 
a/frontend/dockerfile/dockerfile2llb/directives.go +++ /dev/null @@ -1,55 +0,0 @@ -package dockerfile2llb - -import ( - "bufio" - "io" - "regexp" - "strings" - - "github.com/moby/buildkit/frontend/dockerfile/parser" -) - -const keySyntax = "syntax" - -var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) - -type Directive struct { - Name string - Value string - Location []parser.Range -} - -func DetectSyntax(r io.Reader) (string, string, []parser.Range, bool) { - directives := ParseDirectives(r) - if len(directives) == 0 { - return "", "", nil, false - } - v, ok := directives[keySyntax] - if !ok { - return "", "", nil, false - } - p := strings.SplitN(v.Value, " ", 2) - return p[0], v.Value, v.Location, true -} - -func ParseDirectives(r io.Reader) map[string]Directive { - m := map[string]Directive{} - s := bufio.NewScanner(r) - var l int - for s.Scan() { - l++ - match := reDirective.FindStringSubmatch(s.Text()) - if len(match) == 0 { - return m - } - m[strings.ToLower(match[1])] = Directive{ - Name: match[1], - Value: match[2], - Location: []parser.Range{{ - Start: parser.Position{Line: l}, - End: parser.Position{Line: l}, - }}, - } - } - return m -} diff --git a/frontend/dockerfile/dockerfile2llb/directives_test.go b/frontend/dockerfile/dockerfile2llb/directives_test.go deleted file mode 100644 index 6f45b2903111..000000000000 --- a/frontend/dockerfile/dockerfile2llb/directives_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package dockerfile2llb - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDirectives(t *testing.T) { - t.Parallel() - - dt := `#escape=\ -# key = FOO bar - -# smth -` - - d := ParseDirectives(bytes.NewBuffer([]byte(dt))) - require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d)) - - v, ok := d["escape"] - require.True(t, ok) - require.Equal(t, v.Value, "\\") - - v, ok = d["key"] - require.True(t, ok) - require.Equal(t, v.Value, "FOO bar") - - // for some reason Moby implementation in case insensitive for escape - dt = `# EScape=\ -# KEY = FOO bar - -# smth -` - - d = ParseDirectives(bytes.NewBuffer([]byte(dt))) - require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d)) - - v, ok = d["escape"] - require.True(t, ok) - require.Equal(t, v.Value, "\\") - - v, ok = d["key"] - require.True(t, ok) - require.Equal(t, v.Value, "FOO bar") -} - -func TestSyntaxDirective(t *testing.T) { - t.Parallel() - - dt := `# syntax = dockerfile:experimental // opts -FROM busybox -` - - ref, cmdline, loc, ok := DetectSyntax(bytes.NewBuffer([]byte(dt))) - require.True(t, ok) - require.Equal(t, ref, "dockerfile:experimental") - require.Equal(t, cmdline, "dockerfile:experimental // opts") - require.Equal(t, 1, loc[0].Start.Line) - require.Equal(t, 1, loc[0].End.Line) - - dt = `FROM busybox -RUN ls -` - ref, cmdline, _, ok = DetectSyntax(bytes.NewBuffer([]byte(dt))) - require.False(t, ok) - require.Equal(t, ref, "") - require.Equal(t, cmdline, "") -} diff --git a/frontend/dockerfile/dockerfile2llb/image.go b/frontend/dockerfile/dockerfile2llb/image.go index d4c82700e3e4..36b27aa28aba 100644 --- a/frontend/dockerfile/dockerfile2llb/image.go +++ b/frontend/dockerfile/dockerfile2llb/image.go @@ -1,59 +1,14 @@ package dockerfile2llb import ( - "time" - - "github.com/docker/docker/api/types/strslice" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/util/system" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -// HealthConfig holds configuration settings for the HEALTHCHECK feature. 
-type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ImageConfig is a docker compatible config for an image -type ImageConfig struct { - ocispecs.ImageConfig - - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - - // NetworkDisabled bool `json:",omitempty"` // Is network disabled - // MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - // Image is the JSON structure which describes some basic information about the image. // This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - ocispecs.Image - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // Variant defines platform variant. To be added to OCI. 
- Variant string `json:"variant,omitempty"` -} +type Image image.Image func clone(src Image) Image { img := src @@ -69,8 +24,8 @@ func emptyImage(platform ocispecs.Platform) Image { Image: ocispecs.Image{ Architecture: platform.Architecture, OS: platform.OS, + Variant: platform.Variant, }, - Variant: platform.Variant, } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" diff --git a/frontend/dockerfile/dockerfile2llb/outline.go b/frontend/dockerfile/dockerfile2llb/outline.go new file mode 100644 index 000000000000..f93c8961b2ec --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/outline.go @@ -0,0 +1,210 @@ +package dockerfile2llb + +import ( + "sort" + + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/subrequests/outline" + pb "github.com/moby/buildkit/solver/pb" +) + +type outlineCapture struct { + allArgs map[string]argInfo + usedArgs map[string]struct{} + secrets map[string]secretInfo + ssh map[string]sshInfo +} + +type argInfo struct { + value string + definition instructions.KeyValuePairOptional + deps map[string]struct{} + location []parser.Range +} + +type secretInfo struct { + required bool + location []parser.Range +} + +type sshInfo struct { + required bool + location []parser.Range +} + +func newOutlineCapture() outlineCapture { + return outlineCapture{ + allArgs: map[string]argInfo{}, + usedArgs: map[string]struct{}{}, + secrets: map[string]secretInfo{}, + ssh: map[string]sshInfo{}, + } +} + +func (o outlineCapture) clone() outlineCapture { + allArgs := map[string]argInfo{} + for k, v := range o.allArgs { + allArgs[k] = v + } + usedArgs := map[string]struct{}{} + for k := range o.usedArgs { + usedArgs[k] = struct{}{} + } + secrets := map[string]secretInfo{} + for k, v := range o.secrets { + secrets[k] = v + } + ssh := map[string]sshInfo{} + for k, v := range o.ssh { + ssh[k] = v + } + return outlineCapture{ + allArgs: allArgs, + usedArgs: usedArgs, + secrets: secrets, + ssh: ssh, + } +} + +func (o outlineCapture) markAllUsed(in map[string]struct{}) { + for k := range in { + if a, ok := o.allArgs[k]; ok { + o.markAllUsed(a.deps) + } + o.usedArgs[k] = struct{}{} + } +} + +func (ds *dispatchState) args(visited map[string]struct{}) []outline.Arg { + ds.outline.markAllUsed(ds.outline.usedArgs) + + args := make([]outline.Arg, 0, len(ds.outline.usedArgs)) + for k := range ds.outline.usedArgs { + if a, ok := ds.outline.allArgs[k]; ok { + if _, ok := visited[k]; !ok { + args = append(args, outline.Arg{ + Name: a.definition.Key, + Value: a.value, + Description: a.definition.Comment, + Location: toSourceLocation(a.location), + }) + visited[k] = struct{}{} + } + } + } + + if ds.base != nil { + args = append(args, ds.base.args(visited)...) + } + for d := range ds.deps { + args = append(args, d.args(visited)...) + } + + return args +} + +func (ds *dispatchState) secrets(visited map[string]struct{}) []outline.Secret { + secrets := make([]outline.Secret, 0, len(ds.outline.secrets)) + for k, v := range ds.outline.secrets { + if _, ok := visited[k]; !ok { + secrets = append(secrets, outline.Secret{ + Name: k, + Required: v.required, + Location: toSourceLocation(v.location), + }) + visited[k] = struct{}{} + } + } + if ds.base != nil { + secrets = append(secrets, ds.base.secrets(visited)...) + } + for d := range ds.deps { + secrets = append(secrets, d.secrets(visited)...) 
+ } + return secrets +} + +func (ds *dispatchState) ssh(visited map[string]struct{}) []outline.SSH { + ssh := make([]outline.SSH, 0, len(ds.outline.secrets)) + for k, v := range ds.outline.ssh { + if _, ok := visited[k]; !ok { + ssh = append(ssh, outline.SSH{ + Name: k, + Required: v.required, + Location: toSourceLocation(v.location), + }) + visited[k] = struct{}{} + } + } + if ds.base != nil { + ssh = append(ssh, ds.base.ssh(visited)...) + } + for d := range ds.deps { + ssh = append(ssh, d.ssh(visited)...) + } + return ssh +} + +func (ds *dispatchState) Outline(dt []byte) outline.Outline { + args := ds.args(map[string]struct{}{}) + sort.Slice(args, func(i, j int) bool { + return compLocation(args[i].Location, args[j].Location) + }) + + secrets := ds.secrets(map[string]struct{}{}) + sort.Slice(secrets, func(i, j int) bool { + return compLocation(secrets[i].Location, secrets[j].Location) + }) + + ssh := ds.ssh(map[string]struct{}{}) + sort.Slice(ssh, func(i, j int) bool { + return compLocation(ssh[i].Location, ssh[j].Location) + }) + + out := outline.Outline{ + Name: ds.stage.Name, + Description: ds.stage.Comment, + Sources: [][]byte{dt}, + Args: args, + Secrets: secrets, + SSH: ssh, + } + + return out +} + +func toSourceLocation(r []parser.Range) *pb.Location { + if len(r) == 0 { + return nil + } + arr := make([]*pb.Range, len(r)) + for i, r := range r { + arr[i] = &pb.Range{ + Start: pb.Position{ + Line: int32(r.Start.Line), + Character: int32(r.Start.Character), + }, + End: pb.Position{ + Line: int32(r.End.Line), + Character: int32(r.End.Character), + }, + } + } + return &pb.Location{Ranges: arr} +} + +func compLocation(a, b *pb.Location) bool { + if a.SourceIndex != b.SourceIndex { + return a.SourceIndex < b.SourceIndex + } + linea := 0 + lineb := 0 + if len(a.Ranges) > 0 { + linea = int(a.Ranges[0].Start.Line) + } + if len(b.Ranges) > 0 { + lineb = int(b.Ranges[0].Start.Line) + } + return linea < lineb +} diff --git a/frontend/dockerfile/dockerfile_addchecksum_test.go b/frontend/dockerfile/dockerfile_addchecksum_test.go new file mode 100644 index 000000000000..f34cf31a5038 --- /dev/null +++ b/frontend/dockerfile/dockerfile_addchecksum_test.go @@ -0,0 +1,175 @@ +//go:build dfaddchecksum +// +build dfaddchecksum + +package dockerfile + +import ( + "fmt" + "testing" + + "github.com/containerd/continuity/fs/fstest" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/frontend/dockerfile/builder" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/testutil/httpserver" + "github.com/moby/buildkit/util/testutil/integration" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/require" +) + +var addChecksumTests = integration.TestFuncs( + testAddChecksum, +) + +func init() { + allTests = append(allTests, addChecksumTests...) 
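The `--checksum` values in the tests below are built with `digest.FromBytes`, i.e. the canonical `sha256:<hex>` string of the fetched content. A standalone sketch (hypothetical helper, not part of the PR) of producing a value the Valid case accepts:

```go
package sketch

import digest "github.com/opencontainers/go-digest"

// checksumFor returns the canonical digest string ("sha256:<64 hex chars>")
// for an HTTP response body, in the form ADD --checksum=<value> expects.
func checksumFor(body []byte) string {
	return digest.FromBytes(body).String()
}
```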
+} + +func testAddChecksum(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + f.RequiresBuildctl(t) + + resp := httpserver.Response{ + Etag: identity.NewID(), + Content: []byte("content1"), + } + server := httpserver.NewTestServer(map[string]httpserver.Response{ + "/foo": resp, + }) + defer server.Close() + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + t.Run("Valid", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=%s %s /tmp/foo +`, digest.FromBytes(resp.Content).String(), server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + }) + t.Run("DigestFromEnv", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ENV DIGEST=%s +ENV LINK=%s +ADD --checksum=${DIGEST} ${LINK} /tmp/foo +`, digest.FromBytes(resp.Content).String(), server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + }) + t.Run("DigestMismatch", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=%s %s /tmp/foo +`, digest.FromBytes(nil).String(), server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.Error(t, err, "digest mismatch") + }) + t.Run("DigestWithKnownButUnsupportedAlgoName", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=md5:7e55db001d319a94b0b713529a756623 %s /tmp/foo +`, server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.Error(t, err, "unsupported digest algorithm") + }) + t.Run("DigestWithUnknownAlgoName", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=unknown:%s %s /tmp/foo +`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.Error(t, err, "unsupported digest algorithm") + }) + t.Run("DigestWithoutAlgoName", func(t *testing.T) { + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=%s %s /tmp/foo +`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo")) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = 
f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.Error(t, err, "invalid checksum digest format") + }) + t.Run("NonHTTPSource", func(t *testing.T) { + foo := []byte("local file") + dockerfile := []byte(fmt.Sprintf(` +FROM scratch +ADD --checksum=%s foo /tmp/foo +`, digest.FromBytes(foo).String())) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("foo", foo, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.Error(t, err, "checksum can't be specified for non-HTTP sources") + }) +} diff --git a/frontend/dockerfile/dockerfile_addgit_test.go b/frontend/dockerfile/dockerfile_addgit_test.go new file mode 100644 index 000000000000..fa99dea5648c --- /dev/null +++ b/frontend/dockerfile/dockerfile_addgit_test.go @@ -0,0 +1,115 @@ +//go:build dfaddgit +// +build dfaddgit + +package dockerfile + +import ( + "bytes" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + "text/template" + + "github.com/containerd/continuity/fs/fstest" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/frontend/dockerfile/builder" + "github.com/moby/buildkit/util/testutil/integration" + "github.com/stretchr/testify/require" +) + +var addGitTests = integration.TestFuncs( + testAddGit, +) + +func init() { + allTests = append(allTests, addGitTests...) +} + +func testAddGit(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + gitDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(gitDir) + gitCommands := []string{ + "git init", + "git config --local user.email test", + "git config --local user.name test", + } + makeCommit := func(tag string) []string { + return []string{ + "echo foo of " + tag + " >foo", + "git add foo", + "git commit -m " + tag, + "git tag " + tag, + } + } + gitCommands = append(gitCommands, makeCommit("v0.0.1")...) + gitCommands = append(gitCommands, makeCommit("v0.0.2")...) + gitCommands = append(gitCommands, makeCommit("v0.0.3")...) + gitCommands = append(gitCommands, "git update-server-info") + err = runShell(gitDir, gitCommands...) 
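The ADD sources exercised below go through `gitutil.ParseGitRef`, as in the dispatchCopy hunk earlier in this diff. A minimal sketch of that parsing; the import path and example URL are assumptions for illustration:

```go
package sketch

import (
	"fmt"

	"github.com/moby/buildkit/util/gitutil" // assumed location of ParseGitRef
)

// printGitRef splits an ADD source such as "https://example.com/repo.git#v0.0.2"
// into the remote and the fragment used as the commit/tag to check out.
func printGitRef(src string) error {
	ref, err := gitutil.ParseGitRef(src)
	if err != nil {
		return err // src is not recognizable as a git ref
	}
	fmt.Println(ref.Remote, ref.Commit)
	return nil
}
```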
+ require.NoError(t, err) + + server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir)))) + defer server.Close() + serverURL := server.URL + t.Logf("serverURL=%q", serverURL) + + dockerfile, err := applyTemplate(` +FROM alpine + +# Basic case +ADD {{.ServerURL}}/.git#v0.0.1 /x +RUN cd /x && \ + [ "$(cat foo)" = "foo of v0.0.1" ] + +# Complicated case +ARG REPO="{{.ServerURL}}/.git" +ARG TAG="v0.0.2" +ADD --keep-git-dir=true --chown=4242:8484 ${REPO}#${TAG} /buildkit-chowned +RUN apk add git +USER 4242 +RUN cd /buildkit-chowned && \ + [ "$(cat foo)" = "foo of v0.0.2" ] && \ + [ "$(stat -c %u foo)" = "4242" ] && \ + [ "$(stat -c %g foo)" = "8484" ] && \ + [ -z "$(git status -s)" ] +`, map[string]string{ + "ServerURL": serverURL, + }) + require.NoError(t, err) + t.Logf("dockerfile=%s", dockerfile) + + dir, err := integration.Tmpdir(t, + fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) +} + +func applyTemplate(tmpl string, x interface{}) (string, error) { + var buf bytes.Buffer + parsed, err := template.New("").Parse(tmpl) + if err != nil { + return "", err + } + if err := parsed.Execute(&buf, x); err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/frontend/dockerfile/dockerfile_buildinfo_test.go b/frontend/dockerfile/dockerfile_buildinfo_test.go index 84e0c24622d5..ea3705920972 100644 --- a/frontend/dockerfile/dockerfile_buildinfo_test.go +++ b/frontend/dockerfile/dockerfile_buildinfo_test.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "os" @@ -47,18 +47,16 @@ func testBuildInfoSources(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) f.RequiresBuildctl(t) - gitDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(gitDir) + gitDir := t.TempDir() dockerfile := ` FROM alpine:latest@sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300 AS alpine FROM busybox:latest -ADD https://raw.githubusercontent.com/moby/moby/master/README.md / +ADD https://user2:pw2@raw.githubusercontent.com/moby/moby/v20.10.21/README.md / COPY --from=alpine /bin/busybox /alpine-busybox ` - err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) + err := os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) require.NoError(t, err) err = runShell(gitDir, @@ -75,27 +73,35 @@ COPY --from=alpine /bin/busybox /alpine-busybox server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir)))) defer server.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - res, err := f.Solve(sb.Context(), c, client.SolveOpt{ - Exports: []client.ExportEntry{ - { - Type: client.ExporterOCI, - Output: fixedWriteCloser(outW), + var exports []client.ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []client.ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": 
"reg.dummy:5000/buildkit/test:latest", }, - }, + }} + } else { + exports = []client.ExportEntry{{ + Type: client.ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } + + expectedURL := strings.Replace(server.URL, "http://", "http://xxxxx:xxxxx@", 1) + require.NotEqual(t, expectedURL, server.URL) + server.URL = strings.Replace(server.URL, "http://", "http://user:pass@", 1) + + res, err := f.Solve(sb.Context(), c, client.SolveOpt{ + Exports: exports, FrontendAttrs: map[string]string{ - builder.DefaultLocalNameContext: server.URL + "/.git#buildinfo", + builder.DefaultLocalNameContext: server.URL + "/.git#buildinfo", + builder.DefaultLocalNameContext + ":foo": "https://foo:bar@example.invalid/foo.html", }, }, nil) require.NoError(t, err) @@ -109,11 +115,20 @@ COPY --from=alpine /bin/busybox /alpine-busybox require.NoError(t, err) require.Contains(t, bi.Attrs, "context") - require.Equal(t, server.URL+"/.git#buildinfo", *bi.Attrs["context"]) + require.Equal(t, expectedURL+"/.git#buildinfo", *bi.Attrs["context"]) - sources := bi.Sources - require.Equal(t, 3, len(sources)) + require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", *bi.Attrs["context:foo"]) + + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources + if isGateway { + require.Equal(t, 5, len(sources), "%+v", sources) + assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) + assert.Contains(t, sources[0].Ref, "buildkit_test") + sources = sources[1:] + } + require.Equal(t, 4, len(sources), "%+v", sources) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/alpine:latest@sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Ref) assert.Equal(t, "sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Pin) @@ -122,9 +137,13 @@ COPY --from=alpine /bin/busybox /alpine-busybox assert.Equal(t, "docker.io/library/busybox:latest", sources[1].Ref) assert.NotEmpty(t, sources[1].Pin) - assert.Equal(t, binfotypes.SourceTypeHTTP, sources[2].Type) - assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", sources[2].Ref) - assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[2].Pin) + assert.Equal(t, binfotypes.SourceTypeGit, sources[2].Type) + assert.Equal(t, expectedURL+"/.git#buildinfo", sources[2].Ref) + assert.NotEmpty(t, sources[2].Pin) + + assert.Equal(t, binfotypes.SourceTypeHTTP, sources[3].Type) + assert.Equal(t, "https://xxxxx:xxxxx@raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[3].Ref) + assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[3].Pin) } func testBuildInfoSourcesNoop(t *testing.T, sb integration.Sandbox) { @@ -135,31 +154,34 @@ func testBuildInfoSourcesNoop(t *testing.T, sb integration.Sandbox) { FROM busybox:latest ` - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) + var exports []client.ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []client.ExportEntry{{ + Type: "moby", + 
Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []client.ExportEntry{{ + Type: client.ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } res, err := f.Solve(sb.Context(), c, client.SolveOpt{ - Exports: []client.ExportEntry{ - { - Type: client.ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, + Exports: exports, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -176,8 +198,12 @@ FROM busybox:latest require.NoError(t, err) sources := bi.Sources - require.Equal(t, 1, len(sources)) + if _, isGateway := f.(*gatewayFrontend); isGateway { + require.Equal(t, 2, len(sources), "%+v", sources) + sources = sources[1:] + } + require.Equal(t, 1, len(sources)) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref) assert.NotEmpty(t, sources[0].Pin) @@ -194,34 +220,37 @@ ARG foo RUN echo $foo ` - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) + var exports []client.ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []client.ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []client.ExportEntry{{ + Type: client.ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } res, err := f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ "build-arg:foo": "bar", }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, + Exports: exports, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -243,6 +272,7 @@ RUN echo $foo // moby/buildkit#2476 func testBuildInfoMultiPlatform(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureMultiPlatform) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -250,27 +280,19 @@ func testBuildInfoMultiPlatform(t *testing.T, sb integration.Sandbox) { FROM busybox:latest ARG foo RUN echo $foo -ADD https://raw.githubusercontent.com/moby/moby/master/README.md / +ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md / ` - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - platforms := []string{"linux/amd64", "linux/arm64"} res, err := f.Solve(sb.Context(), c, client.SolveOpt{ @@ -281,7 +303,7 @@ ADD https://raw.githubusercontent.com/moby/moby/master/README.md / Exports: []client.ExportEntry{ { Type: client.ExporterOCI, - Output: 
fixedWriteCloser(outW), + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), }, }, LocalDirs: map[string]string{ @@ -303,15 +325,21 @@ ADD https://raw.githubusercontent.com/moby/moby/master/README.md / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources - require.Equal(t, 2, len(sources)) + if isGateway { + require.Equal(t, 3, len(sources), "%+v", sources) + sources = sources[1:] + } + require.Equal(t, 2, len(sources), "%+v", sources) assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref) assert.NotEmpty(t, sources[0].Pin) assert.Equal(t, binfotypes.SourceTypeHTTP, sources[1].Type) - assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", sources[1].Ref) + assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[1].Ref) assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[1].Pin) } } @@ -327,35 +355,38 @@ FROM scratch COPY --from=base /out / ` - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) + var exports []client.ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []client.ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []client.ExportEntry{{ + Type: client.ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } res, err := f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ "build-arg:foo": "bar", "context:busybox": "docker-image://alpine", }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, + Exports: exports, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -376,8 +407,15 @@ COPY --from=base /out / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) + _, isGateway := f.(*gatewayFrontend) + sources := bi.Sources - require.Equal(t, 1, len(sources)) + if isGateway { + require.Equal(t, 2, len(sources), "%+v", sources) + sources = sources[1:] + } else { + require.Equal(t, 1, len(sources)) + } assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type) assert.Equal(t, "docker.io/library/alpine:latest", sources[0].Ref) assert.NotEmpty(t, sources[0].Pin) @@ -394,45 +432,48 @@ FROM scratch COPY --from=base /o* / ` - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - out := filepath.Join(destDir, "out.tar") - outW, err := os.Create(out) - require.NoError(t, err) - outf := []byte(`dummy-result`) - dir2, err := tmpdir( + dir2, err := 
integration.Tmpdir( + t, fstest.CreateFile("out", outf, 0600), fstest.CreateFile("out2", outf, 0600), fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir2) + + var exports []client.ExportEntry + if integration.IsTestDockerdMoby(sb) { + exports = []client.ExportEntry{{ + Type: "moby", + Attrs: map[string]string{ + "name": "reg.dummy:5000/buildkit/test:latest", + }, + }} + } else { + exports = []client.ExportEntry{{ + Type: client.ExporterOCI, + Attrs: map[string]string{}, + Output: fixedWriteCloser(nopWriteCloser{io.Discard}), + }} + } res, err := f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ "build-arg:foo": "bar", "context:base": "local:basedir", }, - Exports: []client.ExportEntry{ - { - Type: client.ExporterOCI, - Output: fixedWriteCloser(outW), - }, - }, + Exports: exports, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -454,10 +495,17 @@ COPY --from=base /o* / require.Contains(t, bi.Attrs, "build-arg:foo") require.Equal(t, "bar", *bi.Attrs["build-arg:foo"]) - require.Equal(t, 0, len(bi.Sources)) + _, isGateway := f.(*gatewayFrontend) + if isGateway { + require.Equal(t, 1, len(bi.Sources)) + } else { + require.Equal(t, 0, len(bi.Sources)) + } } func testBuildInfoDeps(t *testing.T, sb integration.Sandbox) { + t.Skip("deps temporarily disabled with SLSA provenance support") + ctx := sb.Context() f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -469,15 +517,15 @@ func testBuildInfoDeps(t *testing.T, sb integration.Sandbox) { dockerfile := []byte(` FROM alpine ENV FOO=bar -ADD https://raw.githubusercontent.com/moby/moby/master/README.md / +ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md / RUN echo first > /out `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) dockerfile2 := []byte(` FROM base AS build @@ -486,11 +534,11 @@ FROM busybox COPY --from=build /foo /out / `) - dir2, err := tmpdir( + dir2, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile2, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{}) @@ -545,9 +593,7 @@ COPY --from=build /foo /out / return res, nil } - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() res, err := c.Build(ctx, client.SolveOpt{ LocalDirs: map[string]string{ @@ -582,7 +628,7 @@ COPY --from=build /foo /out / assert.NotEmpty(t, bi.Sources[0].Pin) assert.Equal(t, binfotypes.SourceTypeHTTP, bi.Sources[1].Type) - assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", bi.Sources[1].Ref) + assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", bi.Sources[1].Ref) assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", bi.Sources[1].Pin) require.Contains(t, bi.Deps, "base") @@ -594,6 +640,8 @@ COPY --from=build /foo /out / } func testBuildInfoDepsMultiPlatform(t *testing.T, sb integration.Sandbox) { + t.Skip("deps temporarily disabled with SLSA provenance support") + ctx := sb.Context() f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -611,11 +659,11 @@ ENV FOO=bar-$TARGETARCH RUN echo "foo $TARGETARCH" > /out `) - dir, err := tmpdir( + dir, err := 
integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) dockerfile2 := []byte(` FROM base AS build @@ -624,11 +672,11 @@ FROM busybox COPY --from=build /foo /out / `) - dir2, err := tmpdir( + dir2, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile2, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{ @@ -691,9 +739,7 @@ COPY --from=build /foo /out / return res, nil } - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() res, err := c.Build(ctx, client.SolveOpt{ LocalDirs: map[string]string{ @@ -737,6 +783,8 @@ COPY --from=build /foo /out / } func testBuildInfoDepsMainNoSource(t *testing.T, sb integration.Sandbox) { + t.Skip("deps temporarily disabled with SLSA provenance support") + ctx := sb.Context() f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -748,26 +796,26 @@ func testBuildInfoDepsMainNoSource(t *testing.T, sb integration.Sandbox) { dockerfile := []byte(` FROM alpine ENV FOO=bar -ADD https://raw.githubusercontent.com/moby/moby/master/README.md / +ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md / RUN echo first > /out `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) dockerfile2 := []byte(` FROM base AS build RUN echo "foo is $FOO" > /foo `) - dir2, err := tmpdir( + dir2, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile2, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{}) @@ -822,9 +870,7 @@ RUN echo "foo is $FOO" > /foo return res, nil } - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() res, err := c.Build(ctx, client.SolveOpt{ LocalDirs: map[string]string{ @@ -855,7 +901,7 @@ RUN echo "foo is $FOO" > /foo require.Equal(t, 1, len(bi.Sources)) assert.Equal(t, binfotypes.SourceTypeHTTP, bi.Sources[0].Type) - assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", bi.Sources[0].Ref) + assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", bi.Sources[0].Ref) assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", bi.Sources[0].Pin) require.Contains(t, bi.Deps, "base") diff --git a/frontend/dockerfile/dockerfile_heredoc_test.go b/frontend/dockerfile/dockerfile_heredoc_test.go index 4be68738e982..cbb386b33e1c 100644 --- a/frontend/dockerfile/dockerfile_heredoc_test.go +++ b/frontend/dockerfile/dockerfile_heredoc_test.go @@ -2,7 +2,6 @@ package dockerfile import ( "fmt" - "io/ioutil" "os" "path/filepath" "testing" @@ -68,19 +67,17 @@ FROM scratch COPY --from=build /dest / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ 
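+		// the result is exported to a local directory (destDir) so the heredoc-generated files can be read back below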
Exports: []client.ExportEntry{ @@ -104,7 +101,7 @@ COPY --from=build /dest / } for name, content := range contents { - dt, err := ioutil.ReadFile(filepath.Join(destDir, name)) + dt, err := os.ReadFile(filepath.Join(destDir, name)) require.NoError(t, err) require.Equal(t, content, string(dt)) } @@ -141,19 +138,17 @@ COPY <<"EOF" rawslashfile3 EOF `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -169,31 +164,31 @@ EOF }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "quotefile")) + dt, err := os.ReadFile(filepath.Join(destDir, "quotefile")) require.NoError(t, err) require.Equal(t, "\"quotes in file\"\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile1")) + dt, err = os.ReadFile(filepath.Join(destDir, "slashfile1")) require.NoError(t, err) require.Equal(t, "\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile2")) + dt, err = os.ReadFile(filepath.Join(destDir, "slashfile2")) require.NoError(t, err) require.Equal(t, "\\\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile3")) + dt, err = os.ReadFile(filepath.Join(destDir, "slashfile3")) require.NoError(t, err) require.Equal(t, "$\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile1")) + dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile1")) require.NoError(t, err) require.Equal(t, "\\\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile2")) + dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile2")) require.NoError(t, err) require.Equal(t, "\\\\\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile3")) + dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile3")) require.NoError(t, err) require.Equal(t, "\\$\n", string(dt)) } @@ -213,19 +208,17 @@ FROM scratch COPY --from=build /dest /dest `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -241,7 +234,7 @@ COPY --from=build /dest /dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest")) require.NoError(t, err) require.Equal(t, "i am\nroot\n", string(dt)) } @@ -263,19 +256,17 @@ FROM scratch COPY --from=build /dest /dest `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, 
client.SolveOpt{ Exports: []client.ExportEntry{ @@ -291,7 +282,7 @@ COPY --from=build /dest /dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest")) require.NoError(t, err) require.Equal(t, "foo\n", string(dt)) } @@ -314,19 +305,17 @@ FROM scratch COPY --from=build /dest /dest `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -342,7 +331,7 @@ COPY --from=build /dest /dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest")) require.NoError(t, err) require.Equal(t, "hello\nworld\n", string(dt)) } @@ -379,19 +368,17 @@ FROM scratch COPY --from=build /dest / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -415,7 +402,7 @@ COPY --from=build /dest / } for name, content := range contents { - dt, err := ioutil.ReadFile(filepath.Join(destDir, name)) + dt, err := os.ReadFile(filepath.Join(destDir, name)) require.NoError(t, err) require.Equal(t, content, string(dt)) } @@ -471,19 +458,17 @@ FROM scratch COPY --from=build /dest / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -511,7 +496,7 @@ COPY --from=build /dest / } for name, content := range contents { - dt, err := ioutil.ReadFile(filepath.Join(destDir, name)) + dt, err := os.ReadFile(filepath.Join(destDir, name)) require.NoError(t, err) require.Equal(t, content, string(dt)) } @@ -567,19 +552,17 @@ FROM scratch COPY --from=build /dest / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -610,13 +593,14 @@ COPY --from=build /dest / } for name, content := range contents { - dt, err := ioutil.ReadFile(filepath.Join(destDir, name)) + dt, err := os.ReadFile(filepath.Join(destDir, name)) require.NoError(t, err) require.Equal(t, content, string(dt)) } } func 
testOnBuildHeredoc(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) f := getFrontend(t, sb) registry, err := sb.NewRegistry() @@ -632,11 +616,11 @@ echo "hello world" >> /dest EOF `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -660,31 +644,19 @@ EOF }, nil) require.NoError(t, err) - dockerfile = []byte(fmt.Sprintf(` - FROM %s - `, target)) - - dir, err = tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) - require.NoError(t, err) - defer os.RemoveAll(dir) - dockerfile = []byte(fmt.Sprintf(` FROM %s AS base FROM scratch COPY --from=base /dest /dest `, target)) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -700,7 +672,7 @@ EOF }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest")) require.NoError(t, err) require.Equal(t, "hello world\n", string(dt)) } diff --git a/frontend/dockerfile/dockerfile_mount_test.go b/frontend/dockerfile/dockerfile_mount_test.go index 731d5f2d78dc..3a9ec3875119 100644 --- a/frontend/dockerfile/dockerfile_mount_test.go +++ b/frontend/dockerfile/dockerfile_mount_test.go @@ -1,10 +1,8 @@ package dockerfile import ( - "io/ioutil" "os" "path/filepath" - "strconv" "testing" "github.com/containerd/continuity/fs/fstest" @@ -26,12 +24,11 @@ var mountTests = integration.TestFuncs( testMountFromError, testMountInvalid, testMountTmpfsSize, + testCacheMountUser, ) func init() { allTests = append(allTests, mountTests...) - - fileOpTests = append(fileOpTests, integration.TestFuncs(testCacheMountUser)...) } func testMountContext(t *testing.T, sb integration.Sandbox) { @@ -42,12 +39,12 @@ FROM busybox RUN --mount=target=/context [ "$(cat /context/testfile)" == "contents0" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("testfile", []byte("contents0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -71,11 +68,11 @@ RUN --mount=target=/mytmp,type=tmpfs touch /mytmp/foo RUN [ ! 
-f /mytmp/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -98,11 +95,11 @@ FROM scratch RUN --mont=target=/mytmp,type=tmpfs /bin/true `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -123,11 +120,11 @@ RUN --mont=target=/mytmp,type=tmpfs /bin/true RUN --mount=typ=tmpfs /bin/true `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ @@ -144,11 +141,11 @@ RUN --mont=target=/mytmp,type=tmpfs /bin/true RUN --mount=type=tmp /bin/true `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ @@ -176,20 +173,18 @@ from scratch COPY --from=second /unique /unique `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("cachebust", []byte("0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -205,20 +200,18 @@ COPY --from=second /unique /unique }, nil) require.NoError(t, err) - dt1, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt1, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) // repeat with changed file that should be still cached by content - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("cachebust", []byte("1"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -234,34 +227,30 @@ COPY --from=second /unique /unique }, nil) require.NoError(t, err) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt2, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) require.Equal(t, dt1, dt2) } func testCacheMountUser(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox RUN --mount=type=cache,target=/mycache,uid=1001,gid=1002,mode=0751 [ "$(stat -c "%u %g %f" /mycache)" == "1001 1002 41e9" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, 
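+		// the stat check in the RUN instruction above asserts the uid, gid and mode configured on the cache mount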
LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -280,11 +269,11 @@ RUN --mount=type=cache,target=/mycache2 [ ! -f /mycache2/foo ] RUN --mount=type=cache,target=/mycache [ -f /mycache/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -309,11 +298,11 @@ RUN --mount=type=cache,target=/mycache touch /mycache/foo RUN --mount=type=cache,target=$SOME_PATH [ -f $SOME_PATH/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -338,11 +327,11 @@ RUN --mount=type=$MNT_TYPE,target=/mycache2 touch /mycache2/foo RUN --mount=type=cache,target=/mycache2 [ -f /mycache2/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -372,11 +361,11 @@ FROM stage1 RUN --mount=type=$MNT_TYPE2,id=$MNT_ID,target=/whatever [ -f /whatever/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -403,11 +392,11 @@ RUN --mount=type=cache,id=mycache,target=/tmp/meta touch /tmp/meta/foo RUN --mount=type=cache,id=mycache,target=$META_PATH [ -f /tmp/meta/foo ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -434,11 +423,11 @@ ENV ttt=test RUN --mount=from=$ttt,type=cache,target=/tmp ls `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -464,19 +453,17 @@ FROM scratch COPY --from=base /tmpfssize / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -492,7 +479,7 @@ COPY --from=base /tmpfssize / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmpfssize")) + dt, err := os.ReadFile(filepath.Join(destDir, "tmpfssize")) require.NoError(t, err) require.Contains(t, string(dt), `size=131072k`) } diff --git a/frontend/dockerfile/dockerfile_outline_test.go b/frontend/dockerfile/dockerfile_outline_test.go new file mode 100644 index 000000000000..346fde14dff7 --- /dev/null +++ b/frontend/dockerfile/dockerfile_outline_test.go @@ -0,0 +1,309 @@ +package dockerfile + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/containerd/continuity/fs/fstest" + 
"github.com/moby/buildkit/client" + "github.com/moby/buildkit/frontend/dockerfile/builder" + gateway "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/util/testutil/integration" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +var outlineTests = integration.TestFuncs( + testOutlineArgs, + testOutlineSecrets, + testOutlineDescribeDefinition, +) + +func testOutlineArgs(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline) + f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } + + dockerfile := []byte(`ARG inherited=box +ARG inherited2=box2 +ARG unused=abc${inherited2} +# sfx is a suffix +ARG sfx="usy${inherited}" + +FROM b${sfx} AS first +# this is not assigned to anything +ARG FOO=123 +# BAR is a number +ARG BAR=456 +RUN true + +FROM alpine${unused} AS second +ARG BAZ +RUN true + +FROM scratch AS third +ARG ABC=a + +# target defines build target +FROM third AS target +COPY --from=first /etc/passwd / + +FROM second +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + called := false + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res, err := c.Solve(ctx, gateway.SolveRequest{ + FrontendOpt: map[string]string{ + "frontend.caps": "moby.buildkit.frontend.subrequests", + "requestid": "frontend.outline", + "build-arg:BAR": "678", + "target": "target", + }, + Frontend: "dockerfile.v0", + }) + require.NoError(t, err) + + outline, err := unmarshalOutline(res) + require.NoError(t, err) + + require.Equal(t, "target", outline.Name) + require.Equal(t, "defines build target", outline.Description) + + require.Equal(t, 1, len(outline.Sources)) + require.Equal(t, dockerfile, outline.Sources[0]) + + require.Equal(t, 5, len(outline.Args)) + + arg := outline.Args[0] + require.Equal(t, "inherited", arg.Name) + require.Equal(t, "box", arg.Value) + require.Equal(t, "", arg.Description) + require.Equal(t, int32(0), arg.Location.SourceIndex) + require.Equal(t, int32(1), arg.Location.Ranges[0].Start.Line) + + arg = outline.Args[1] + require.Equal(t, "sfx", arg.Name) + require.Equal(t, "usybox", arg.Value) + require.Equal(t, "is a suffix", arg.Description) + require.Equal(t, int32(5), arg.Location.Ranges[0].Start.Line) + + arg = outline.Args[2] + require.Equal(t, "FOO", arg.Name) + require.Equal(t, "123", arg.Value) + require.Equal(t, "", arg.Description) + require.Equal(t, int32(9), arg.Location.Ranges[0].Start.Line) + + arg = outline.Args[3] + require.Equal(t, "BAR", arg.Name) + require.Equal(t, "678", arg.Value) + require.Equal(t, "is a number", arg.Description) + + arg = outline.Args[4] + require.Equal(t, "ABC", arg.Name) + require.Equal(t, "a", arg.Value) + + called = true + return nil, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + require.True(t, called) +} + +func testOutlineSecrets(t *testing.T, sb integration.Sandbox) { + 
integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline) + f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } + + dockerfile := []byte(` +FROM busybox AS first +RUN --mount=type=secret,target=/etc/passwd,required=true --mount=type=ssh true + +FROM alpine AS second +RUN --mount=type=secret,id=unused --mount=type=ssh,id=ssh2 true + +FROM scratch AS third +ARG BAR +RUN --mount=type=secret,id=second${BAR} true + +FROM third AS target +COPY --from=first /foo / +RUN --mount=type=ssh,id=ssh3,required true + +FROM second +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + called := false + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res, err := c.Solve(ctx, gateway.SolveRequest{ + FrontendOpt: map[string]string{ + "frontend.caps": "moby.buildkit.frontend.subrequests", + "requestid": "frontend.outline", + "build-arg:BAR": "678", + "target": "target", + }, + Frontend: "dockerfile.v0", + }) + require.NoError(t, err) + + outline, err := unmarshalOutline(res) + require.NoError(t, err) + + require.Equal(t, 1, len(outline.Sources)) + require.Equal(t, dockerfile, outline.Sources[0]) + + require.Equal(t, 2, len(outline.Secrets)) + + secret := outline.Secrets[0] + require.Equal(t, "passwd", secret.Name) + require.Equal(t, true, secret.Required) + require.Equal(t, int32(0), secret.Location.SourceIndex) + require.Equal(t, int32(3), secret.Location.Ranges[0].Start.Line) + + secret = outline.Secrets[1] + require.Equal(t, "second678", secret.Name) + require.Equal(t, false, secret.Required) + require.Equal(t, int32(0), secret.Location.SourceIndex) + require.Equal(t, int32(10), secret.Location.Ranges[0].Start.Line) + + require.Equal(t, 2, len(outline.SSH)) + + ssh := outline.SSH[0] + require.Equal(t, "default", ssh.Name) + require.Equal(t, false, ssh.Required) + require.Equal(t, int32(0), ssh.Location.SourceIndex) + require.Equal(t, int32(3), ssh.Location.Ranges[0].Start.Line) + + ssh = outline.SSH[1] + require.Equal(t, "ssh3", ssh.Name) + require.Equal(t, true, ssh.Required) + require.Equal(t, int32(0), ssh.Location.SourceIndex) + require.Equal(t, int32(14), ssh.Location.Ranges[0].Start.Line) + + called = true + return nil, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + require.True(t, called) +} + +func testOutlineDescribeDefinition(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline) + f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + dockerfile := []byte(` +FROM scratch +COPY Dockerfile Dockerfile +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + called := false + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + reqs, err := subrequests.Describe(ctx, c) + require.NoError(t, 
err)
+
+		require.True(t, len(reqs) > 0)
+
+		hasOutline := false
+
+		for _, req := range reqs {
+			if req.Name != "frontend.outline" {
+				continue
+			}
+			hasOutline = true
+			require.Equal(t, subrequests.RequestType("rpc"), req.Type)
+			require.NotEqual(t, req.Version, "")
+		}
+		require.True(t, hasOutline)
+
+		called = true
+		return nil, nil
+	}
+
+	_, err = c.Build(sb.Context(), client.SolveOpt{
+		LocalDirs: map[string]string{
+			builder.DefaultLocalNameDockerfile: dir,
+		},
+	}, "", frontend, nil)
+	require.NoError(t, err)
+
+	require.True(t, called)
+}
+
+func unmarshalOutline(res *gateway.Result) (*outline.Outline, error) {
+	dt, ok := res.Metadata["result.json"]
+	if !ok {
+		return nil, errors.Errorf("missing frontend.outline")
+	}
+	var o outline.Outline
+	if err := json.Unmarshal(dt, &o); err != nil {
+		return nil, err
+	}
+	return &o, nil
+}
diff --git a/frontend/dockerfile/dockerfile_provenance_test.go b/frontend/dockerfile/dockerfile_provenance_test.go
new file mode 100644
index 000000000000..835f72a52b86
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_provenance_test.go
@@ -0,0 +1,909 @@
+package dockerfile
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/continuity/fs/fstest"
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
+	"github.com/moby/buildkit/frontend/dockerfile/builder"
+	gateway "github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/solver/llbsolver/provenance"
+	"github.com/moby/buildkit/util/contentutil"
+	"github.com/moby/buildkit/util/testutil"
+	"github.com/moby/buildkit/util/testutil/integration"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+)
+
+func testProvenanceAttestation(t *testing.T, sb integration.Sandbox) {
+	integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+	ctx := sb.Context()
+
+	c, err := client.New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	registry, err := sb.NewRegistry()
+	if errors.Is(err, integration.ErrRequirements) {
+		t.Skip(err.Error())
+	}
+	require.NoError(t, err)
+
+	f := getFrontend(t, sb)
+
+	dockerfile := []byte(`
+FROM busybox:latest
+RUN echo "ok" > /foo
+`)
+	dir, err := integration.Tmpdir(
+		t,
+		fstest.CreateFile("Dockerfile", dockerfile, 0600),
+	)
+	require.NoError(t, err)
+
+	for _, mode := range []string{"", "min", "max"} {
+		t.Run(mode, func(t *testing.T) {
+			var target string
+			if mode == "" {
+				target = registry + "/buildkit/testwithprovenance:none"
+			} else {
+				target = registry + "/buildkit/testwithprovenance:" + mode
+			}
+
+			provReq := ""
+			if mode != "" {
+				provReq = "mode=" + mode
+			}
+			_, err = f.Solve(sb.Context(), c, client.SolveOpt{
+				LocalDirs: map[string]string{
+					builder.DefaultLocalNameDockerfile: dir,
+					builder.DefaultLocalNameContext:    dir,
+				},
+				FrontendAttrs: map[string]string{
+					"attest:provenance": provReq,
+					"build-arg:FOO":     "bar",
+					"label:lbl":         "abc",
+					"vcs:source":        "https://user:pass@example.invalid/repo.git",
+					"vcs:revision":      "123456",
+					"filename":          "Dockerfile",
+					builder.DefaultLocalNameContext + ":foo": "https://foo:bar@example.invalid/foo.html",
+				},
+				Exports: []client.ExportEntry{
+					{
+						Type:
client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + require.NotNil(t, img) + require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data) + + att := imgs.Find("unknown/unknown") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + args := pred.Invocation.Parameters.Args + if isClient { + require.Equal(t, "", pred.Invocation.Parameters.Frontend) + require.Equal(t, 0, len(args), "%v", args) + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint) + } else if isGateway { + require.Equal(t, "gateway.v0", pred.Invocation.Parameters.Frontend) + + if mode == "max" || mode == "" { + require.Equal(t, 4, len(args), "%v", args) + require.True(t, pred.Metadata.Completeness.Parameters) + + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + require.Contains(t, args["source"], "buildkit_test/") + } else { + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, 2, len(args), "%v", args) + require.Contains(t, args["source"], "buildkit_test/") + } + require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", args["context:foo"]) + } else { + require.Equal(t, "dockerfile.v0", pred.Invocation.Parameters.Frontend) + + if mode == "max" || mode == "" { + require.Equal(t, 3, len(args)) + require.True(t, pred.Metadata.Completeness.Parameters) + + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + } else { + require.False(t, pred.Metadata.Completeness.Parameters) + require.Equal(t, 1, len(args), "%v", args) + } + require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", args["context:foo"]) + } + + expectedBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + if isGateway { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "docker/buildkit_test") + require.Equal(t, expectedBase, pred.Materials[1].URI) + require.NotEmpty(t, pred.Materials[1].Digest["sha256"]) + } else { + require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials) + require.Equal(t, expectedBase, pred.Materials[0].URI) + require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) + } + + 
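+			// ConfigSource and vcs hints are only recorded when the frontend runs inside BuildKit, and credentials in them must be masked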
if !isClient { + require.Equal(t, "Dockerfile", pred.Invocation.ConfigSource.EntryPoint) + require.Equal(t, "https://xxxxx:xxxxx@example.invalid/repo.git", pred.Metadata.BuildKitMetadata.VCS["source"]) + require.Equal(t, "123456", pred.Metadata.BuildKitMetadata.VCS["revision"]) + } + + require.NotEmpty(t, pred.Metadata.BuildInvocationID) + + require.Equal(t, 2, len(pred.Invocation.Parameters.Locals), "%+v", pred.Invocation.Parameters.Locals) + require.Equal(t, "context", pred.Invocation.Parameters.Locals[0].Name) + require.Equal(t, "dockerfile", pred.Invocation.Parameters.Locals[1].Name) + + require.NotNil(t, pred.Metadata.BuildFinishedOn) + require.True(t, time.Since(*pred.Metadata.BuildFinishedOn) < 5*time.Minute) + require.NotNil(t, pred.Metadata.BuildStartedOn) + require.True(t, time.Since(*pred.Metadata.BuildStartedOn) < 5*time.Minute) + require.True(t, pred.Metadata.BuildStartedOn.Before(*pred.Metadata.BuildFinishedOn)) + + require.True(t, pred.Metadata.Completeness.Environment) + require.Equal(t, platforms.Format(platforms.Normalize(platforms.DefaultSpec())), pred.Invocation.Environment.Platform) + + require.False(t, pred.Metadata.Completeness.Materials) + require.False(t, pred.Metadata.Reproducible) + require.False(t, pred.Metadata.Hermetic) + + if mode == "max" || mode == "" { + require.Equal(t, 2, len(pred.Metadata.BuildKitMetadata.Layers)) + require.NotNil(t, pred.Metadata.BuildKitMetadata.Source) + require.Equal(t, "Dockerfile", pred.Metadata.BuildKitMetadata.Source.Infos[0].Filename) + require.Equal(t, dockerfile, pred.Metadata.BuildKitMetadata.Source.Infos[0].Data) + require.NotNil(t, pred.BuildConfig) + + require.Equal(t, 3, len(pred.BuildConfig.Definition)) + } else { + require.Equal(t, 0, len(pred.Metadata.BuildKitMetadata.Layers)) + require.Nil(t, pred.Metadata.BuildKitMetadata.Source) + require.Nil(t, pred.BuildConfig) + } + }) + } +} + +func testGitProvenanceAttestation(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +RUN --network=none echo "git" > /foo +COPY myapp.Dockerfile / +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("myapp.Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + err = runShell(dir, + "git init", + "git config --local user.email test", + "git config --local user.name test", + "git add myapp.Dockerfile", + "git commit -m initial", + "git branch v1", + "git update-server-info", + ) + require.NoError(t, err) + + cmd := exec.Command("git", "rev-parse", "v1") + cmd.Dir = dir + expectedGitSHA, err := cmd.Output() + require.NoError(t, err) + + server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir)))) + defer server.Close() + + target := registry + "/buildkit/testwithprovenance:git" + + // inject dummy credentials to test that they are masked + expectedURL := strings.Replace(server.URL, "http://", "http://xxxxx:xxxxx@", 1) + require.NotEqual(t, expectedURL, server.URL) + server.URL = strings.Replace(server.URL, "http://", "http://user:pass@", 1) + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context": server.URL + "/.git#v1", + "attest:provenance": "", 
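+			// an empty value enables provenance attestation with its default settings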
+ "filename": "myapp.Dockerfile", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + require.NotNil(t, img) + require.Equal(t, []byte("git\n"), img.Layers[1]["foo"].Data) + + att := imgs.Find("unknown/unknown") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest)) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + if isClient { + require.Empty(t, pred.Invocation.Parameters.Frontend) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint) + } else { + require.NotEmpty(t, pred.Invocation.Parameters.Frontend) + require.Equal(t, expectedURL+"/.git#v1", pred.Invocation.ConfigSource.URI) + require.Equal(t, "myapp.Dockerfile", pred.Invocation.ConfigSource.EntryPoint) + } + + expBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec()))) + if isGateway { + require.Equal(t, 3, len(pred.Materials), "%+v", pred.Materials) + + require.Contains(t, pred.Materials[0].URI, "pkg:docker/buildkit_test/") + require.NotEmpty(t, pred.Materials[0].Digest) + + require.Equal(t, expBase, pred.Materials[1].URI) + require.NotEmpty(t, pred.Materials[1].Digest["sha256"]) + + require.Equal(t, expectedURL+"/.git#v1", pred.Materials[2].URI) + require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[2].Digest["sha1"]) + } else { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + + require.Equal(t, expBase, pred.Materials[0].URI) + require.NotEmpty(t, pred.Materials[0].Digest["sha256"]) + + require.Equal(t, expectedURL+"/.git#v1", pred.Materials[1].URI) + require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[1].Digest["sha1"]) + } + + require.Equal(t, 0, len(pred.Invocation.Parameters.Locals)) + + require.True(t, pred.Metadata.Completeness.Materials) + require.True(t, pred.Metadata.Completeness.Environment) + require.True(t, pred.Metadata.Hermetic) + + if isClient { + require.False(t, pred.Metadata.Completeness.Parameters) + } else { + require.True(t, pred.Metadata.Completeness.Parameters) + } + require.False(t, pred.Metadata.Reproducible) + + require.Equal(t, 0, len(pred.Metadata.BuildKitMetadata.VCS), "%+v", pred.Metadata.BuildKitMetadata.VCS) +} + +func testMultiPlatformProvenance(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureMultiPlatform, integration.FeatureProvenance) + ctx 
:= sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +ARG TARGETARCH +RUN echo "ok-$TARGETARCH" > /foo +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + target := registry + "/buildkit/testmultiprovenance:latest" + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=max", + "build-arg:FOO": "bar", + "label:lbl": "abc", + "platform": "linux/amd64,linux/arm64", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 4, len(imgs.Images)) + + _, isClient := f.(*clientFrontend) + _, isGateway := f.(*gatewayFrontend) + + for _, p := range []string{"linux/amd64", "linux/arm64"} { + img := imgs.Find(p) + require.NotNil(t, img) + if p == "linux/amd64" { + require.Equal(t, []byte("ok-amd64\n"), img.Layers[1]["foo"].Data) + } else { + require.Equal(t, []byte("ok-arm64\n"), img.Layers[1]["foo"].Data) + } + + att := imgs.FindAttestation(p) + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + if isGateway { + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "buildkit_test") + require.Contains(t, pred.Materials[1].URI, "pkg:docker/busybox@latest") + require.Contains(t, pred.Materials[1].URI, url.PathEscape(p)) + } else { + require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "pkg:docker/busybox@latest") + require.Contains(t, pred.Materials[0].URI, url.PathEscape(p)) + } + + args := pred.Invocation.Parameters.Args + if isClient { + require.Equal(t, 0, len(args), "%+v", args) + } else if isGateway { + require.Equal(t, 3, len(args), "%+v", args) + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + require.Contains(t, args["source"], "buildkit_test/") + } else { + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "bar", args["build-arg:FOO"]) + require.Equal(t, "abc", args["label:lbl"]) + } + } +} + +func testClientFrontendProvenance(t *testing.T, sb integration.Sandbox) { + 
integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance) + // Building with client frontend does not capture frontend provenance + // because frontend runs in client, not in BuildKit. + // This test builds Dockerfile inside a client frontend ensuring that + // in that case frontend provenance is captured. + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/clientprovenance:latest" + + f := getFrontend(t, sb) + + _, isClient := f.(*clientFrontend) + if !isClient { + t.Skip("not a client frontend") + } + + dockerfile := []byte(` + FROM alpine as x86target + RUN echo "alpine" > /foo + + FROM busybox:latest AS armtarget + RUN --network=none echo "bbox" > /foo + `) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md") + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + // This does not show up in provenance + res0, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{ + Filename: "README.md", + }) + if err != nil { + return nil, err + } + + res1, err := c.Solve(ctx, gateway.SolveRequest{ + Frontend: "dockerfile.v0", + FrontendOpt: map[string]string{ + "build-arg:FOO": string(dt[:3]), + "target": "armtarget", + }, + }) + if err != nil { + return nil, err + } + + res2, err := c.Solve(ctx, gateway.SolveRequest{ + Frontend: "dockerfile.v0", + FrontendOpt: map[string]string{ + "build-arg:FOO": string(dt[4:8]), + "target": "x86target", + }, + }) + if err != nil { + return nil, err + } + + res := gateway.NewResult() + res.AddRef("linux/arm64", res1.Ref) + res.AddRef("linux/amd64", res2.Ref) + + pl, err := json.Marshal(exptypes.Platforms{ + Platforms: []exptypes.Platform{ + { + ID: "linux/arm64", + Platform: ocispecs.Platform{OS: "linux", Architecture: "arm64"}, + }, + { + ID: "linux/amd64", + Platform: ocispecs.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + }) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, pl) + + return res, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=full", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 4, len(imgs.Images)) + + img := imgs.Find("linux/arm64") + require.NotNil(t, img) + require.Equal(t, []byte("bbox\n"), img.Layers[1]["foo"].Data) + + att := imgs.FindAttestation("linux/arm64") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + 
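+	// the attestation manifest's first layer holds the raw in-toto statement JSON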
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args := pred.Invocation.Parameters.Args + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "The", args["build-arg:FOO"]) + require.Equal(t, "armtarget", args["target"]) + + require.Equal(t, 2, len(pred.Invocation.Parameters.Locals)) + require.Equal(t, 1, len(pred.Materials)) + require.Contains(t, pred.Materials[0].URI, "docker/busybox") + + // amd64 + img = imgs.Find("linux/amd64") + require.NotNil(t, img) + require.Equal(t, []byte("alpine\n"), img.Layers[1]["foo"].Data) + + att = imgs.FindAttestation("linux/amd64") + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + attest = intoto.Statement{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + stmt = stmtT{} + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred = stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args = pred.Invocation.Parameters.Args + require.Equal(t, 2, len(args), "%+v", args) + require.Equal(t, "Moby", args["build-arg:FOO"]) + require.Equal(t, "x86target", args["target"]) + + require.Equal(t, 2, len(pred.Invocation.Parameters.Locals)) + require.Equal(t, 1, len(pred.Materials)) + require.Contains(t, pred.Materials[0].URI, "docker/alpine") +} + +func testClientLLBProvenance(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + target := registry + "/buildkit/clientprovenance:llb" + + f := getFrontend(t, sb) + + _, isClient := f.(*clientFrontend) + if !isClient { + t.Skip("not a client frontend") + } + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md") + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + // this also shows up in the provenance + res0, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{ + Filename: "README.md", + }) + if err != nil { + return nil, err + } + + st = llb.Image("alpine").File(llb.Mkfile("/foo", 0600, dt)) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + res1, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } 
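+		// both the alpine base image and the HTTP README should show up as materials in the provenance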
+ return res1, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=full", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + LocalDirs: map[string]string{}, + }, "", frontend, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + nativePlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + img := imgs.Find(nativePlatform) + require.NotNil(t, img) + require.Contains(t, string(img.Layers[1]["foo"].Data), "The Moby Project") + + att := imgs.FindAttestation(nativePlatform) + require.NotNil(t, att) + require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest") + var attest intoto.Statement + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest)) + require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type) + require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const + + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType) + require.Equal(t, "", pred.Builder.ID) + require.Equal(t, "", pred.Invocation.ConfigSource.URI) + + args := pred.Invocation.Parameters.Args + require.Equal(t, 0, len(args), "%+v", args) + require.Equal(t, 0, len(pred.Invocation.Parameters.Locals)) + + require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials) + require.Contains(t, pred.Materials[0].URI, "docker/alpine") + require.Contains(t, pred.Materials[1].URI, "README.md") +} + +func testSecretSSHProvenance(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +RUN --mount=type=secret,id=mysecret --mount=type=secret,id=othersecret --mount=type=ssh echo "ok" > /foo +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + target := registry + "/buildkit/testsecretprovenance:latest" + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=max", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + require.Equal(t, 2, len(imgs.Images)) + + expPlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + img := imgs.Find(expPlatform) + require.NotNil(t, 
img) + require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data) + + att := imgs.FindAttestation(expPlatform) + type stmtT struct { + Predicate provenance.ProvenancePredicate `json:"predicate"` + } + var stmt stmtT + require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt)) + pred := stmt.Predicate + + require.Equal(t, 2, len(pred.Invocation.Parameters.Secrets), "%+v", pred.Invocation.Parameters.Secrets) + require.Equal(t, "mysecret", pred.Invocation.Parameters.Secrets[0].ID) + require.True(t, pred.Invocation.Parameters.Secrets[0].Optional) + require.Equal(t, "othersecret", pred.Invocation.Parameters.Secrets[1].ID) + require.True(t, pred.Invocation.Parameters.Secrets[1].Optional) + + require.Equal(t, 1, len(pred.Invocation.Parameters.SSH), "%+v", pred.Invocation.Parameters.SSH) + require.Equal(t, "default", pred.Invocation.Parameters.SSH[0].ID) + require.True(t, pred.Invocation.Parameters.SSH[0].Optional) +} + +func testNilProvenance(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureProvenance) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM scratch +ENV FOO=bar +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + FrontendAttrs: map[string]string{ + "attest:provenance": "mode=max", + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + }, + }, + }, nil) + require.NoError(t, err) +} diff --git a/frontend/dockerfile/dockerfile_runnetwork_test.go b/frontend/dockerfile/dockerfile_runnetwork_test.go index 8244d48f5716..2d02110e383c 100644 --- a/frontend/dockerfile/dockerfile_runnetwork_test.go +++ b/frontend/dockerfile/dockerfile_runnetwork_test.go @@ -41,11 +41,11 @@ FROM busybox RUN ip link show eth0 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -77,11 +77,11 @@ RUN --network=none ! ip link show eth0 dockerfile += "RUN ip link show eth0" } - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -118,11 +118,11 @@ RUN --network=host nc 127.0.0.1 %s | grep foo dockerfile += fmt.Sprintf(`RUN ! 
nc 127.0.0.1 %s | grep foo`, port) } - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -141,8 +141,12 @@ RUN --network=host nc 127.0.0.1 %s | grep foo case networkHostGranted: require.NoError(t, err) case networkHostDenied: - require.Error(t, err) - require.Contains(t, err.Error(), "entitlement network.host is not allowed") + if !integration.IsTestDockerd() { + require.Error(t, err) + require.Contains(t, err.Error(), "entitlement network.host is not allowed") + } else { + require.NoError(t, err) + } default: require.Fail(t, "unexpected network.host mode %q", hostAllowed) } @@ -162,11 +166,11 @@ RUN nc 127.0.0.1 %s | grep foo RUN --network=none ! nc -z 127.0.0.1 %s `, port, port) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -188,8 +192,12 @@ RUN --network=none ! nc -z 127.0.0.1 %s case networkHostGranted: require.NoError(t, err) case networkHostDenied: - require.Error(t, err) - require.Contains(t, err.Error(), "entitlement network.host is not allowed") + if !integration.IsTestDockerd() { + require.Error(t, err) + require.Contains(t, err.Error(), "entitlement network.host is not allowed") + } else { + require.NoError(t, err) + } default: require.Fail(t, "unexpected network.host mode %q", hostAllowed) } diff --git a/frontend/dockerfile/dockerfile_runsecurity_test.go b/frontend/dockerfile/dockerfile_runsecurity_test.go index 4726182a9d71..99e3b3c49d38 100644 --- a/frontend/dockerfile/dockerfile_runsecurity_test.go +++ b/frontend/dockerfile/dockerfile_runsecurity_test.go @@ -4,7 +4,6 @@ package dockerfile import ( - "os" "testing" "github.com/containerd/continuity/fs/fstest" @@ -58,11 +57,11 @@ RUN --security=insecure ls -l /dev && dd if=/dev/zero of=disk.img bs=20M count=1 rm disk.img `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -97,11 +96,11 @@ RUN --security=insecure [ "$(printf '%x' $(( $(cat /proc/self/status | grep CapB RUN [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -135,11 +134,11 @@ FROM busybox RUN --security=sandbox [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -163,11 +162,11 @@ FROM busybox RUN [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) diff --git 
a/frontend/dockerfile/dockerfile_secrets_test.go b/frontend/dockerfile/dockerfile_secrets_test.go index ae00fac07df6..984bfacfe487 100644 --- a/frontend/dockerfile/dockerfile_secrets_test.go +++ b/frontend/dockerfile/dockerfile_secrets_test.go @@ -1,7 +1,6 @@ package dockerfile import ( - "os" "testing" "github.com/containerd/continuity/fs/fstest" @@ -31,11 +30,11 @@ RUN --mount=type=secret,required=false,mode=741,uid=100,gid=102,target=/mysecret RUN [ ! -f /mysecret ] # check no stub left behind `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -61,11 +60,11 @@ FROM busybox RUN --mount=type=secret,required,id=mysecret foo `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) diff --git a/frontend/dockerfile/dockerfile_ssh_test.go b/frontend/dockerfile/dockerfile_ssh_test.go index 9515aad12882..0714578e1bcb 100644 --- a/frontend/dockerfile/dockerfile_ssh_test.go +++ b/frontend/dockerfile/dockerfile_ssh_test.go @@ -5,7 +5,6 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" - "io/ioutil" "os" "path/filepath" "testing" @@ -35,17 +34,17 @@ FROM busybox RUN --mount=type=ssh,mode=741,uid=100,gid=102 [ "$(stat -c "%u %g %f" $SSH_AUTH_SOCK)" = "100 102 c1e1" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - k, err := rsa.GenerateKey(rand.Reader, 1024) + k, err := rsa.GenerateKey(rand.Reader, 2048) require.NoError(t, err) dt := pem.EncodeToMemory( @@ -55,11 +54,9 @@ RUN --mount=type=ssh,mode=741,uid=100,gid=102 [ "$(stat -c "%u %g %f" $SSH_AUTH_ }, ) - tmpDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600) + err = os.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600) require.NoError(t, err) ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{ diff --git a/frontend/dockerfile/dockerfile_targets_test.go b/frontend/dockerfile/dockerfile_targets_test.go new file mode 100644 index 000000000000..43e473c40da7 --- /dev/null +++ b/frontend/dockerfile/dockerfile_targets_test.go @@ -0,0 +1,193 @@ +package dockerfile + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/containerd/continuity/fs/fstest" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/frontend/dockerfile/builder" + gateway "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/targets" + "github.com/moby/buildkit/util/testutil/integration" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +var targetsTests = integration.TestFuncs( + testTargetsList, + testTargetsDescribeDefinition, +) + +func testTargetsList(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendTargets) + f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } + + dockerfile := []byte(` 
+# build defines stage for compiling the binary +FROM alpine AS build +RUN true + +FROM busybox as second +RUN false + +FROM alpine +RUN false + +# binary returns the compiled binary +FROM second AS binary +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + called := false + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res, err := c.Solve(ctx, gateway.SolveRequest{ + FrontendOpt: map[string]string{ + "frontend.caps": "moby.buildkit.frontend.subrequests", + "requestid": "frontend.targets", + }, + Frontend: "dockerfile.v0", + }) + require.NoError(t, err) + + list, err := unmarshalTargets(res) + require.NoError(t, err) + + require.Equal(t, 1, len(list.Sources)) + require.Equal(t, dockerfile, list.Sources[0]) + + require.Equal(t, 4, len(list.Targets)) + + target := list.Targets[0] + require.Equal(t, "build", target.Name) + require.Equal(t, "alpine", target.Base) + require.Equal(t, "defines stage for compiling the binary", target.Description) + require.Equal(t, false, target.Default) + require.Equal(t, int32(0), target.Location.SourceIndex) + require.Equal(t, int32(3), target.Location.Ranges[0].Start.Line) + + target = list.Targets[1] + require.Equal(t, "second", target.Name) + require.Equal(t, "", target.Description) + require.Equal(t, "busybox", target.Base) + require.Equal(t, false, target.Default) + require.Equal(t, int32(0), target.Location.SourceIndex) + require.Equal(t, int32(6), target.Location.Ranges[0].Start.Line) + + target = list.Targets[2] + require.Equal(t, "", target.Name) + require.Equal(t, "", target.Description) + require.Equal(t, "alpine", target.Base) + require.Equal(t, false, target.Default) + require.Equal(t, int32(0), target.Location.SourceIndex) + require.Equal(t, int32(9), target.Location.Ranges[0].Start.Line) + + target = list.Targets[3] + require.Equal(t, "binary", target.Name) + require.Equal(t, "returns the compiled binary", target.Description) + require.Equal(t, true, target.Default) + require.Equal(t, int32(0), target.Location.SourceIndex) + require.Equal(t, int32(13), target.Location.Ranges[0].Start.Line) + + called = true + return nil, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + require.True(t, called) +} + +func testTargetsDescribeDefinition(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendTargets) + f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + dockerfile := []byte(` +FROM scratch +COPY Dockerfile Dockerfile +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + called := false + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + reqs, err := subrequests.Describe(ctx, c) + require.NoError(t, err) + + require.True(t, len(reqs) > 0) + + hasTargets := false + + for _, req := range reqs { + if req.Name != 
"frontend.targets" { + continue + } + hasTargets = true + require.Equal(t, subrequests.RequestType("rpc"), req.Type) + require.NotEqual(t, req.Version, "") + } + require.True(t, hasTargets) + + called = true + return nil, nil + } + + _, err = c.Build(sb.Context(), client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + }, + }, "", frontend, nil) + require.NoError(t, err) + + require.True(t, called) +} + +func unmarshalTargets(res *gateway.Result) (*targets.List, error) { + dt, ok := res.Metadata["result.json"] + if !ok { + return nil, errors.Errorf("missing frontend.outline") + } + var l targets.List + if err := json.Unmarshal(dt, &l); err != nil { + return nil, err + } + return &l, nil +} diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index a4ca3578c7dd..ed49f9c8502a 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -8,29 +8,30 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "os" "os/exec" + "path" "path/filepath" "runtime" "sort" - "strconv" "strings" "testing" "time" "github.com/containerd/containerd" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/containerd/continuity/fs/fstest" + intoto "github.com/in-toto/in-toto-golang/in_toto" + controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" gateway "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/frontend/subrequests" "github.com/moby/buildkit/identity" @@ -39,16 +40,18 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/iohelper" "github.com/moby/buildkit/util/testutil" "github.com/moby/buildkit/util/testutil/httpserver" "github.com/moby/buildkit/util/testutil/integration" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) func init() { - if os.Getenv("TEST_DOCKERD") == "1" { + if integration.IsTestDockerd() { integration.InitDockerdWorker() } else { integration.InitOCIWorker() @@ -67,6 +70,7 @@ var allTests = integration.TestFuncs( testExportedHistory, testExposeExpansion, testUser, + testUserAdditionalGids, testCacheReleased, testDockerignore, testDockerignoreInvalid, @@ -89,7 +93,6 @@ var allTests = integration.TestFuncs( testQuotedMetaArgs, testIgnoreEntrypoint, testSymlinkedDockerfile, - testDockerfileAddArchiveWildcard, testEmptyWildcard, testWorkdirCreatesDir, testDockerfileAddArchiveWildcard, @@ -116,12 +119,14 @@ var allTests = integration.TestFuncs( testUlimit, testCgroupParent, testNamedImageContext, + testNamedImageContextPlatform, + testNamedImageContextTimestamps, + testNamedImageContextScratch, testNamedLocalContext, + testNamedOCILayoutContext, + testNamedOCILayoutContextExport, testNamedInputContext, testNamedMultiplatformInputContext, -) - -var fileOpTests = integration.TestFuncs( testEmptyDestDir, testCopyChownCreateDest, testCopyThroughSymlinkContext, @@ -144,6 +149,18 @@ var fileOpTests = 
integration.TestFuncs( testWorkdirCopyIgnoreRelative, testCopyFollowAllSymlinks, testDockerfileAddChownExpand, + testSourceDateEpochWithoutExporter, + testSBOMScannerImage, + testProvenanceAttestation, + testGitProvenanceAttestation, + testMultiPlatformProvenance, + testClientFrontendProvenance, + testClientLLBProvenance, + testSecretSSHProvenance, + testNilProvenance, + testSBOMScannerArgs, + testMultiPlatformWarnings, + testNilContextInSolveGateway, ) // Tests that depend on the `security.*` entitlements @@ -155,6 +172,11 @@ var networkTests = []integration.Test{} // Tests that depend on heredoc support var heredocTests = []integration.Test{} +// Tests that depend on reproducible env +var reproTests = integration.TestFuncs( + testReproSourceDateEpoch, +) + var opts []integration.TestOpt var securityOpts []integration.TestOpt @@ -170,9 +192,6 @@ func init() { opts = []integration.TestOpt{ integration.WithMirroredImages(integration.OfficialImages("busybox:latest")), - integration.WithMirroredImages(map[string]string{ - "docker/dockerfile-copy:v0.1.9": "docker.io/" + dockerfile2llb.DefaultCopyImage, - }), integration.WithMatrix("frontend", frontends), } @@ -194,10 +213,6 @@ func init() { func TestIntegration(t *testing.T) { integration.Run(t, allTests, opts...) - integration.Run(t, fileOpTests, append(opts, integration.WithMatrix("fileop", map[string]interface{}{ - "true": true, - "false": false, - }))...) integration.Run(t, securityTests, append(append(opts, securityOpts...), integration.WithMatrix("security.insecure", map[string]interface{}{ "granted": securityInsecureGranted, @@ -209,6 +224,12 @@ func TestIntegration(t *testing.T) { "denied": networkHostDenied, }))...) integration.Run(t, heredocTests, opts...) + integration.Run(t, outlineTests, opts...) + integration.Run(t, targetsTests, opts...) + + integration.Run(t, reproTests, append(opts, + // Only use the amd64 digest, regardless of the host platform + integration.WithMirroredImages(integration.OfficialImages("debian:bullseye-20230109-slim@sha256:1acb06a0c31fb467eb8327ad361f1091ab265e0bf26d452dea45dcb0c0ea5e75")))...)
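+ // The digest pin above keeps the base image byte-for-byte identical across
+ // runs, which the reproducibility checks in reproTests rely on.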
} func testDefaultEnvWithArgs(t *testing.T, sb integration.Sandbox) { @@ -229,20 +250,18 @@ COPY --from=build /out /out echo -n $my_arg $1 > /out `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("myscript.sh", script, 0700), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() for _, x := range []struct { name string @@ -269,7 +288,7 @@ echo -n $my_arg $1 > /out }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, x.expected, string(dt)) }) @@ -285,19 +304,17 @@ ENV myenv foo%sbar RUN [ "$myenv" = 'foo%sbar' ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -336,7 +353,8 @@ RUN [ ! -f foo ] && [ -f bar ] foo `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("Dockerfile.dockerignore", ignore, 0600), fstest.CreateFile("Dockerfile2", dockerfile2, 0600), @@ -345,7 +363,6 @@ foo fstest.CreateFile("bar", []byte("contents0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -373,7 +390,6 @@ foo func testEmptyDestDir(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -382,21 +398,18 @@ COPY testfile $empty RUN [ "$(cat testfile)" == "contents0" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("testfile", []byte("contents0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -406,6 +419,7 @@ RUN [ "$(cat testfile)" == "contents0" ] } func testExportCacheLoop(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) f := getFrontend(t, sb) dockerfile := []byte(` @@ -424,16 +438,14 @@ FROM scratch COPY --from=base2 /foo /f `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("hello.txt", []byte("hello"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - cacheDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(cacheDir) + cacheDir := t.TempDir() c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -497,13 +509,13 @@ COPY bar fordarwin FROM stage-$TARGETOS `) 
- dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("data"), 0600), fstest.CreateFile("bar", []byte("data2"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -571,19 +583,17 @@ WORKDIR /foo WORKDIR / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -611,11 +621,11 @@ func testCacheReleased(t *testing.T, sb integration.Sandbox) { FROM busybox `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -648,12 +658,12 @@ FROM scratch ENV foo bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile.web", dockerfile, 0600), fstest.Symlink("Dockerfile.web", "Dockerfile"), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -731,12 +741,12 @@ RUN e="300:400"; p="/file" ; a=` + "`" + `stat -c "%u:%g && e="300:400"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile.web", dockerfile, 0600), fstest.Symlink("Dockerfile.web", "Dockerfile"), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -774,22 +784,20 @@ FROM scratch COPY --from=base unique / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo1", []byte("foo1-data"), 0600), fstest.CreateFile("foo2", []byte("foo2-data"), 0600), fstest.CreateFile("bar", []byte("bar-data"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -805,15 +813,13 @@ COPY --from=base unique / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600) + err = os.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600) require.NoError(t, err) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -829,16 +835,14 @@ COPY --from=base unique / }, nil) require.NoError(t, err) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt2, err := 
os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) require.Equal(t, string(dt), string(dt2)) - err = ioutil.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600) + err = os.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600) require.NoError(t, err) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -854,7 +858,7 @@ COPY --from=base unique / }, nil) require.NoError(t, err) - dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt2, err = os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) require.NotEqual(t, string(dt), string(dt2)) } @@ -867,20 +871,18 @@ FROM scratch COPY foo nomatch* / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("contents0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -896,14 +898,13 @@ COPY foo nomatch* / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "contents0", string(dt)) } func testWorkdirUser(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -913,20 +914,17 @@ WORKDIR /mydir RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -937,7 +935,6 @@ RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ] func testWorkdirCopyIgnoreRelative(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -948,20 +945,17 @@ FROM scratch COPY --from=base Dockerfile . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -972,7 +966,6 @@ COPY --from=base Dockerfile . 
func testWorkdirExists(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -982,20 +975,17 @@ WORKDIR /mydir RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1006,7 +996,6 @@ RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ] func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -1020,11 +1009,11 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ] RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1032,8 +1021,7 @@ RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ] _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - "build-arg:group": "user", + "build-arg:group": "user", }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -1045,29 +1033,26 @@ RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ] func testCopyThroughSymlinkContext(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch COPY link/foo . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.Symlink("sub", "link"), fstest.CreateDir("sub", 0700), fstest.CreateFile("sub/foo", []byte(`contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -1076,9 +1061,6 @@ COPY link/foo . OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1086,14 +1068,13 @@ COPY link/foo . }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "contents", string(dt)) } func testCopyThroughSymlinkMultiStage(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox AS build @@ -1103,19 +1084,17 @@ COPY --from=build /sub/foo . 
COPY --from=build /sub2/foo bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -1124,9 +1103,6 @@ COPY --from=build /sub2/foo bar OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1134,34 +1110,31 @@ COPY --from=build /sub2/foo bar }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "data", string(dt)) } func testCopySocket(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch COPY . / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateSocket("socket.sock", 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -1170,9 +1143,6 @@ COPY . / OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1195,11 +1165,11 @@ ENTRYPOINT ["/nosuchcmd"] RUN ["ls"] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1228,19 +1198,17 @@ FROM scratch COPY --from=build /out . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ @@ -1256,7 +1224,7 @@ COPY --from=build /out . }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, "bar-box-foo", string(dt)) } @@ -1274,19 +1242,17 @@ FROM scratch COPY --from=build /out . 
`) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ @@ -1302,13 +1268,13 @@ COPY --from=build /out . }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, "foo bar:box-foo:123 456", string(dt)) } func testDefaultShellAndPath(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter) f := getFrontend(t, sb) dockerfile := []byte(` @@ -1317,19 +1283,17 @@ ENTRYPOINT foo bar COPY Dockerfile . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() out := filepath.Join(destDir, "out.tar") outW, err := os.Create(out) @@ -1352,7 +1316,7 @@ COPY Dockerfile . }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out.tar")) + dt, err := os.ReadFile(filepath.Join(destDir, "out.tar")) require.NoError(t, err) m, err := testutil.ReadTarToMap(dt, false) @@ -1398,7 +1362,7 @@ COPY Dockerfile . } func testExportMultiPlatform(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureMultiPlatform) f := getFrontend(t, sb) dockerfile := []byte(` @@ -1409,7 +1373,8 @@ LABEL target=$TARGETPLATFORM COPY arch-$TARGETARCH whoami `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("arch-arm", []byte(`i am arm`), 0600), fstest.CreateFile("arch-amd64", []byte(`i am amd64`), 0600), @@ -1417,15 +1382,12 @@ COPY arch-$TARGETARCH whoami fstest.CreateFile("arch-ppc64le", []byte(`i am ppc64le`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ @@ -1444,23 +1406,21 @@ COPY arch-$TARGETARCH whoami }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "windows_amd64/whoami")) + dt, err := os.ReadFile(filepath.Join(destDir, "windows_amd64/whoami")) require.NoError(t, err) require.Equal(t, "i am amd64", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_arm_v7/whoami")) + dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm_v7/whoami")) require.NoError(t, err) require.Equal(t, "i am arm", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_s390x/whoami")) + dt, err = os.ReadFile(filepath.Join(destDir, "linux_s390x/whoami")) require.NoError(t, err) require.Equal(t, "i am s390x", string(dt)) // repeat with oci exporter 
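+ // Note: the local exporter above wrote one directory per platform
+ // (windows_amd64, linux_arm_v7, linux_s390x); the OCI export below packs
+ // the same multi-platform build into a single out.tar instead.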
- destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() out := filepath.Join(destDir, "out.tar") outW, err := os.Create(out) @@ -1483,7 +1443,7 @@ COPY arch-$TARGETARCH whoami }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out.tar")) + dt, err = os.ReadFile(filepath.Join(destDir, "out.tar")) require.NoError(t, err) m, err := testutil.ReadTarToMap(dt, false) @@ -1540,29 +1500,25 @@ COPY arch-$TARGETARCH whoami // tonistiigi/fsutil#46 func testContextChangeDirToFile(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch COPY foo / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateDir("foo", 0700), fstest.CreateFile("foo/bar", []byte(`contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1570,16 +1526,14 @@ COPY foo / }, nil) require.NoError(t, err) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`contents2`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -1588,9 +1542,6 @@ COPY foo / OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1598,35 +1549,31 @@ COPY foo / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "contents2", string(dt)) } func testNoSnapshotLeak(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch COPY foo / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1638,9 +1585,6 @@ COPY foo / require.NoError(t, err) _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1657,7 +1601,6 @@ COPY foo / // #1197 func testCopyFollowAllSymlinks(t *testing.T, sb integration.Sandbox) { f := 
getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -1665,27 +1608,20 @@ COPY foo / COPY foo/sub bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("bar", []byte(`bar-contents`), 0600), fstest.CreateDir("foo", 0700), fstest.Symlink("../bar", "foo/sub"), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1696,7 +1632,6 @@ COPY foo/sub bar func testCopySymlinks(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -1704,7 +1639,8 @@ COPY foo / COPY sub/l* alllinks/ `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("bar", []byte(`bar-contents`), 0600), fstest.Symlink("bar", "foo"), @@ -1717,15 +1653,12 @@ COPY sub/l* alllinks/ fstest.CreateFile("sub/baz", []byte(`baz-contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -1734,9 +1667,6 @@ COPY sub/l* alllinks/ OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1744,19 +1674,19 @@ COPY sub/l* alllinks/ }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "bar-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l0")) + dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/l0")) require.NoError(t, err) require.Equal(t, "subfile-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/lfile")) + dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/lfile")) require.NoError(t, err) require.Equal(t, "lfile-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l1")) + dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/l1")) require.NoError(t, err) require.Equal(t, "baz-contents", string(dt)) } @@ -1771,11 +1701,9 @@ FROM scratch COPY --from=0 /foo /foo `) - srcDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(srcDir) + srcDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(srcDir, "Dockerfile"), dockerfile, 0600) + err := os.WriteFile(filepath.Join(srcDir, "Dockerfile"), dockerfile, 0600) require.NoError(t, err) resp := httpserver.Response{ @@ -1788,9 +1716,7 @@ COPY --from=0 /foo /foo }) defer server.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := 
t.TempDir() c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1810,7 +1736,7 @@ COPY --from=0 /foo /foo }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) } @@ -1828,11 +1754,11 @@ FROM scratch CMD ["test"] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1861,11 +1787,11 @@ SHELL ["ls"] ENTRYPOINT my entrypoint `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) target = "docker.io/moby/cmdoverridetest2:latest" _, err = f.Solve(sb.Context(), c, client.SolveOpt{ @@ -1920,11 +1846,11 @@ FROM scratch LABEL foo=bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -1953,12 +1879,12 @@ LABEL bar=baz COPY foo . `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("foo-contents"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) target = "docker.io/moby/testpullscratch2:latest" _, err = f.Solve(sb.Context(), c, client.SolveOpt{ @@ -2012,9 +1938,7 @@ COPY foo . def, err := echo.Marshal(sb.Context()) require.NoError(t, err) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Solve(sb.Context(), def, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -2030,7 +1954,7 @@ COPY foo . 
}, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "foo0", string(dt)) } @@ -2043,11 +1967,11 @@ ARG tag=nosuchtag FROM busybox:${tag} `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2066,7 +1990,6 @@ FROM busybox:${tag} } func testDockerfileDirs(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2080,12 +2003,12 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) { RUN cmp -s foo foo3 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("bar"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) @@ -2108,17 +2031,17 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) // different context and dockerfile directories - dir1, err := tmpdir( + dir1, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir1) - dir2, err := tmpdir( + dir2, err := integration.Tmpdir( + t, fstest.CreateFile("foo", []byte("bar"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir2) args, trace = f.DFCmdArgs(dir2, dir1) defer os.RemoveAll(trace) @@ -2135,7 +2058,6 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) { } func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) dockerfile := []byte(` @@ -2143,11 +2065,11 @@ func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) { RUN invalidcmd `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) @@ -2162,7 +2084,6 @@ func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) { } func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) dockerfile := []byte(` @@ -2170,11 +2091,11 @@ func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) { FNTRYPOINT ["/bin/sh", "-c", "echo invalidinstruction"] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2193,7 +2114,6 @@ func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) { } func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2221,24 +2141,22 @@ FROM scratch ADD %s /dest/ `, server.URL+"/foo")) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() - cmd := sb.Cmd(args + 
fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) err = cmd.Run() require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest/foo")) require.NoError(t, err) require.Equal(t, []byte("content1"), dt) @@ -2248,25 +2166,23 @@ FROM scratch ADD %s /dest/ `, server.URL+"/")) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace = f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) err = cmd.Run() require.NoError(t, err) destFile := filepath.Join(destDir, "dest/__unnamed__") - dt, err = ioutil.ReadFile(destFile) + dt, err = os.ReadFile(destFile) require.NoError(t, err) require.Equal(t, []byte("content2"), dt) @@ -2276,7 +2192,6 @@ ADD %s /dest/ } func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2300,24 +2215,22 @@ FROM scratch ADD t.tar / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("t.tar", buf.Bytes(), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() - cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, expectedContent, dt) @@ -2334,24 +2247,22 @@ FROM scratch ADD t.tar.gz / `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace = f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, expectedContent, dt) @@ -2361,24 +2272,22 @@ FROM scratch COPY t.tar.gz / `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace = f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd = sb.Cmd(args + fmt.Sprintf(" --output 
type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz")) + dt, err = os.ReadFile(filepath.Join(destDir, "t.tar.gz")) require.NoError(t, err) require.Equal(t, buf2.Bytes(), dt) @@ -2398,23 +2307,21 @@ FROM scratch ADD %s / `, server.URL+"/t.tar.gz")) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace = f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz")) + dt, err = os.ReadFile(filepath.Join(destDir, "t.tar.gz")) require.NoError(t, err) require.Equal(t, buf2.Bytes(), dt) @@ -2424,23 +2331,21 @@ FROM scratch ADD %s /newname.tar.gz `, server.URL+"/t.tar.gz")) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace = f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err = tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() - cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "newname.tar.gz")) + dt, err = os.ReadFile(filepath.Join(destDir, "newname.tar.gz")) require.NoError(t, err) require.Equal(t, buf2.Bytes(), dt) } @@ -2483,17 +2388,15 @@ FROM scratch ADD *.tar /dest `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("t.tar", buf.Bytes(), 0600), fstest.CreateFile("b.tar", buf2.Bytes(), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2513,18 +2416,17 @@ ADD *.tar /dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest/foo")) require.NoError(t, err) require.Equal(t, "content0", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/bar")) + dt, err = os.ReadFile(filepath.Join(destDir, "dest/bar")) require.NoError(t, err) require.Equal(t, "content1", string(dt)) } func testDockerfileAddChownExpand(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -2534,12 +2436,12 @@ ADD --chown=${owner}:${group} foo / RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`foo-contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2547,8 +2449,7 @@ RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ] _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - 
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - "build-arg:group": "nobody", + "build-arg:group": "nobody", }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -2559,7 +2460,6 @@ RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ] } func testSymlinkDestination(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2582,25 +2482,23 @@ ADD t.tar / COPY foo /symlink/ `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", expectedContent, 0600), fstest.CreateFile("t.tar", buf.Bytes(), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() - cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir)) + cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir)) require.NoError(t, cmd.Run()) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmp/symlink-target/foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "tmp/symlink-target/foo")) require.NoError(t, err) require.Equal(t, expectedContent, dt) } @@ -2618,17 +2516,17 @@ FROM scratch ENV foo=bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) target := "example.com/moby/dockerfilescratch:test" - cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target) + cmd := sb.Cmd(args + " --output type=image,name=" + target) err = cmd.Run() require.NoError(t, err) @@ -2673,8 +2571,7 @@ ENV foo=bar } func testExposeExpansion(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) f := getFrontend(t, sb) dockerfile := []byte(` @@ -2684,11 +2581,11 @@ EXPOSE $PORTS EXPOSE 5000 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -2764,7 +2661,8 @@ Dockerfile .dockerignore `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`foo-contents`), 0600), fstest.CreateFile("bar", []byte(`bar-contents`), 0600), @@ -2773,15 +2671,12 @@ Dockerfile fstest.CreateFile(".dockerignore", dockerignore, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -2797,7 +2692,7 @@ Dockerfile }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) @@ -2817,7 +2712,7 @@ Dockerfile require.Error(t, err) require.True(t, errors.Is(err, os.ErrNotExist)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bay")) + dt, err = os.ReadFile(filepath.Join(destDir, "bay")) 
require.NoError(t, err) require.Equal(t, "bay-contents", string(dt)) } @@ -2830,12 +2725,12 @@ FROM scratch COPY . . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile(".dockerignore", []byte("!\n"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) ctx, cancel := context.WithTimeout(sb.Context(), 15*time.Second) defer cancel() @@ -2866,11 +2761,11 @@ func testDockerfileLowercase(t *testing.T, sb integration.Sandbox) { dockerfile := []byte(`FROM scratch `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) ctx := sb.Context() @@ -2888,7 +2783,6 @@ func testDockerfileLowercase(t *testing.T, sb integration.Sandbox) { } func testExportedHistory(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2905,18 +2799,20 @@ RUN echo bar > foo4 RUN ["ls"] `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("contents0"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) args, trace := f.DFCmdArgs(dir, dir) defer os.RemoveAll(trace) + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) + target := "example.com/moby/dockerfilescratch:test" - cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target) + cmd := sb.Cmd(args + " --output type=image,name=" + target) require.NoError(t, cmd.Run()) // TODO: expose this test to OCI worker @@ -2966,17 +2862,8 @@ RUN ["ls"] require.NotNil(t, ociimg.History[6].Created) } -func skipDockerd(t *testing.T, sb integration.Sandbox) { - // TODO: remove me once dockerd supports the image and exporter. - t.Helper() - if os.Getenv("TEST_DOCKERD") == "1" { - t.Skip("dockerd missing a required exporter, cache exporter, or entitlement") - } -} - func testUser(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) - + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) f := getFrontend(t, sb) dockerfile := []byte(` @@ -3039,19 +2926,17 @@ COPY --from=base /out / USER nobody `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3067,11 +2952,11 @@ USER nobody }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "rootuser")) + dt, err := os.ReadFile(filepath.Join(destDir, "rootuser")) require.NoError(t, err) require.Equal(t, "root\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "daemonuser")) + dt, err = os.ReadFile(filepath.Join(destDir, "daemonuser")) require.NoError(t, err) require.Equal(t, "daemon\n", string(dt)) @@ -3120,9 +3005,45 @@ USER nobody require.Equal(t, "nobody", ociimg.Config.User) } +// testUserAdditionalGids ensures that that the primary GID is also included in the additional GID list. 
+// CVE-2023-25173: https://github.com/advisories/GHSA-hmfx-3pcx-653p +func testUserAdditionalGids(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + dockerfile := []byte(` +# Mimics the tests in https://github.com/containerd/containerd/commit/3eda46af12b1deedab3d0802adb2e81cb3521950 +FROM busybox +SHELL ["/bin/sh", "-euxc"] +RUN [ "$(id)" = "uid=0(root) gid=0(root) groups=0(root),10(wheel)" ] +USER 1234 +RUN [ "$(id)" = "uid=1234 gid=0(root) groups=0(root)" ] +USER 1234:1234 +RUN [ "$(id)" = "uid=1234 gid=1234 groups=1234" ] +USER daemon +RUN [ "$(id)" = "uid=1(daemon) gid=1(daemon) groups=1(daemon)" ] +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) +} + func testCopyChown(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox AS base @@ -3139,22 +3060,20 @@ FROM scratch COPY --from=base /out / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`foo-contents`), 0600), fstest.CreateDir("bar", 0700), fstest.CreateFile("bar/sub", nil, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3164,8 +3083,7 @@ COPY --from=base /out / }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - "build-arg:group": "nobody", + "build-arg:group": "nobody", }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -3174,22 +3092,21 @@ COPY --from=base /out / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooowner")) + dt, err := os.ReadFile(filepath.Join(destDir, "fooowner")) require.NoError(t, err) require.Equal(t, "daemon daemon\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subowner")) + dt, err = os.ReadFile(filepath.Join(destDir, "subowner")) require.NoError(t, err) require.Equal(t, "1000 nobody\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foobisowner")) + dt, err = os.ReadFile(filepath.Join(destDir, "foobisowner")) require.NoError(t, err) require.Equal(t, "1000 nobody\n", string(dt)) } func testCopyChmod(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox AS base @@ -3206,22 +3123,20 @@ FROM scratch COPY --from=base /out / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`foo-contents`), 0600), fstest.CreateFile("bar", []byte(`bar-contents`), 0700), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) +
destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3230,37 +3144,29 @@ COPY --from=base /out / OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, }, }, nil) - if !isFileOp { - require.Contains(t, err.Error(), "chmod is not supported") - return - } require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooperm")) + dt, err := os.ReadFile(filepath.Join(destDir, "fooperm")) require.NoError(t, err) require.Equal(t, "0644\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "barperm")) + dt, err = os.ReadFile(filepath.Join(destDir, "barperm")) require.NoError(t, err) require.Equal(t, "0777\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foobisperm")) + dt, err = os.ReadFile(filepath.Join(destDir, "foobisperm")) require.NoError(t, err) require.Equal(t, "0000\n", string(dt)) } func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -3271,7 +3177,8 @@ COPY files/foo.go dest/foo.go COPY files dest `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateDir("sub", 0700), fstest.CreateDir("sub/dir1", 0700), @@ -3281,15 +3188,12 @@ COPY files dest fstest.CreateFile("files/foo.go", []byte(`foo.go-contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3298,9 +3202,6 @@ COPY files dest OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -3308,18 +3209,17 @@ COPY files dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/foo.go")) + dt, err = os.ReadFile(filepath.Join(destDir, "dest/foo.go")) require.NoError(t, err) require.Equal(t, "foo.go-contents", string(dt)) } func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -3327,21 +3227,19 @@ ENV FOO bar COPY $FOO baz `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("bar", []byte(`bar-contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3350,9 +3248,6 @@ COPY $FOO baz OutputDir: destDir, }, }, - 
FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -3360,14 +3255,13 @@ COPY $FOO baz }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz")) + dt, err := os.ReadFile(filepath.Join(destDir, "baz")) require.NoError(t, err) require.Equal(t, "bar-contents", string(dt)) } func testCopyWildcards(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -3382,7 +3276,8 @@ COPY sub/dir1/. subdest5 COPY sub/dir1 subdest6 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo.go", []byte(`foo-contents`), 0600), fstest.CreateFile("bar.go", []byte(`bar-contents`), 0600), @@ -3392,15 +3287,12 @@ COPY sub/dir1 subdest6 fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3409,9 +3301,6 @@ COPY sub/dir1 subdest6 OutputDir: destDir, }, }, - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -3419,52 +3308,49 @@ COPY sub/dir1 subdest6 }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "gofiles/foo.go")) + dt, err := os.ReadFile(filepath.Join(destDir, "gofiles/foo.go")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "gofiles/bar.go")) + dt, err = os.ReadFile(filepath.Join(destDir, "gofiles/bar.go")) require.NoError(t, err) require.Equal(t, "bar-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo2.go")) + dt, err = os.ReadFile(filepath.Join(destDir, "foo2.go")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - if isFileOp { // non-fileop implementation is historically buggy - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) - } + dt, err = os.ReadFile(filepath.Join(destDir, "subdest/dir2/foo")) + require.NoError(t, err) + require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "subdest2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest3/bar")) + dt, err = os.ReadFile(filepath.Join(destDir, "subdest3/bar")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "all/foo.go")) + dt, err = os.ReadFile(filepath.Join(destDir, "all/foo.go")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - 
dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo")) + dt, err = os.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) } func testCopyRelative(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -3490,21 +3376,18 @@ COPY foo ../ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte(`hello`), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ - FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -3514,7 +3397,6 @@ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" } func testAddURLChmod(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -3540,19 +3422,17 @@ FROM scratch COPY --from=build /dest /dest `, server.URL+"/foo")) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := tmpdir() - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3568,7 +3448,7 @@ COPY --from=build /dest /dest }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest")) + dt, err := os.ReadFile(filepath.Join(destDir, "dest")) require.NoError(t, err) require.Equal(t, []byte("0644\n0755\n0413\n"), dt) } @@ -3576,9 +3456,7 @@ COPY --from=build /dest /dest func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - gitDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(gitDir) + gitDir := t.TempDir() dockerfile := ` FROM busybox AS build @@ -3587,7 +3465,7 @@ FROM scratch COPY --from=build foo bar ` - err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) + err := os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) require.NoError(t, err) err = runShell(gitDir, @@ -3604,7 +3482,7 @@ COPY --from=build foo bar COPY --from=build foo bar2 ` - err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) + err = os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600) require.NoError(t, err) err = runShell(gitDir, @@ -3617,9 +3495,7 @@ COPY --from=build foo bar2 server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir)))) defer server.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -3638,7 +3514,7 @@ COPY --from=build foo bar2 }, nil) require.NoError(t, 
err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, "fromgit", string(dt)) @@ -3647,9 +3523,7 @@ COPY --from=build foo bar2 require.True(t, errors.Is(err, os.ErrNotExist)) // second request from master branch contains both files - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -3664,11 +3538,11 @@ COPY --from=build foo bar2 }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err = os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, "fromgit", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2")) + dt, err = os.ReadFile(filepath.Join(destDir, "bar2")) require.NoError(t, err) require.Equal(t, "fromgit", string(dt)) } @@ -3709,9 +3583,7 @@ COPY foo bar }) defer server.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -3731,7 +3603,7 @@ COPY foo bar }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) } @@ -3744,19 +3616,17 @@ FROM scratch COPY --from=busybox /etc/passwd test `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3772,7 +3642,7 @@ COPY --from=busybox /etc/passwd test }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "test")) + dt, err := os.ReadFile(filepath.Join(destDir, "test")) require.NoError(t, err) require.Contains(t, string(dt), "root") @@ -3780,21 +3650,19 @@ COPY --from=busybox /etc/passwd test dockerfile = []byte(` FROM busybox AS golang -RUN mkdir /usr/bin && echo -n foo > /usr/bin/go +RUN mkdir -p /usr/bin && echo -n foo > /usr/bin/go FROM scratch COPY --from=golang /usr/bin/go go `) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3810,7 +3678,7 @@ COPY --from=golang /usr/bin/go go }, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "go")) + dt, err = os.ReadFile(filepath.Join(destDir, "go")) require.NoError(t, err) require.Contains(t, string(dt), "foo") } @@ -3826,20 +3694,18 @@ COPY --from=staGE0 bar baz FROM scratch COPY --from=stage1 baz bax `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("foo-contents"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := 
client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -3858,33 +3724,29 @@ COPY --from=stage1 baz bax }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz")) + dt, err := os.ReadFile(filepath.Join(destDir, "baz")) require.NoError(t, err) require.Contains(t, string(dt), "foo-contents") } func testLabels(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) f := getFrontend(t, sb) dockerfile := []byte(` FROM scratch LABEL foo=bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - target := "example.com/moby/dockerfilelabels:test" _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -3940,7 +3802,6 @@ LABEL foo=bar // #2008 func testWildcardRenameCache(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) f := getFrontend(t, sb) dockerfile := []byte(` @@ -3948,21 +3809,17 @@ FROM alpine COPY file* /files/ RUN ls /files/file1 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("file1", []byte("foo"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - _, err = f.Solve(sb.Context(), c, client.SolveOpt{ LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -3985,6 +3842,7 @@ RUN ls /files/file1 } func testOnBuildCleared(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) f := getFrontend(t, sb) registry, err := sb.NewRegistry() @@ -3998,11 +3856,11 @@ FROM busybox ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -4031,11 +3889,11 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo FROM %s `, target)) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) target2 := registry + "/buildkit/testonbuild:child" @@ -4062,15 +3920,13 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo COPY --from=base /out / `, target2)) - dir, err = tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -4086,13 +3942,13 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo }, nil) require.NoError(t, err) - 
dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) require.Equal(t, "11", string(dt)) } func testCacheMultiPlatformImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) f := getFrontend(t, sb) registry, err := sb.NewRegistry() @@ -4110,11 +3966,11 @@ COPY --from=base unique / COPY --from=base arch / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -4161,14 +4017,14 @@ COPY --from=base arch / desc, provider, err := contentutil.ProviderFromRef(target + "-img") require.NoError(t, err) - imgMap, err := readIndex(sb.Context(), provider, desc) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) require.NoError(t, err) - require.Equal(t, 2, len(imgMap)) + require.Equal(t, 2, len(imgs.Images)) - require.Equal(t, "amd64", string(imgMap["linux/amd64"].layers[1]["arch"].Data)) - dtamd := imgMap["linux/amd64"].layers[0]["unique"].Data - dtarm := imgMap["linux/arm/v7"].layers[0]["unique"].Data + require.Equal(t, "amd64", string(imgs.Find("linux/amd64").Layers[1]["arch"].Data)) + dtamd := imgs.Find("linux/amd64").Layers[0]["unique"].Data + dtarm := imgs.Find("linux/arm/v7").Layers[0]["unique"].Data require.NotEqual(t, dtamd, dtarm) for i := 0; i < 2; i++ { @@ -4201,21 +4057,21 @@ COPY --from=base arch / require.Equal(t, desc.Digest, desc2.Digest) - imgMap, err = readIndex(sb.Context(), provider, desc2) + imgs, err = testutil.ReadImages(sb.Context(), provider, desc2) require.NoError(t, err) - require.Equal(t, 2, len(imgMap)) + require.Equal(t, 2, len(imgs.Images)) - require.Equal(t, "arm", string(imgMap["linux/arm/v7"].layers[1]["arch"].Data)) - dtamd2 := imgMap["linux/amd64"].layers[0]["unique"].Data - dtarm2 := imgMap["linux/arm/v7"].layers[0]["unique"].Data + require.Equal(t, "arm", string(imgs.Find("linux/arm/v7").Layers[1]["arch"].Data)) + dtamd2 := imgs.Find("linux/amd64").Layers[0]["unique"].Data + dtarm2 := imgs.Find("linux/arm/v7").Layers[0]["unique"].Data require.Equal(t, string(dtamd), string(dtamd2)) require.Equal(t, string(dtarm), string(dtarm2)) } } func testCacheImportExport(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport) f := getFrontend(t, sb) registry, err := sb.NewRegistry() @@ -4234,20 +4090,18 @@ COPY --from=base const / COPY --from=base unique / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("foobar"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() target := registry + "/buildkit/testexportdf:latest" @@ -4271,18 +4125,16 @@ COPY --from=base unique / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "const")) + dt, err := os.ReadFile(filepath.Join(destDir, "const")) require.NoError(t, err) require.Equal(t, "foobar", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt, err = os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, 
err) ensurePruneAll(t, c, sb) - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -4301,21 +4153,17 @@ COPY --from=base unique / }, nil) require.NoError(t, err) - dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const")) + dt2, err := os.ReadFile(filepath.Join(destDir, "const")) require.NoError(t, err) require.Equal(t, "foobar", string(dt2)) - dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) + dt2, err = os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) require.Equal(t, string(dt), string(dt2)) - - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) } func testReproducibleIDs(t *testing.T, sb integration.Sandbox) { - skipDockerd(t, sb) + integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter) f := getFrontend(t, sb) dockerfile := []byte(` @@ -4324,21 +4172,17 @@ ENV foo=bar COPY foo / RUN echo bar > bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("foo-contents"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - target := "example.com/moby/dockerfileids:test" opt := client.SolveOpt{ FrontendAttrs: map[string]string{}, @@ -4405,21 +4249,17 @@ COPY foo / RUN echo bar > bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("foobar"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - target := "example.com/moby/dockerfileexpids:test" cacheTarget := registry + "/test/dockerfileexpids:cache" opt := client.SolveOpt{ @@ -4487,19 +4327,17 @@ FROM scratch COPY --from=s0 unique / COPY --from=s1 unique2 / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() opt := client.SolveOpt{ FrontendAttrs: map[string]string{}, @@ -4518,9 +4356,7 @@ COPY --from=s1 unique2 / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - destDir2, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir2 := t.TempDir() opt.FrontendAttrs["no-cache"] = "" opt.Exports[0].OutputDir = destDir2 @@ -4528,24 +4364,22 @@ COPY --from=s1 unique2 / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - unique1Dir1, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + unique1Dir1, err := os.ReadFile(filepath.Join(destDir, "unique")) require.NoError(t, err) - unique1Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique")) + unique1Dir2, err := os.ReadFile(filepath.Join(destDir2, "unique")) require.NoError(t, err) - unique2Dir1, err := 
ioutil.ReadFile(filepath.Join(destDir, "unique2")) + unique2Dir1, err := os.ReadFile(filepath.Join(destDir, "unique2")) require.NoError(t, err) - unique2Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique2")) + unique2Dir2, err := os.ReadFile(filepath.Join(destDir2, "unique2")) require.NoError(t, err) require.NotEqual(t, string(unique1Dir1), string(unique1Dir2)) require.NotEqual(t, string(unique2Dir1), string(unique2Dir2)) - destDir3, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir3 := t.TempDir() opt.FrontendAttrs["no-cache"] = "s1" opt.Exports[0].OutputDir = destDir3 @@ -4553,10 +4387,10 @@ COPY --from=s1 unique2 / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - unique1Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique")) + unique1Dir3, err := os.ReadFile(filepath.Join(destDir3, "unique")) require.NoError(t, err) - unique2Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique2")) + unique2Dir3, err := os.ReadFile(filepath.Join(destDir3, "unique2")) require.NoError(t, err) require.Equal(t, string(unique1Dir2), string(unique1Dir3)) @@ -4573,21 +4407,19 @@ FROM build-${TARGETOS} COPY foo2 bar2 `, runtime.GOOS)) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("d0"), 0600), fstest.CreateFile("foo2", []byte("d1"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() opt := client.SolveOpt{ Exports: []client.ExportEntry{ @@ -4605,11 +4437,11 @@ COPY foo2 bar2 _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, "d0", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2")) + dt, err = os.ReadFile(filepath.Join(destDir, "bar2")) require.NoError(t, err) require.Equal(t, "d1", string(dt)) } @@ -4626,19 +4458,17 @@ FROM scratch COPY --from=build out . `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() opt := client.SolveOpt{ Exports: []client.ExportEntry{ @@ -4660,11 +4490,11 @@ COPY --from=build out . 
_, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "platform")) + dt, err := os.ReadFile(filepath.Join(destDir, "platform")) require.NoError(t, err) require.Equal(t, "darwin/ppc64le", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "os")) + dt, err = os.ReadFile(filepath.Join(destDir, "os")) require.NoError(t, err) require.Equal(t, "freebsd", string(dt)) } @@ -4682,19 +4512,17 @@ FROM scratch COPY --from=build /out / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() opt := client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -4717,14 +4545,12 @@ COPY --from=build /out / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt)) // repeat with changed default args should match the old cache - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() opt = client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -4746,14 +4572,12 @@ COPY --from=build /out / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt)) // changing actual value invalidates cache - destDir, err = ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir = t.TempDir() opt = client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -4775,14 +4599,13 @@ COPY --from=build /out / _, err = f.Solve(sb.Context(), c, opt, nil) require.NoError(t, err) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Equal(t, "hpvalue2::::foocontents2::::bazcontent", string(dt)) } func testTarContext(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -4823,8 +4646,7 @@ COPY foo / _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - "context": url, + "context": url, }, Session: []session.Attachable{up}, }, nil) @@ -4833,7 +4655,6 @@ COPY foo / func testTarContextExternalDockerfile(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) - isFileOp := getFileOp(t, sb) foo := []byte("contents") @@ -4855,11 +4676,11 @@ func testTarContextExternalDockerfile(t *testing.T, sb integration.Sandbox) { FROM scratch COPY foo bar `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -4869,16 +4690,13 @@ COPY foo bar url := up.Add(buf) // repeat with changed default args should match the old cache - destDir, err := ioutil.TempDir("", 
"buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), - "context": url, - "dockerfilekey": builder.DefaultLocalNameDockerfile, - "contextsubdir": "sub/dir", + "context": url, + "dockerfilekey": builder.DefaultLocalNameDockerfile, + "contextsubdir": "sub/dir", }, Session: []session.Attachable{up}, LocalDirs: map[string]string{ @@ -4893,7 +4711,7 @@ COPY foo bar }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) require.NoError(t, err) require.Equal(t, string(dt), "contents") } @@ -4907,12 +4725,12 @@ func testFrontendUseForwardedSolveResults(t *testing.T, sb integration.Sandbox) FROM scratch COPY foo foo2 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), fstest.CreateFile("foo", []byte("data"), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { res, err := c.Solve(ctx, gateway.SolveRequest{ @@ -4946,9 +4764,7 @@ COPY foo foo2 }) } - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = c.Build(sb.Context(), client.SolveOpt{ Exports: []client.ExportEntry{ @@ -4964,7 +4780,7 @@ COPY foo foo2 }, "", frontend, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo3")) + dt, err := os.ReadFile(filepath.Join(destDir, "foo3")) require.NoError(t, err) require.Equal(t, dt, []byte("data")) } @@ -4976,9 +4792,7 @@ func testFrontendInputs(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() outMount := llb.Image("busybox").Run( llb.Shlex(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > /out/foo"`), @@ -4997,7 +4811,7 @@ func testFrontendInputs(t *testing.T, sb integration.Sandbox) { }, nil) require.NoError(t, err) - expected, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + expected, err := os.ReadFile(filepath.Join(destDir, "foo")) require.NoError(t, err) dockerfile := []byte(` @@ -5005,11 +4819,11 @@ FROM scratch COPY foo foo2 `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) _, err = f.Solve(sb.Context(), c, client.SolveOpt{ Exports: []client.ExportEntry{ @@ -5027,13 +4841,16 @@ COPY foo foo2 }, nil) require.NoError(t, err) - actual, err := ioutil.ReadFile(filepath.Join(destDir, "foo2")) + actual, err := os.ReadFile(filepath.Join(destDir, "foo2")) require.NoError(t, err) require.Equal(t, expected, actual) } func testFrontendSubrequests(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + if _, ok := f.(*clientFrontend); !ok { + t.Skip("only test with client frontend") + } c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -5044,15 +4861,11 @@ FROM scratch COPY Dockerfile Dockerfile `) - if gf, ok := f.(*gatewayFrontend); ok { - dockerfile = []byte(fmt.Sprintf("#syntax=%s\n\n%s", gf.gw, dockerfile)) - } - - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) 
require.NoError(t, err) - defer os.RemoveAll(dir) called := false @@ -5070,6 +4883,7 @@ COPY Dockerfile Dockerfile require.Equal(t, subrequests.RequestType("rpc"), req.Type) require.NotEqual(t, req.Version, "") require.True(t, len(req.Metadata) > 0) + require.Equal(t, "result.json", req.Metadata[0].Name) } } require.True(t, hasDescribe) @@ -5121,11 +4935,11 @@ RUN echo $HOSTNAME | grep foo RUN echo $(hostname) | grep foo `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) @@ -5179,19 +4993,17 @@ FROM scratch COPY --from=base /shmsize / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -5210,7 +5022,7 @@ COPY --from=base /shmsize / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "shmsize")) + dt, err := os.ReadFile(filepath.Join(destDir, "shmsize")) require.NoError(t, err) require.Contains(t, string(dt), `size=131072k`) } @@ -5224,19 +5036,17 @@ FROM scratch COPY --from=base /ulimit / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -5255,7 +5065,7 @@ COPY --from=base /ulimit / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "ulimit")) + dt, err := os.ReadFile(filepath.Join(destDir, "ulimit")) require.NoError(t, err) require.Equal(t, `1062`, strings.TrimSpace(string(dt))) } @@ -5273,19 +5083,17 @@ FROM scratch COPY --from=base /out / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ @@ -5304,7 +5112,7 @@ COPY --from=base /out / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.Contains(t, strings.TrimSpace(string(dt)), `/foocgroup/buildkit/`) } @@ -5323,20 +5131,19 @@ FROM scratch COPY --from=base /out / `) - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) f := getFrontend(t, sb) - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) + destDir := t.TempDir() _, err = f.Solve(sb.Context(), c, 
client.SolveOpt{ FrontendAttrs: map[string]string{ + // Make sure image resolution works as expected, do not add a tag or locator. "context:busybox": "docker-image://alpine", }, LocalDirs: map[string]string{ @@ -5352,55 +5159,77 @@ COPY --from=base /out / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err := os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.True(t, len(dt) > 0) -} - -func testNamedLocalContext(t *testing.T, sb integration.Sandbox) { - ctx := sb.Context() - c, err := client.New(ctx, sb.Address()) - require.NoError(t, err) - defer c.Close() + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) - dockerfile := []byte(` -FROM busybox AS base -RUN cat /etc/alpine-release > /out -FROM scratch -COPY --from=base /o* / + // Now test with an image with custom envs + dockerfile = []byte(` +FROM alpine:latest +ENV PATH=/foobar:$PATH +ENV FOOBAR=foobar `) - dir, err := tmpdir( + dir, err = integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - outf := []byte(`dummy-result`) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + target := registry + "/buildkit/testnamedimagecontext:latest" - dir2, err := tmpdir( - fstest.CreateFile("out", outf, 0600), - fstest.CreateFile("out2", outf, 0600), - fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600), - ) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) require.NoError(t, err) - defer os.RemoveAll(dir2) - f := getFrontend(t, sb) + dockerfile = []byte(` +FROM busybox AS base +RUN cat /etc/alpine-release > /out +RUN env | grep PATH > /env_path +RUN env | grep FOOBAR > /env_foobar +FROM scratch +COPY --from=base /out / +COPY --from=base /env_path / +COPY --from=base /env_foobar / + `) - destDir, err := ioutil.TempDir("", "buildkit") + dir, err = integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) require.NoError(t, err) - defer os.RemoveAll(destDir) + + f = getFrontend(t, sb) + + destDir = t.TempDir() _, err = f.Solve(sb.Context(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "context:base": "local:basedir", + "context:busybox": "docker-image://" + target, }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, - "basedir": dir2, }, Exports: []client.ExportEntry{ { @@ -5411,284 +5240,1388 @@ COPY --from=base /o* / }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dt, err = os.ReadFile(filepath.Join(destDir, "out")) require.NoError(t, err) require.True(t, len(dt) > 0) - _, err = ioutil.ReadFile(filepath.Join(destDir, "out2")) - require.Error(t, err) - require.True(t, errors.Is(err, os.ErrNotExist)) + dt, err = os.ReadFile(filepath.Join(destDir, "env_foobar")) + require.NoError(t, err) + require.Equal(t, "FOOBAR=foobar", strings.TrimSpace(string(dt))) + + dt, err = os.ReadFile(filepath.Join(destDir, "env_path")) + require.NoError(t, err) + require.Contains(t, string(dt), "/foobar:") } -func testNamedInputContext(t *testing.T, sb integration.Sandbox) { +func 
testNamedImageContextPlatform(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) ctx := sb.Context() c, err := client.New(ctx, sb.Address()) require.NoError(t, err) defer c.Close() - dockerfile := []byte(` -FROM alpine -ENV FOO=bar -RUN echo first > /out -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - defer os.RemoveAll(dir) - dockerfile2 := []byte(` -FROM base AS build -RUN echo "foo is $FOO" > /foo -FROM scratch -COPY --from=build /foo /out / -`) + // Build a base image and force buildkit to generate a manifest list. + dockerfile := []byte(`FROM --platform=$BUILDPLATFORM alpine:latest`) + target := registry + "/buildkit/testnamedimagecontextplatform:latest" - dir2, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile2, 0600), + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) f := getFrontend(t, sb) - b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{}) - if err != nil { - return nil, err - } - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - st, err := ref.ToState() - if err != nil { - return nil, err - } - - def, err := st.Marshal(ctx) - if err != nil { - return nil, err - } - - dt, ok := res.Metadata["containerimage.config"] - if !ok { - return nil, errors.Errorf("no containerimage.config in metadata") - } - - dt, err = json.Marshal(map[string][]byte{ - "containerimage.config": dt, - }) - if err != nil { - return nil, err - } - - res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{ - FrontendOpt: map[string]string{ - "dockerfilekey": builder.DefaultLocalNameDockerfile + "2", - "context:base": "input:base", - "input-metadata:base": string(dt), - }, - FrontendInputs: map[string]*pb.Definition{ - "base": def.ToPB(), - }, - }) - if err != nil { - return nil, err - } - return res, nil - } - - product := "buildkit_test" - - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - - _, err = c.Build(ctx, client.SolveOpt{ + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_MULTI_PLATFORM": "true", + }, LocalDirs: map[string]string{ - builder.DefaultLocalNameDockerfile: dir, - builder.DefaultLocalNameContext: dir, - builder.DefaultLocalNameDockerfile + "2": dir2, + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, }, Exports: []client.ExportEntry{ { - Type: client.ExporterLocal, - OutputDir: destDir, + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, }, }, - }, product, b, nil) + }, nil) require.NoError(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "out")) + dockerfile = []byte(` +FROM --platform=$BUILDPLATFORM busybox AS target +RUN echo hello +`) + + dir, err = integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) require.NoError(t, err) - require.Equal(t, "first\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + f = getFrontend(t, sb) + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context:busybox": "docker-image://" + target, + // random platform that would 
never exist so it doesn't conflict with the build machine + // here we specifically want to make sure that the platform chosen for the image source is the one in the dockerfile not the target platform. + "platform": "darwin/ppc64le", + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) require.NoError(t, err) - require.Equal(t, "foo is bar\n", string(dt)) } -func testNamedMultiplatformInputContext(t *testing.T, sb integration.Sandbox) { +func testNamedImageContextTimestamps(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush) ctx := sb.Context() c, err := client.New(ctx, sb.Address()) require.NoError(t, err) defer c.Close() - dockerfile := []byte(` -FROM --platform=$BUILDPLATFORM alpine -ARG TARGETARCH -ENV FOO=bar-$TARGETARCH -RUN echo "foo $TARGETARCH" > /out -`) - - dir, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile, 0600), - ) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } require.NoError(t, err) - defer os.RemoveAll(dir) - dockerfile2 := []byte(` -FROM base AS build -RUN echo "foo is $FOO" > /foo -FROM scratch -COPY --from=build /foo /out / -`) + f := getFrontend(t, sb) - dir2, err := tmpdir( - fstest.CreateFile("Dockerfile", dockerfile2, 0600), + dockerfile := []byte(` +FROM alpine +RUN echo foo >> /test +`) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) - - f := getFrontend(t, sb) - b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{ - FrontendOpt: map[string]string{ - "platform": "linux/amd64,linux/arm64", - }, - }) - if err != nil { + target := registry + "/buildkit/testnamedimagecontexttimestamps:latest" + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + img, err := testutil.ReadImage(sb.Context(), provider, desc) + require.NoError(t, err) + + dirDerived, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + targetDerived := registry + "/buildkit/testnamedimagecontexttimestampsderived:latest" + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context:alpine": "docker-image://" + target, + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dirDerived, + builder.DefaultLocalNameContext: dirDerived, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterImage, + Attrs: map[string]string{ + "name": targetDerived, + "push": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + desc, provider, err = contentutil.ProviderFromRef(targetDerived) + require.NoError(t, err) + imgDerived, err := testutil.ReadImage(sb.Context(), provider, desc) + require.NoError(t, err) + + require.NotEqual(t, img.Img.Created, imgDerived.Img.Created) + diff := imgDerived.Img.Created.Sub(*img.Img.Created) + require.Greater(t, diff, time.Duration(0)) + require.Less(t, diff, 10*time.Minute) 
+}
+
+func testNamedLocalContext(t *testing.T, sb integration.Sandbox) {
+	ctx := sb.Context()
+
+	c, err := client.New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	dockerfile := []byte(`
+FROM busybox AS base
+RUN cat /etc/alpine-release > /out
+FROM scratch
+COPY --from=base /o* /
+`)
+
+	dir, err := integration.Tmpdir(
+		t,
+		fstest.CreateFile("Dockerfile", dockerfile, 0600),
+	)
+	require.NoError(t, err)
+
+	outf := []byte(`dummy-result`)
+
+	dir2, err := integration.Tmpdir(
+		t,
+		fstest.CreateFile("out", outf, 0600),
+		fstest.CreateFile("out2", outf, 0600),
+		fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600),
+	)
+	require.NoError(t, err)
+
+	f := getFrontend(t, sb)
+
+	destDir := t.TempDir()
+
+	_, err = f.Solve(sb.Context(), c, client.SolveOpt{
+		FrontendAttrs: map[string]string{
+			"context:base": "local:basedir",
+		},
+		LocalDirs: map[string]string{
+			builder.DefaultLocalNameDockerfile: dir,
+			builder.DefaultLocalNameContext:    dir,
+			"basedir":                          dir2,
+		},
+		Exports: []client.ExportEntry{
+			{
+				Type:      client.ExporterLocal,
+				OutputDir: destDir,
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	dt, err := os.ReadFile(filepath.Join(destDir, "out"))
+	require.NoError(t, err)
+	require.True(t, len(dt) > 0)
+
+	_, err = os.ReadFile(filepath.Join(destDir, "out2"))
+	require.Error(t, err)
+	require.True(t, errors.Is(err, os.ErrNotExist))
+}
+
+func testNamedOCILayoutContext(t *testing.T, sb integration.Sandbox) {
+	integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout)
+	// how this test works:
+	// 1- we use a regular builder with a dockerfile to create an image with two files: "out" with content "first", "out2" with content "second"
+	// 2- we save the output to an OCI layout dir
+	// 3- we use another regular builder with a dockerfile to build using a referenced context "base", but override it to reference the output of the previous build
+	// 4- we check that the output of the second build matches our OCI layout, and not the referenced image
+	ctx := sb.Context()
+
+	c, err := client.New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	// create a tempdir where we will store the OCI layout
+	ocidir := t.TempDir()
+
+	ociDockerfile := []byte(`
+	FROM busybox:latest
+	WORKDIR /test
+	RUN sh -c "echo -n first > out"
+	RUN sh -c "echo -n second > out2"
+	ENV foo=bar
+	`)
+	inDir, err := integration.Tmpdir(
+		t,
+		fstest.CreateFile("Dockerfile", ociDockerfile, 0600),
+	)
+	require.NoError(t, err)
+
+	f := getFrontend(t, sb)
+
+	outW := bytes.NewBuffer(nil)
+
+	_, err = f.Solve(sb.Context(), c, client.SolveOpt{
+		LocalDirs: map[string]string{
+			builder.DefaultLocalNameDockerfile: inDir,
+			builder.DefaultLocalNameContext:    inDir,
+		},
+		Exports: []client.ExportEntry{
+			{
+				Type:   client.ExporterOCI,
+				Output: fixedWriteCloser(nopWriteCloser{outW}),
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	// extract the tar stream to the directory as OCI layout
+	m, err := testutil.ReadTarToMap(outW.Bytes(), false)
+	require.NoError(t, err)
+
+	for filename, content := range m {
+		fullFilename := path.Join(ocidir, filename)
+		err = os.MkdirAll(path.Dir(fullFilename), 0755)
+		require.NoError(t, err)
+		if content.Header.FileInfo().IsDir() {
+			err = os.MkdirAll(fullFilename, 0755)
+			require.NoError(t, err)
+		} else {
+			err = os.WriteFile(fullFilename, content.Data, 0644)
+			require.NoError(t, err)
+		}
+	}
+
+	var index ocispecs.Index
+	err = json.Unmarshal(m["index.json"].Data, &index)
+	require.NoError(t, err)
+	require.Equal(t, 1,
len(index.Manifests)) + digest := index.Manifests[0].Digest.Hex() + + store, err := local.NewStore(ocidir) + ociID := "ocione" + require.NoError(t, err) + + // we will use this simple dockerfile to test + // 1. busybox is used as is, but because we override the context for base, + // when we run `COPY --from=base`, it should take the /o* from the image in the store, + // rather than what we built on the first 2 lines here. + // 2. we override the context for `foo` to be our local OCI store, which has an `ENV foo=bar` override. + // As such, the `RUN echo $foo` step should have `$foo` set to `"bar"`, and so + // when we `COPY --from=imported`, it should have the content of `/outfoo` as `"bar"` + dockerfile := []byte(` +FROM busybox AS base +RUN cat /etc/alpine-release > out + +FROM foo AS imported +RUN echo -n $foo > outfoo + +FROM scratch +COPY --from=base /test/o* / +COPY --from=imported /test/outfoo / +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + destDir := t.TempDir() + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context:base": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest), + "context:foo": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest), + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + OCIStores: map[string]content.Store{ + ociID: store, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "out")) + require.NoError(t, err) + require.True(t, len(dt) > 0) + require.Equal(t, []byte("first"), dt) + + dt, err = os.ReadFile(filepath.Join(destDir, "out2")) + require.NoError(t, err) + require.True(t, len(dt) > 0) + require.Equal(t, []byte("second"), dt) + + dt, err = os.ReadFile(filepath.Join(destDir, "outfoo")) + require.NoError(t, err) + require.True(t, len(dt) > 0) + require.Equal(t, []byte("bar"), dt) +} + +func testNamedOCILayoutContextExport(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + ocidir := t.TempDir() + + dockerfile := []byte(` +FROM scratch +WORKDIR /test +ENV foo=bar + `) + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + f := getFrontend(t, sb) + + outW := bytes.NewBuffer(nil) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + Exports: []client.ExportEntry{{ + Type: client.ExporterOCI, + Output: fixedWriteCloser(nopWriteCloser{outW}), + }}, + }, nil) + require.NoError(t, err) + + m, err := testutil.ReadTarToMap(outW.Bytes(), false) + require.NoError(t, err) + + for filename, content := range m { + fullFilename := path.Join(ocidir, filename) + err = os.MkdirAll(path.Dir(fullFilename), 0755) + require.NoError(t, err) + if content.Header.FileInfo().IsDir() { + err = os.MkdirAll(fullFilename, 0755) + require.NoError(t, err) + } else { + err = os.WriteFile(fullFilename, content.Data, 0644) + require.NoError(t, err) + } + } + + var index ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &index) + require.NoError(t, 
err) + require.Equal(t, 1, len(index.Manifests)) + digest := index.Manifests[0].Digest.Hex() + + store, err := local.NewStore(ocidir) + ociID := "ocione" + require.NoError(t, err) + + dockerfile = []byte(` +FROM nonexistent AS base +`) + + dir, err = integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + outW = bytes.NewBuffer(nil) + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "context:nonexistent": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest), + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + OCIStores: map[string]content.Store{ + ociID: store, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterOCI, + Output: fixedWriteCloser(nopWriteCloser{outW}), + }, + }, + }, nil) + require.NoError(t, err) + + m, err = testutil.ReadTarToMap(outW.Bytes(), false) + require.NoError(t, err) + + err = json.Unmarshal(m["index.json"].Data, &index) + require.NoError(t, err) + require.Equal(t, 1, len(index.Manifests)) + digest = index.Manifests[0].Digest.Hex() + + var mfst ocispecs.Manifest + require.NoError(t, json.Unmarshal(m["blobs/sha256/"+digest].Data, &mfst)) + digest = mfst.Config.Digest.Hex() + + var cfg ocispecs.Image + require.NoError(t, json.Unmarshal(m["blobs/sha256/"+digest].Data, &cfg)) + + require.Equal(t, "/test", cfg.Config.WorkingDir) + require.Contains(t, cfg.Config.Env, "foo=bar") +} + +func testNamedInputContext(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + dockerfile := []byte(` +FROM alpine +ENV FOO=bar +RUN echo first > /out +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + dockerfile2 := []byte(` +FROM base AS build +RUN echo "foo is $FOO" > /foo +FROM scratch +COPY --from=build /foo /out / +`) + + dir2, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile2, 0600), + ) + require.NoError(t, err) + + f := getFrontend(t, sb) + + b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{}) + if err != nil { + return nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + st, err := ref.ToState() + if err != nil { + return nil, err + } + + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + + dt, ok := res.Metadata["containerimage.config"] + if !ok { + return nil, errors.Errorf("no containerimage.config in metadata") + } + + dt, err = json.Marshal(map[string][]byte{ + "containerimage.config": dt, + }) + if err != nil { + return nil, err + } + + res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{ + FrontendOpt: map[string]string{ + "dockerfilekey": builder.DefaultLocalNameDockerfile + "2", + "context:base": "input:base", + "input-metadata:base": string(dt), + }, + FrontendInputs: map[string]*pb.Definition{ + "base": def.ToPB(), + }, + }) + if err != nil { + return nil, err + } + return res, nil + } + + product := "buildkit_test" + + destDir := t.TempDir() + + _, err = c.Build(ctx, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + builder.DefaultLocalNameDockerfile + "2": dir2, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + 
}, + }, product, b, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "out")) + require.NoError(t, err) + require.Equal(t, "first\n", string(dt)) + + dt, err = os.ReadFile(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, "foo is bar\n", string(dt)) +} + +func testNamedMultiplatformInputContext(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureMultiPlatform) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + dockerfile := []byte(` +FROM --platform=$BUILDPLATFORM alpine +ARG TARGETARCH +ENV FOO=bar-$TARGETARCH +RUN echo "foo $TARGETARCH" > /out +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + dockerfile2 := []byte(` +FROM base AS build +RUN echo "foo is $FOO" > /foo +FROM scratch +COPY --from=build /foo /out / +`) + + dir2, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile2, 0600), + ) + require.NoError(t, err) + + f := getFrontend(t, sb) + + b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{ + FrontendOpt: map[string]string{ + "platform": "linux/amd64,linux/arm64", + }, + }) + if err != nil { + return nil, err + } + + if len(res.Refs) != 2 { + return nil, errors.Errorf("expected 2 refs, got %d", len(res.Refs)) + } + + inputs := map[string]*pb.Definition{} + st, err := res.Refs["linux/amd64"].ToState() + if err != nil { + return nil, err + } + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + inputs["base::linux/amd64"] = def.ToPB() + + st, err = res.Refs["linux/arm64"].ToState() + if err != nil { + return nil, err + } + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + inputs["base::linux/arm64"] = def.ToPB() + + frontendOpt := map[string]string{ + "dockerfilekey": builder.DefaultLocalNameDockerfile + "2", + "context:base::linux/amd64": "input:base::linux/amd64", + "context:base::linux/arm64": "input:base::linux/arm64", + "platform": "linux/amd64,linux/arm64", + } + + dt, ok := res.Metadata["containerimage.config/linux/amd64"] + if !ok { + return nil, errors.Errorf("no containerimage.config in metadata") + } + dt, err = json.Marshal(map[string][]byte{ + "containerimage.config": dt, + }) + if err != nil { + return nil, err + } + frontendOpt["input-metadata:base::linux/amd64"] = string(dt) + + dt, ok = res.Metadata["containerimage.config/linux/arm64"] + if !ok { + return nil, errors.Errorf("no containerimage.config in metadata") + } + dt, err = json.Marshal(map[string][]byte{ + "containerimage.config": dt, + }) + if err != nil { + return nil, err + } + frontendOpt["input-metadata:base::linux/arm64"] = string(dt) + + res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{ + FrontendOpt: frontendOpt, + FrontendInputs: inputs, + }) + if err != nil { return nil, err } + return res, nil + } + + product := "buildkit_test" + + destDir := t.TempDir() + + _, err = c.Build(ctx, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + builder.DefaultLocalNameDockerfile + "2": dir2, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, + }, product, b, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "linux_amd64/out")) + require.NoError(t, err) + 
require.Equal(t, "foo amd64\n", string(dt)) + + dt, err = os.ReadFile(filepath.Join(destDir, "linux_amd64/foo")) + require.NoError(t, err) + require.Equal(t, "foo is bar-amd64\n", string(dt)) + + dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm64/out")) + require.NoError(t, err) + require.Equal(t, "foo arm64\n", string(dt)) + + dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm64/foo")) + require.NoError(t, err) + require.Equal(t, "foo is bar-arm64\n", string(dt)) +} + +func testSourceDateEpochWithoutExporter(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch) + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM scratch +ENTRYPOINT foo bar +COPY Dockerfile . +`) + + dir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := os.MkdirTemp("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + out := filepath.Join(destDir, "out.tar") + outW, err := os.Create(out) + require.NoError(t, err) + + tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC) + + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()), + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + Exports: []client.ExportEntry{ + { + Type: client.ExporterOCI, + // disable exporter epoch to make sure we test dockerfile + Attrs: map[string]string{"source-date-epoch": ""}, + Output: fixedWriteCloser(outW), + }, + }, + }, nil) + require.NoError(t, err) + + dt, err := os.ReadFile(filepath.Join(destDir, "out.tar")) + require.NoError(t, err) + + m, err := testutil.ReadTarToMap(dt, false) + require.NoError(t, err) + + var idx ocispecs.Index + err = json.Unmarshal(m["index.json"].Data, &idx) + require.NoError(t, err) + + mlistHex := idx.Manifests[0].Digest.Hex() + + var mfst ocispecs.Manifest + err = json.Unmarshal(m["blobs/sha256/"+mlistHex].Data, &mfst) + require.NoError(t, err) + + var img ocispecs.Image + err = json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &img) + require.NoError(t, err) + + require.Equal(t, tm.Unix(), img.Created.Unix()) + for _, h := range img.History { + require.Equal(t, tm.Unix(), h.Created.Unix()) + } +} + +func testSBOMScannerImage(t *testing.T, sb integration.Sandbox) { + integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM) + ctx := sb.Context() + + c, err := client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox:latest +COPY <<-"EOF" /scan.sh + set -e + cat < $BUILDKIT_SCAN_DESTINATION/spdx.json + { + "_type": "https://in-toto.io/Statement/v0.1", + "predicateType": "https://spdx.dev/Document", + "predicate": {"name": "sbom-scan"} + } + BUNDLE +EOF +CMD sh /scan.sh +`) + scannerDir, err := integration.Tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + + scannerTarget := registry + "/buildkit/testsbomscanner:latest" + _, err = f.Solve(sb.Context(), c, client.SolveOpt{ + LocalDirs: 
+			builder.DefaultLocalNameDockerfile: scannerDir,
+			builder.DefaultLocalNameContext:    scannerDir,
+		},
+		Exports: []client.ExportEntry{
+			{
+				Type: client.ExporterImage,
+				Attrs: map[string]string{
+					"name": scannerTarget,
+					"push": "true",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+}
+
+func testSBOMScannerArgs(t *testing.T, sb integration.Sandbox) {
+	integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
+	ctx := sb.Context()
+
+	c, err := client.New(ctx, sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	registry, err := sb.NewRegistry()
+	if errors.Is(err, integration.ErrRequirements) {
+		t.Skip(err.Error())
+	}
+	require.NoError(t, err)
+
+	f := getFrontend(t, sb)
+
+	dockerfile := []byte(`
+FROM busybox:latest
+COPY <<-"EOF" /scan.sh
+	set -e
+	cat <<BUNDLE > $BUILDKIT_SCAN_DESTINATION/spdx.json
+	{
+	  "_type": "https://in-toto.io/Statement/v0.1",
+	  "predicateType": "https://spdx.dev/Document",
+	  "predicate": {"name": "core"}
+	}
+	BUNDLE
+	if [ "${BUILDKIT_SCAN_SOURCE_EXTRAS}" ]; then
+		for src in "${BUILDKIT_SCAN_SOURCE_EXTRAS}"/*; do
+			cat <<BUNDLE > $BUILDKIT_SCAN_DESTINATION/$(basename $src).spdx.json
+			{
+			  "_type": "https://in-toto.io/Statement/v0.1",
+			  "predicateType": "https://spdx.dev/Document",
+			  "predicate": {"name": "extra"}
+			}
+			BUNDLE
+		done
+	fi
+EOF
+CMD sh /scan.sh
+`)
+
+	scannerDir, err := integration.Tmpdir(
+		t,
+		fstest.CreateFile("Dockerfile", dockerfile, 0600),
+	)
+	require.NoError(t, err)
+
+	scannerTarget := registry + "/buildkit/testsbomscannerargs:latest"
+	_, err = f.Solve(sb.Context(), c, client.SolveOpt{
+		LocalDirs: map[string]string{
+			builder.DefaultLocalNameDockerfile: scannerDir,
+			builder.DefaultLocalNameContext:    scannerDir,
+		},
+		Exports: []client.ExportEntry{
+			{
+				Type: client.ExporterImage,
+				Attrs: map[string]string{
+					"name": scannerTarget,
+					"push": "true",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+}

diff --git a/frontend/dockerfile/docs/reference.md b/frontend/dockerfile/docs/reference.md

+> **Note on whitespace**
+>
+> For backward compatibility, leading whitespace before comments (`#`) and
+> instructions (such as `RUN`) is ignored, but discouraged. Leading whitespace
+> is not preserved in these cases, and the following examples are therefore
+> equivalent:
+>
+> ```dockerfile
+>         # this is a comment-line
+>     RUN echo hello
+> RUN echo world
+> ```
+>
+> ```dockerfile
+> # this is a comment-line
+> RUN echo hello
+> RUN echo world
+> ```
+>
+> Note however, that whitespace in instruction _arguments_, such as the commands
+> following `RUN`, is preserved, so the following example prints ` hello world`
+> with leading whitespace as specified:
+>
+> ```dockerfile
+> RUN echo "\
+>      hello\
+>      world"
+> ```
+
+## Parser directives
+
+Parser directives are optional, and affect the way in which subsequent lines
+in a `Dockerfile` are handled. Parser directives do not add layers to the build,
+and will not be shown as a build step. Parser directives are written as a
+special type of comment in the form `# directive=value`. A single directive
+may only be used once.
+
+Once a comment, empty line or builder instruction has been processed, Docker
+no longer looks for parser directives. Instead it treats anything formatted
+as a parser directive as a comment and does not attempt to validate if it might
+be a parser directive. Therefore, all parser directives must be at the very
+top of a `Dockerfile`.
+
+Parser directives are not case-sensitive. However, convention is for them to
+be lowercase. Convention is also to include a blank line following any
+parser directives. Line continuation characters are not supported in parser
+directives.
+
+Due to these rules, the following examples are all invalid:
+
+Invalid due to line continuation:
+
+```dockerfile
+# direc \
+tive=value
+```
+
+Invalid due to appearing twice:
+
+```dockerfile
+# directive=value1
+# directive=value2
+
+FROM ImageName
+```
+
+Treated as a comment due to appearing after a builder instruction:
+
+```dockerfile
+FROM ImageName
+# directive=value
+```
+
+Treated as a comment due to appearing after a comment which is not a parser
+directive:
+
+```dockerfile
+# About my dockerfile
+# directive=value
+FROM ImageName
+```
+
+The unknown directive is treated as a comment due to not being recognized. In
+addition, the known directive is treated as a comment due to appearing after
+a comment which is not a parser directive.
+
+```dockerfile
+# unknowndirective=value
+# knowndirective=value
+```
+
+Non line-breaking whitespace is permitted in a parser directive. Hence, the
+following lines are all treated identically:
+
+```dockerfile
+#directive=value
+# directive =value
+# directive= value
+# directive = value
+# dIrEcTiVe=value
+```
+
+The following parser directives are supported:
+
+- `syntax`
+- `escape`
+
+### syntax
+
+This feature is only available when using the [BuildKit](https://docs.docker.com/build/buildkit/)
+backend, and is ignored when using the classic builder backend.
+
+See the [Custom Dockerfile syntax](https://docs.docker.com/build/buildkit/dockerfile-frontend/)
+page for more information.
+
+### escape
+
+```dockerfile
+# escape=\ (backslash)
+```
+
+Or
+
+```dockerfile
+# escape=` (backtick)
+```
+
+The `escape` directive sets the character used to escape characters in a
+`Dockerfile`. If not specified, the default escape character is `\`.
+
+The escape character is used both to escape characters in a line, and to
+escape a newline. This allows a `Dockerfile` instruction to
+span multiple lines. Note that regardless of whether the `escape` parser
+directive is included in a `Dockerfile`, *escaping is not performed in
+a `RUN` command, except at the end of a line.*
+
+Setting the escape character to `` ` `` is especially useful on
+`Windows`, where `\` is the directory path separator. `` ` `` is consistent
+with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx).
+
+Consider the following example, which would fail in a non-obvious way on
+`Windows`. The second `\` at the end of the second line would be interpreted as an
+escape for the newline, instead of a target of the escape from the first `\`.
+Similarly, the `\` at the end of the third line would, assuming it was actually
+handled as an instruction, cause it to be treated as a line continuation. The result
+of this dockerfile is that the second and third lines are considered a single
+instruction:
+
+```dockerfile
+FROM microsoft/nanoserver
+COPY testfile.txt c:\\
+RUN dir c:\
+```
+
+Results in:
+
+```console
+PS E:\myproject> docker build -t cmd .
+
+Sending build context to Docker daemon 3.072 kB
+Step 1/2 : FROM microsoft/nanoserver
+ ---> 22738ff49c6d
+Step 2/2 : COPY testfile.txt c:\RUN dir c:
+GetFileAttributesEx c:RUN: The system cannot find the file specified.
+PS E:\myproject>
+```
+
+One solution to the above would be to use `/` as the target of both the `COPY`
+instruction, and `dir`. However, this syntax is, at best, confusing as it is not
+natural for paths on `Windows`, and at worst, error prone as not all commands on
+`Windows` support `/` as the path separator.
+ +By adding the `escape` parser directive, the following `Dockerfile` succeeds as +expected with the use of natural platform semantics for file paths on `Windows`: + +```dockerfile +# escape=` + +FROM microsoft/nanoserver +COPY testfile.txt c:\ +RUN dir c:\ +``` + +Results in: + +```console +PS E:\myproject> docker build -t succeeds --no-cache=true . + +Sending build context to Docker daemon 3.072 kB +Step 1/3 : FROM microsoft/nanoserver + ---> 22738ff49c6d +Step 2/3 : COPY testfile.txt c:\ + ---> 96655de338de +Removing intermediate container 4db9acbb1682 +Step 3/3 : RUN dir c:\ + ---> Running in a2c157f842f5 + Volume in drive C has no label. + Volume Serial Number is 7E6D-E0F7 + + Directory of c:\ + +10/05/2016 05:04 PM 1,894 License.txt +10/05/2016 02:22 PM Program Files +10/05/2016 02:14 PM Program Files (x86) +10/28/2016 11:18 AM 62 testfile.txt +10/28/2016 11:20 AM Users +10/28/2016 11:20 AM Windows + 2 File(s) 1,956 bytes + 4 Dir(s) 21,259,096,064 bytes free + ---> 01c7f3bef04f +Removing intermediate container a2c157f842f5 +Successfully built 01c7f3bef04f +PS E:\myproject> +``` + +## Environment replacement + +Environment variables (declared with [the `ENV` statement](#env)) can also be +used in certain instructions as variables to be interpreted by the +`Dockerfile`. Escapes are also handled for including variable-like syntax +into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +The `${variable_name}` syntax also supports a few of the standard `bash` +modifiers as specified below: + +- `${variable:-word}` indicates that if `variable` is set then the result + will be that value. If `variable` is not set then `word` will be the result. +- `${variable:+word}` indicates that if `variable` is set then `word` will be + the result, otherwise the result is the empty string. + +In all cases, `word` can be any string, including additional environment +variables. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + +```dockerfile +FROM busybox +ENV FOO=/bar +WORKDIR ${FOO} # WORKDIR /bar +ADD . $FOO # ADD . /bar +COPY \$FOO /quux # COPY $FOO /quux +``` + +Environment variables are supported by the following list of instructions in +the `Dockerfile`: + +- `ADD` +- `COPY` +- `ENV` +- `EXPOSE` +- `FROM` +- `LABEL` +- `STOPSIGNAL` +- `USER` +- `VOLUME` +- `WORKDIR` +- `ONBUILD` (when combined with one of the supported instructions above) + +Environment variable substitution will use the same value for each variable +throughout the entire instruction. In other words, in this example: + +```dockerfile +ENV abc=hello +ENV abc=bye def=$abc +ENV ghi=$abc +``` + +will result in `def` having a value of `hello`, not `bye`. However, +`ghi` will have a value of `bye` because it is not part of the same instruction +that set `abc` to `bye`. + +## .dockerignore file + +Before the docker CLI sends the context to the docker daemon, it looks +for a file named `.dockerignore` in the root directory of the context. +If this file exists, the CLI modifies the context to exclude files and +directories that match patterns in it. 
This helps to avoid +unnecessarily sending large or sensitive files and directories to the +daemon and potentially adding them to images using `ADD` or `COPY`. + +The CLI interprets the `.dockerignore` file as a newline-separated +list of patterns similar to the file globs of Unix shells. For the +purposes of matching, the root of the context is considered to be both +the working and the root directory. For example, the patterns +`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` +in the `foo` subdirectory of `PATH` or in the root of the git +repository located at `URL`. Neither excludes anything else. + +If a line in `.dockerignore` file starts with `#` in column 1, then this line is +considered as a comment and is ignored before interpreted by the CLI. + +Here is an example `.dockerignore` file: + +```gitignore +# comment +*/temp* +*/*/temp* +temp? +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|:------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `# comment` | Ignored. | +| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | +| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | +| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. | + + +Matching is done using Go's +[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. A +preprocessing step removes leading and trailing whitespace and +eliminates `.` and `..` elements using Go's +[filepath.Clean](https://golang.org/pkg/path/filepath/#Clean). Lines +that are blank after preprocessing are ignored. + +Beyond Go's filepath.Match rules, Docker also supports a special +wildcard string `**` that matches any number of directories (including +zero). For example, `**/*.go` will exclude all files that end with `.go` +that are found in all directories, including the root of the build context. + +Lines starting with `!` (exclamation mark) can be used to make exceptions +to exclusions. The following is an example `.dockerignore` file that +uses this mechanism: + +```gitignore +*.md +!README.md +``` + +All markdown files *except* `README.md` are excluded from the context. + +The placement of `!` exception rules influences the behavior: the last +line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. Consider the following example: + +```gitignore +*.md +!README*.md +README-secret.md +``` + +No markdown files are included in the context except README files other than +`README-secret.md`. + +Now consider this example: + +```gitignore +*.md +README-secret.md +!README*.md +``` + +All of the README files are included. The middle line has no effect because +`!README*.md` matches `README-secret.md` and comes last. + +You can even use the `.dockerignore` file to exclude the `Dockerfile` +and `.dockerignore` files. These files are still sent to the daemon +because it needs them to do its job. But the `ADD` and `COPY` instructions +do not copy them to the image. 
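+
+As a short illustrative sketch of that behavior, a `.dockerignore` like the
+following keeps both housekeeping files out of the resulting image; the daemon
+still receives them, but `ADD` and `COPY` will not place them in the result:
+
+```gitignore
+Dockerfile
+.dockerignore
+```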
+
+Finally, you may want to specify which files to include in the
+context, rather than which to exclude. To achieve this, specify `*` as
+the first pattern, followed by one or more `!` exception patterns.
+
+> **Note**
+>
+> For historical reasons, the pattern `.` is ignored.
+
+## FROM
+
+```dockerfile
+FROM [--platform=<platform>] <image> [AS <name>]
+```
+
+Or
+
+```dockerfile
+FROM [--platform=<platform>] <image>[:<tag>] [AS <name>]
+```
+
+Or
+
+```dockerfile
+FROM [--platform=<platform>] <image>[@<digest>] [AS <name>]
+```
+
+The `FROM` instruction initializes a new build stage and sets the
+[*Base Image*](https://docs.docker.com/glossary/#base-image) for subsequent instructions. As such, a
+valid `Dockerfile` must start with a `FROM` instruction. The image can be
+any valid image – it is especially easy to start by **pulling an image** from
+the [*Public Repositories*](https://docs.docker.com/docker-hub/repos/).
+
+- `ARG` is the only instruction that may precede `FROM` in the `Dockerfile`.
+  See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact).
+- `FROM` can appear multiple times within a single `Dockerfile` to
+  create multiple images or use one build stage as a dependency for another.
+  Simply make a note of the last image ID output by the commit before each new
+  `FROM` instruction. Each `FROM` instruction clears any state created by previous
+  instructions.
+- Optionally a name can be given to a new build stage by adding `AS name` to the
+  `FROM` instruction. The name can be used in subsequent `FROM` and
+  `COPY --from=<name>` instructions to refer to the image built in this stage.
+- The `tag` or `digest` values are optional. If you omit either of them, the
+  builder assumes a `latest` tag by default. The builder returns an error if it
+  cannot find the `tag` value.
+
+The optional `--platform` flag can be used to specify the platform of the image
+in case `FROM` references a multi-platform image. For example, `linux/amd64`,
+`linux/arm64`, or `windows/amd64`. By default, the target platform of the build
+request is used. Global build arguments can be used in the value of this flag,
+for example [automatic platform ARGs](#automatic-platform-args-in-the-global-scope)
+allow you to force a stage to the native build platform (`--platform=$BUILDPLATFORM`),
+and use it to cross-compile to the target platform inside the stage.
+
+### Understand how ARG and FROM interact
+
+`FROM` instructions support variables that are declared by any `ARG`
+instructions that occur before the first `FROM`.
+
+```dockerfile
+ARG CODE_VERSION=latest
+FROM base:${CODE_VERSION}
+CMD /code/run-app
+
+FROM extras:${CODE_VERSION}
+CMD /code/run-extras
+```
+
+An `ARG` declared before a `FROM` is outside of a build stage, so it
+can't be used in any instruction after a `FROM`. To use the default value of
+an `ARG` declared before the first `FROM`, use an `ARG` instruction without
+a value inside of a build stage:
+
+```dockerfile
+ARG VERSION=latest
+FROM busybox:$VERSION
+ARG VERSION
+RUN echo $VERSION > image_version
+```
+
+## RUN
+
+RUN has 2 forms:
+
+- `RUN <command>` (*shell* form, the command is run in a shell, which by
+default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows)
+- `RUN ["executable", "param1", "param2"]` (*exec* form)
+
+The `RUN` instruction will execute any commands in a new layer on top of the
+current image and commit the results. The resulting committed image will be
+used for the next step in the `Dockerfile`.
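+
+As a brief sketch of the two `RUN` forms (assuming an `alpine` base image),
+each instruction below executes in a fresh layer and its result is committed:
+
+```dockerfile
+FROM alpine
+# shell form: run via /bin/sh -c, so shell features such as redirection work
+RUN echo "hello" > /greeting
+# exec form: the executable is invoked directly, without a shell
+RUN ["cat", "/greeting"]
+```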
+ +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. + +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain the specified shell executable. + +The default shell for the *shell* form can be changed using the `SHELL` +command. + +In the *shell* form you can use a `\` (backslash) to continue a single +RUN instruction onto the next line. For example, consider these two lines: + +```dockerfile +RUN /bin/bash -c 'source $HOME/.bashrc; \ +echo $HOME' +``` + +Together they are equivalent to this single line: + +```dockerfile +RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' +``` + +To use a different shell, other than '/bin/sh', use the *exec* form passing in +the desired shell. For example: + +```dockerfile +RUN ["/bin/bash", "-c", "echo hello"] +``` + +> **Note** +> +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +Unlike the *shell* form, the *exec* form does not invoke a command shell. +This means that normal shell processing does not happen. For example, +`RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +If you want shell processing then either use the *shell* form or execute +a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. +When using the exec form and executing a shell directly, as in the case for +the shell form, it is the shell that is doing the environment variable +expansion, not docker. + +> **Note** +> +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path separator. +> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> +> ```dockerfile +> RUN ["c:\windows\system32\tasklist.exe"] +> ``` +> +> The correct syntax for this example is: +> +> ```dockerfile +> RUN ["c:\\windows\\system32\\tasklist.exe"] +> ``` + +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like +`RUN apt-get dist-upgrade -y` will be reused during the next build. The +cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for more information. + +The cache for `RUN` instructions can be invalidated by [`ADD`](#add) and [`COPY`](#copy) instructions. + +### Known issues (RUN) + +- [Issue 783](https://github.com/docker/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. You + might notice it during an attempt to `rm` a file, for example. + + For systems that have recent aufs version (i.e., `dirperm1` mount option can + be set), docker will attempt to fix the issue automatically by mounting + the layers with `dirperm1` option. More details on `dirperm1` option can be + found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) + + If your system doesn't have support for `dirperm1`, the issue describes a workaround. 
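+
+As a small sketch of the caching rule above (assuming an `ubuntu` base image),
+the result of the `RUN` below is reused on every rebuild until the cache is
+explicitly bypassed with `docker build --no-cache`:
+
+```dockerfile
+FROM ubuntu
+# cached after the first build; re-executed only when --no-cache is passed
+RUN apt-get update
+```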
+
+## RUN --mount
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.2`](#syntax)
+
+`RUN --mount` allows you to create filesystem mounts that the build can access.
+This can be used to:
+
+- Create bind mounts to the host filesystem or other build stages
+- Access build secrets or ssh-agent sockets
+- Use a persistent package management cache to speed up your build
+
+Syntax: `--mount=[type=<TYPE>][,option=<value>[,option=<value>]...]`
+
+### Mount types
+
+| Type                                      | Description                                                                                                |
+|-------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| [`bind`](#run---mounttypebind) (default)  | Bind-mount context directories (read-only).                                                                |
+| [`cache`](#run---mounttypecache)          | Mount a temporary directory to cache directories for compilers and package managers.                       |
+| [`tmpfs`](#run---mounttypetmpfs)          | Mount a `tmpfs` in the build container.                                                                     |
+| [`secret`](#run---mounttypesecret)        | Allow the build container to access secure files such as private keys without baking them into the image.  |
+| [`ssh`](#run---mounttypessh)              | Allow the build container to access SSH keys via SSH agents, with support for passphrases.                 |
+
+### RUN --mount=type=bind
+
+This mount type allows binding files or directories to the build container. A
+bind mount is read-only by default.
+
+| Option           | Description                                                                            |
+|------------------|-----------------------------------------------------------------------------------------|
+| `target`[^1]     | Mount path.                                                                            |
+| `source`         | Source path in the `from`. Defaults to the root of the `from`.                         |
+| `from`           | Build stage or image name for the root of the source. Defaults to the build context.   |
+| `rw`,`readwrite` | Allow writes on the mount. Written data will be discarded.                             |
+
+### RUN --mount=type=cache
+
+This mount type allows the build container to cache directories for compilers
+and package managers.
+
+| Option          | Description |
+|-----------------|-------------|
+| `id`            | Optional ID to identify separate/different caches. Defaults to value of `target`. |
+| `target`[^1]    | Mount path. |
+| `ro`,`readonly` | Read-only if set. |
+| `sharing`       | One of `shared`, `private`, or `locked`. Defaults to `shared`. A `shared` cache mount can be used concurrently by multiple writers. `private` creates a new mount if there are multiple writers. `locked` pauses the second writer until the first one releases the mount. |
+| `from`          | Build stage to use as a base of the cache mount. Defaults to empty directory. |
+| `source`        | Subpath in the `from` to mount. Defaults to the root of the `from`. |
+| `mode`          | File mode for new cache directory in octal. Default `0755`. |
+| `uid`           | User ID for new cache directory. Default `0`. |
+| `gid`           | Group ID for new cache directory. Default `0`. |
+
+Contents of the cache directories persist between builder invocations without
+invalidating the instruction cache. Cache mounts should only be used for better
+performance. Your build should work with any contents of the cache directory as
+another build may overwrite the files or GC may clean it if more storage space
+is needed.
+
+#### Example: cache Go packages
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM golang
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    go build ...
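+# as an additional sketch (not part of the original example), a Go module
+# cache could be mounted the same way with an explicit id:
+#   RUN --mount=type=cache,id=gomod,target=/go/pkg/mod go mod download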
+``` + +#### Example: cache apt packages + +```dockerfile +# syntax=docker/dockerfile:1 +FROM ubuntu +RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && apt-get --no-install-recommends install -y gcc +``` + +Apt needs exclusive access to its data, so the caches use the option +`sharing=locked`, which will make sure multiple parallel builds using +the same cache mount will wait for each other and not access the same +cache files at the same time. You could also use `sharing=private` if +you prefer to have each build create another cache directory in this +case. + +### RUN --mount=type=tmpfs + +This mount type allows mounting tmpfs in the build container. + +| Option | Description | +|---------------------|-------------------------------------------------------| +| `target`[^1] | Mount path. | +| `size` | Specify an upper limit on the size of the filesystem. | + +### RUN --mount=type=secret + +This mount type allows the build container to access secure files such as +private keys without baking them into the image. + +| Option | Description | +|---------------------|---------------------------------------------------------------------------------------------------| +| `id` | ID of the secret. Defaults to basename of the target path. | +| `target` | Mount path. Defaults to `/run/secrets/` + `id`. | +| `required` | If set to `true`, the instruction errors out when the secret is unavailable. Defaults to `false`. | +| `mode` | File mode for secret file in octal. Default `0400`. | +| `uid` | User ID for secret file. Default `0`. | +| `gid` | Group ID for secret file. Default `0`. | + +#### Example: access to S3 + +```dockerfile +# syntax=docker/dockerfile:1 +FROM python:3 +RUN pip install awscli +RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \ + aws s3 cp s3://... ... +``` + +```console +$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials . +``` + +### RUN --mount=type=ssh + +This mount type allows the build container to access SSH keys via SSH agents, +with support for passphrases. + +| Option | Description | +|---------------------|------------------------------------------------------------------------------------------------| +| `id` | ID of SSH agent socket or key. Defaults to "default". | +| `target` | SSH agent socket path. Defaults to `/run/buildkit/ssh_agent.${N}`. | +| `required` | If set to `true`, the instruction errors out when the key is unavailable. Defaults to `false`. | +| `mode` | File mode for socket in octal. Default `0600`. | +| `uid` | User ID for socket. Default `0`. | +| `gid` | Group ID for socket. Default `0`. | + +#### Example: access to Gitlab + +```dockerfile +# syntax=docker/dockerfile:1 +FROM alpine +RUN apk add --no-cache openssh-client +RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts +RUN --mount=type=ssh \ + ssh -q -T git@gitlab.com 2>&1 | tee /hello +# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here +# with the type of build progress is defined as `plain`. +``` + +```console +$ eval $(ssh-agent) +$ ssh-add ~/.ssh/id_rsa +(Input your passphrase here) +$ docker buildx build --ssh default=$SSH_AUTH_SOCK . +``` + +You can also specify a path to `*.pem` file on the host directly instead of `$SSH_AUTH_SOCK`. 
+However, pem files with passphrases are not supported.
+
+## RUN --network
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.1`](#syntax)
+
+`RUN --network` allows control over which networking environment the command
+is run in.
+
+Syntax: `--network=<TYPE>`
+
+### Network types
+
+| Type                                         | Description                             |
+|----------------------------------------------|------------------------------------------|
+| [`default`](#run---networkdefault) (default) | Run in the default network.             |
+| [`none`](#run---networknone)                 | Run with no network access.             |
+| [`host`](#run---networkhost)                 | Run in the host's network environment.  |
+
+### RUN --network=default
+
+Equivalent to not supplying a flag at all, the command is run in the default
+network for the build.
+
+### RUN --network=none
+
+The command is run with no network access (`lo` is still available, but is
+isolated to this process).
+
+#### Example: isolating external effects
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM python:3.6
+ADD mypackage.tgz wheels/
+RUN --network=none pip install --find-links wheels mypackage
+```
+
+`pip` will only be able to install the packages provided in the tarfile, which
+can be controlled by an earlier build stage.
+
+### RUN --network=host
+
+The command is run in the host's network environment (similar to
+`docker build --network=host`, but on a per-instruction basis).
+
+> **Warning**
+>
+> The use of `--network=host` is protected by the `network.host` entitlement,
+> which needs to be enabled when starting the buildkitd daemon with
+> `--allow-insecure-entitlement network.host` flag or in [buildkitd config](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
+> and for a build request with [`--allow network.host` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/#allow).
+{:.warning}
+
+## RUN --security
+
+> **Note**
+>
+> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) version.
+
+### RUN --security=insecure
+
+With `--security=insecure`, the builder runs the command without sandbox in insecure
+mode, which allows running flows that require elevated privileges (e.g. containerd).
+This is equivalent to running `docker run --privileged`.
+
+> **Warning**
+>
+> In order to access this feature, entitlement `security.insecure` should be
+> enabled when starting the buildkitd daemon with
+> `--allow-insecure-entitlement security.insecure` flag or in [buildkitd config](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
+> and for a build request with [`--allow security.insecure` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/#allow).
+{:.warning}
+
+#### Example: check entitlements
+
+```dockerfile
+# syntax=docker/dockerfile:1-labs
+FROM ubuntu
+RUN --security=insecure cat /proc/self/status | grep CapEff
+```
+
+```text
+#84 0.093 CapEff: 0000003fffffffff
+```
+
+### RUN --security=sandbox
+
+The default sandbox mode can be activated via `--security=sandbox`, but that is a no-op.
+
+## CMD
+
+The `CMD` instruction has three forms:
+
+- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form)
+- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*)
+- `CMD command param1 param2` (*shell* form)
+
+There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD`
+then only the last `CMD` will take effect.
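+
+As a minimal sketch of that rule (assuming an `alpine` base image), a container
+run from the image built below prints only `second`:
+
+```dockerfile
+FROM alpine
+CMD ["echo", "first"]
+# only this final CMD is recorded in the image configuration
+CMD ["echo", "second"]
+```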
+
+**The main purpose of a `CMD` is to provide defaults for an executing
+container.** These defaults can include an executable, or they can omit
+the executable, in which case you must specify an `ENTRYPOINT`
+instruction as well.
+
+If `CMD` is used to provide default arguments for the `ENTRYPOINT` instruction,
+both the `CMD` and `ENTRYPOINT` instructions should be specified with the JSON
+array format.
+
+> **Note**
+>
+> The *exec* form is parsed as a JSON array, which means that you must use
+> double-quotes (") around words not single-quotes (').
+
+Unlike the *shell* form, the *exec* form does not invoke a command shell.
+This means that normal shell processing does not happen. For example,
+`CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+If you want shell processing then either use the *shell* form or execute
+a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`.
+When using the exec form and executing a shell directly, as in the case for
+the shell form, it is the shell that is doing the environment variable
+expansion, not docker.
+
+When used in the shell or exec formats, the `CMD` instruction sets the command
+to be executed when running the image.
+
+If you use the *shell* form of the `CMD`, then the `<command>` will execute in
+`/bin/sh -c`:
+
+```dockerfile
+FROM ubuntu
+CMD echo "This is a test." | wc -
+```
+
+If you want to **run your** `<command>` **without a shell** then you must
+express the command as a JSON array and give the full path to the executable.
+**This array form is the preferred format of `CMD`.** Any additional parameters
+must be individually expressed as strings in the array:
+
+```dockerfile
+FROM ubuntu
+CMD ["/usr/bin/wc","--help"]
+```
+
+If you would like your container to run the same executable every time, then
+you should consider using `ENTRYPOINT` in combination with `CMD`. See
+[*ENTRYPOINT*](#entrypoint).
+
+If the user specifies arguments to `docker run` then they will override the
+default specified in `CMD`.
+
+> **Note**
+>
+> Do not confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
+> the result; `CMD` does not execute anything at build time, but specifies
+> the intended command for the image.
+
+## LABEL
+
+```dockerfile
+LABEL <key>=<value> <key>=<value> <key>=<value> ...
+```
+
+The `LABEL` instruction adds metadata to an image. A `LABEL` is a
+key-value pair. To include spaces within a `LABEL` value, use quotes and
+backslashes as you would in command-line parsing. A few usage examples:
+
+```dockerfile
+LABEL "com.example.vendor"="ACME Incorporated"
+LABEL com.example.label-with-value="foo"
+LABEL version="1.0"
+LABEL description="This text illustrates \
+that label-values can span multiple lines."
+```
+
+An image can have more than one label. You can specify multiple labels on a
+single line. Prior to Docker 1.10, this decreased the size of the final image,
+but this is no longer the case. You may still choose to specify multiple labels
+in a single instruction, in one of the following two ways:
+
+```dockerfile
+LABEL multi.label1="value1" multi.label2="value2" other="value3"
+```
+
+```dockerfile
+LABEL multi.label1="value1" \
+      multi.label2="value2" \
+      other="value3"
+```
+
+> **Note**
+>
+> Be sure to use double quotes and not single quotes. Particularly when you are
+> using string interpolation (e.g. `LABEL example="foo-$ENV_VAR"`), single
+> quotes will take the string as is without unpacking the variable's value.
+
+Labels included in base or parent images (images in the `FROM` line) are
+inherited by your image. If a label already exists but with a different value,
+the most-recently-applied value overrides any previously-set value.
+
+To view an image's labels, use the `docker image inspect` command. You can use
+the `--format` option to show just the labels:
+
+```console
+$ docker image inspect --format='{{json .Config.Labels}}' myimage
+```
+
+```json
+{
+  "com.example.vendor": "ACME Incorporated",
+  "com.example.label-with-value": "foo",
+  "version": "1.0",
+  "description": "This text illustrates that label-values can span multiple lines.",
+  "multi.label1": "value1",
+  "multi.label2": "value2",
+  "other": "value3"
+}
+```
+
+## MAINTAINER (deprecated)
+
+```dockerfile
+MAINTAINER <name>
+```
+
+The `MAINTAINER` instruction sets the *Author* field of the generated images.
+The `LABEL` instruction is a much more flexible version of this and you should use
+it instead, as it enables setting any metadata you require, and can be viewed
+easily, for example with `docker inspect`. To set a label corresponding to the
+`MAINTAINER` field you could use:
+
+```dockerfile
+LABEL org.opencontainers.image.authors="SvenDowideit@home.org.au"
+```
+
+This will then be visible from `docker inspect` with the other labels.
+
+## EXPOSE
+
+```dockerfile
+EXPOSE <port> [<port>/<protocol>...]
+```
+
+The `EXPOSE` instruction informs Docker that the container listens on the
+specified network ports at runtime. You can specify whether the port listens on
+TCP or UDP, and the default is TCP if the protocol is not specified.
+
+The `EXPOSE` instruction does not actually publish the port. It functions as a
+type of documentation between the person who builds the image and the person who
+runs the container, about which ports are intended to be published. To actually
+publish the port when running the container, use the `-p` flag on `docker run`
+to publish and map one or more ports, or the `-P` flag to publish all exposed
+ports and map them to high-order ports.
+
+By default, `EXPOSE` assumes TCP. You can also specify UDP:
+
+```dockerfile
+EXPOSE 80/udp
+```
+
+To expose on both TCP and UDP, include two lines:
+
+```dockerfile
+EXPOSE 80/tcp
+EXPOSE 80/udp
+```
+
+In this case, if you use `-P` with `docker run`, the port will be exposed once
+for TCP and once for UDP. Remember that `-P` uses an ephemeral high-ordered host
+port on the host, so the port will not be the same for TCP and UDP.
+
+Regardless of the `EXPOSE` settings, you can override them at runtime by using
+the `-p` flag. For example:
+
+```console
+$ docker run -p 80:80/tcp -p 80:80/udp ...
+```
+
+To set up port redirection on the host system, see [using the -P flag](https://docs.docker.com/engine/reference/run/#expose-incoming-ports).
+The `docker network` command supports creating networks for communication among
+containers without the need to expose or publish specific ports, because the
+containers connected to the network can communicate with each other over any
+port. For detailed information, see the
+[overview of this feature](https://docs.docker.com/engine/userguide/networking/).
+
+## ENV
+
+```dockerfile
+ENV <key>=<value> ...
+```
+
+The `ENV` instruction sets the environment variable `<key>` to the value
+`<value>`. This value will be in the environment for all subsequent instructions
+in the build stage and can be [replaced inline](#environment-replacement) in
+many as well. The value will be interpreted for other environment variables, so
+quote characters will be removed if they are not escaped. Like command line parsing,
Like command line parsing,
+quotes and backslashes can be used to include spaces within values.
+
+Example:
+
+```dockerfile
+ENV MY_NAME="John Doe"
+ENV MY_DOG=Rex\ The\ Dog
+ENV MY_CAT=fluffy
+```
+
+The `ENV` instruction allows for multiple `<key>=<value> ...` variables to be set
+at one time, and the example below will yield the same net results in the final
+image:
+
+```dockerfile
+ENV MY_NAME="John Doe" MY_DOG=Rex\ The\ Dog \
+    MY_CAT=fluffy
+```
+
+The environment variables set using `ENV` will persist when a container is run
+from the resulting image. You can view the values using `docker inspect`, and
+change them using `docker run --env <key>=<value>`.
+
+A stage inherits any environment variables that were set using `ENV` by its
+parent stage or any ancestor. Refer [here](https://docs.docker.com/build/building/multi-stage/)
+for more on multi-stage builds.
+
+Environment variable persistence can cause unexpected side effects. For example,
+setting `ENV DEBIAN_FRONTEND=noninteractive` changes the behavior of `apt-get`,
+and may confuse users of your image.
+
+If an environment variable is only needed during build, and not in the final
+image, consider setting a value for a single command instead:
+
+```dockerfile
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y ...
+```
+
+Or using [`ARG`](#arg), which is not persisted in the final image:
+
+```dockerfile
+ARG DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install -y ...
+```
+
+> **Alternative syntax**
+>
+> The `ENV` instruction also allows an alternative syntax `ENV <key> <value>`,
+> omitting the `=`. For example:
+>
+> ```dockerfile
+> ENV MY_VAR my-value
+> ```
+>
+> This syntax does not allow for multiple environment-variables to be set in a
+> single `ENV` instruction, and can be confusing. For example, the following
+> sets a single environment variable (`ONE`) with value `"TWO= THREE=world"`:
+>
+> ```dockerfile
+> ENV ONE TWO= THREE=world
+> ```
+>
+> The alternative syntax is supported for backward compatibility, but discouraged
+> for the reasons outlined above, and may be removed in a future release.
+
+## ADD
+
+ADD has two forms:
+
+```dockerfile
+ADD [--chown=<user>:<group>] [--checksum=<checksum>] <src>... <dest>
+ADD [--chown=<user>:<group>] ["<src>",... "<dest>"]
+```
+
+The latter form is required for paths containing whitespace.
+
+> **Note**
+>
+> The `--chown` feature is only supported on Dockerfiles used to build Linux containers,
+> and will not work on Windows containers. Since user and group ownership concepts do
+> not translate between Linux and Windows, the use of `/etc/passwd` and `/etc/group` for
+> translating user and group names to IDs restricts this feature to only be viable
+> for Linux OS-based containers.
+
+The `ADD` instruction copies new files, directories or remote file URLs from `<src>`
+and adds them to the filesystem of the image at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but if they are files or
+directories, their paths are interpreted as relative to the source of
+the context of the build.
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. For example:
+
+To add all files starting with "hom":
+
+```dockerfile
+ADD hom* /mydir/
+```
+
+In the example below, `?` is replaced with any single character, e.g., "home.txt".
+
+```dockerfile
+ADD hom?.txt /mydir/
+```
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container. 
+
+The example below uses a relative path, and adds "test.txt" to `/relativeDir/`:
+
+```dockerfile
+ADD test.txt relativeDir/
+```
+
+Whereas this example uses an absolute path, and adds "test.txt" to `/absoluteDir/`:
+
+```dockerfile
+ADD test.txt /absoluteDir/
+```
+
+When adding files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to add a file
+named `arr[0].txt`, use the following:
+
+```dockerfile
+ADD arr[[]0].txt /mydir/
+```
+
+
+All new files and directories are created with a UID and GID of 0, unless the
+optional `--chown` flag specifies a given username, groupname, or UID/GID
+combination to request specific ownership of the content added. The
+format of the `--chown` flag allows for either username and groupname strings
+or direct integer UID and GID in any combination. Providing a username without
+groupname or a UID without GID will use the same numeric UID as the GID. If a
+username or groupname is provided, the container's root filesystem
+`/etc/passwd` and `/etc/group` files will be used to perform the translation
+from name to integer UID or GID respectively. The following examples show
+valid definitions for the `--chown` flag:
+
+```dockerfile
+ADD --chown=55:mygroup files* /somedir/
+ADD --chown=bin files* /somedir/
+ADD --chown=1 files* /somedir/
+ADD --chown=10:11 files* /somedir/
+```
+
+If the container root filesystem does not contain either `/etc/passwd` or
+`/etc/group` files and either user or group names are used in the `--chown`
+flag, the build will fail on the `ADD` operation. Using numeric IDs requires
+no lookup and will not depend on container root filesystem content.
+
+In the case where `<src>` is a remote file URL, the destination will
+have permissions of 600. If the remote file being retrieved has an HTTP
+`Last-Modified` header, the timestamp from that header will be used
+to set the `mtime` on the destination file. However, like any other file
+processed during an `ADD`, `mtime` will not be included in the determination
+of whether or not the file has changed and the cache should be updated.
+
+> **Note**
+>
+> If you build by passing a `Dockerfile` through STDIN (`docker
+> build - < somefile`), there is no build context, so the `Dockerfile`
+> can only contain a URL-based `ADD` instruction. You can also pass a
+> compressed archive through STDIN (`docker build - < archive.tar.gz`);
+> the `Dockerfile` at the root of the archive and the rest of the
+> archive will be used as the context of the build.
+
+If your URL files are protected using authentication, you need to use `RUN wget`,
+`RUN curl` or use another tool from within the container as the `ADD` instruction
+does not support authentication.
+
+> **Note**
+>
+> The first encountered `ADD` instruction will invalidate the cache for all
+> following instructions from the Dockerfile if the contents of `<src>` have
+> changed. This includes invalidating the cache for `RUN` instructions.
+> See the [`Dockerfile` Best Practices
+> guide – Leverage build cache](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#leverage-build-cache)
+> for more information.
+
+
+`ADD` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `ADD ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon. 
+
+- If `<src>` is a URL and `<dest>` does not end with a trailing slash, then a
+  file is downloaded from the URL and copied to `<dest>`.
+
+- If `<src>` is a URL and `<dest>` does end with a trailing slash, then the
+  filename is inferred from the URL and the file is downloaded to
+  `<dest>/<filename>`. For instance, `ADD http://example.com/foobar /` would
+  create the file `/foobar`. The URL must have a nontrivial path so that an
+  appropriate filename can be discovered in this case (`http://example.com`
+  will not work).
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**
+>
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is a *local* tar archive in a recognized compression format
+  (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources
+  from *remote* URLs are **not** decompressed. When a directory is copied or
+  unpacked, it has the same behavior as `tar -x`, the result is the union of:
+
+  1. Whatever existed at the destination path and
+  2. The contents of the source tree, with conflicts resolved in favor
+     of "2." on a file-by-file basis.
+
+  > **Note**
+  >
+  > Whether a file is identified as a recognized compression format or not
+  > is done solely based on the contents of the file, not the name of the file.
+  > For example, if an empty file happens to end with `.tar.gz` this will not
+  > be recognized as a compressed file and **will not** generate any kind of
+  > decompression error message, rather the file will simply be copied to the
+  > destination.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
+
+### Verifying a remote file checksum `ADD --checksum=<checksum> <src> <dest>`
+
+> **Note**
+>
+> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) version (`1.5-labs` or newer).
+
+The checksum of a remote file can be verified with the `--checksum` flag:
+
+```dockerfile
+ADD --checksum=sha256:24454f830cdb571e2c4ad15481119c43b3cafd48dd869a9b2945d1036d1dc68d https://mirrors.edge.kernel.org/pub/linux/kernel/Historic/linux-0.01.tar.gz /
+```
+
+The `--checksum` flag only supports HTTP sources currently.
+
+### Adding a git repository `ADD <git ref> <dir>`
+
+> **Note**
+>
+> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) version (`1.5-labs` or newer).
+
+This form allows adding a git repository to an image directly, without using the `git` command inside the image:
+
+```
+ADD [--keep-git-dir=<boolean>] <git ref> <dir>
+```
+
+```dockerfile
+# syntax=docker/dockerfile:1-labs
+FROM alpine
+ADD --keep-git-dir=true https://github.com/moby/buildkit.git#v0.10.1 /buildkit
+```
+
+The `--keep-git-dir=true` flag adds the `.git` directory. This flag defaults to false. 
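+
+As an illustrative sketch (not one of the upstream examples; the package
+install and output paths are assumptions), keeping the `.git` directory makes
+it possible to read revision metadata during the build:
+
+```dockerfile
+# syntax=docker/dockerfile:1-labs
+FROM alpine
+# git is only needed here to read the metadata kept by --keep-git-dir
+RUN apk add --no-cache git
+ADD --keep-git-dir=true https://github.com/moby/buildkit.git#v0.10.1 /buildkit
+WORKDIR /buildkit
+# record the exact commit that was checked out
+RUN git rev-parse HEAD > /revision
+```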
+
+### Adding a private git repository
+
+To add a private repo via SSH, create a Dockerfile with the following form:
+
+```dockerfile
+# syntax=docker/dockerfile:1-labs
+FROM alpine
+ADD git@git.example.com:foo/bar.git /bar
+```
+
+This Dockerfile can be built with `docker build --ssh` or `buildctl build --ssh`, e.g.,
+
+```console
+$ docker build --ssh default
+```
+
+```console
+$ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --ssh default
+```
+
+## ADD --link
+
+See [`COPY --link`](#copy---link).
+
+## COPY
+
+COPY has two forms:
+
+```dockerfile
+COPY [--chown=<user>:<group>] <src>... <dest>
+COPY [--chown=<user>:<group>] ["<src>",... "<dest>"]
+```
+
+This latter form is required for paths containing whitespace.
+
+> **Note**
+>
+> The `--chown` feature is only supported on Dockerfiles used to build Linux containers,
+> and will not work on Windows containers. Since user and group ownership concepts do
+> not translate between Linux and Windows, the use of `/etc/passwd` and `/etc/group` for
+> translating user and group names to IDs restricts this feature to only be viable for
+> Linux OS-based containers.
+
+The `COPY` instruction copies new files or directories from `<src>`
+and adds them to the filesystem of the container at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but the paths of files and
+directories will be interpreted as relative to the source of the context
+of the build.
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. For example:
+
+To add all files starting with "hom":
+
+```dockerfile
+COPY hom* /mydir/
+```
+
+In the example below, `?` is replaced with any single character, e.g., "home.txt".
+
+```dockerfile
+COPY hom?.txt /mydir/
+```
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container.
+
+The example below uses a relative path, and adds "test.txt" to `/relativeDir/`:
+
+```dockerfile
+COPY test.txt relativeDir/
+```
+
+Whereas this example uses an absolute path, and adds "test.txt" to `/absoluteDir/`:
+
+```dockerfile
+COPY test.txt /absoluteDir/
+```
+
+When copying files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to copy a file
+named `arr[0].txt`, use the following:
+
+```dockerfile
+COPY arr[[]0].txt /mydir/
+```
+
+All new files and directories are created with a UID and GID of 0, unless the
+optional `--chown` flag specifies a given username, groupname, or UID/GID
+combination to request specific ownership of the copied content. The
+format of the `--chown` flag allows for either username and groupname strings
+or direct integer UID and GID in any combination. Providing a username without
+groupname or a UID without GID will use the same numeric UID as the GID. If a
+username or groupname is provided, the container's root filesystem
+`/etc/passwd` and `/etc/group` files will be used to perform the translation
+from name to integer UID or GID respectively. 
The following examples show
+valid definitions for the `--chown` flag:
+
+```dockerfile
+COPY --chown=55:mygroup files* /somedir/
+COPY --chown=bin files* /somedir/
+COPY --chown=1 files* /somedir/
+COPY --chown=10:11 files* /somedir/
+```
+
+If the container root filesystem does not contain either `/etc/passwd` or
+`/etc/group` files and either user or group names are used in the `--chown`
+flag, the build will fail on the `COPY` operation. Using numeric IDs requires
+no lookup and does not depend on container root filesystem content.
+
+> **Note**
+>
+> If you build using STDIN (`docker build - < somefile`), there is no
+> build context, so `COPY` can't be used.
+
+Optionally `COPY` accepts a flag `--from=<name>` that can be used to set
+the source location to a previous build stage (created with `FROM .. AS <name>`)
+that will be used instead of a build context sent by the user. If a build
+stage with the specified name can't be found, an image with the same name
+is used instead.
+
+`COPY` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `COPY ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon.
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**
+>
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
+
+> **Note**
+>
+> The first encountered `COPY` instruction will invalidate the cache for all
+> following instructions from the Dockerfile if the contents of `<src>` have
+> changed. This includes invalidating the cache for `RUN` instructions.
+> See the [`Dockerfile` Best Practices
+> guide – Leverage build cache](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#leverage-build-cache)
+> for more information.
+
+## COPY --link
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.4`](#syntax)
+
+Enabling this flag in `COPY` or `ADD` commands allows you to copy files with
+enhanced semantics where your files remain independent on their own layer and
+don't get invalidated when commands on previous layers are changed.
+
+When `--link` is used your source files are copied into an empty destination
+directory. That directory is turned into a layer that is linked on top of your
+previous state.
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+COPY --link /foo /bar
+```
+
+This is equivalent to doing two builds:
+
+```dockerfile
+FROM alpine
+```
+
+and
+
+```dockerfile
+FROM scratch
+COPY /foo /bar
+```
+
+and merging all the layers of both images together.
+
+### Benefits of using `--link`
+
+Use `--link` to reuse already built layers in subsequent builds with
+`--cache-from` even if the previous layers have changed. 
This is especially
+important for multi-stage builds where a `COPY --from` statement would
+previously get invalidated if any previous commands in the same stage changed,
+causing the need to rebuild the intermediate stages again. With `--link` the
+layer the previous build generated is reused and merged on top of the new
+layers. This also means you can easily rebase your images when the base images
+receive updates, without having to execute the whole build again. In backends
+that support it, BuildKit can do this rebase action without the need to push or
+pull any layers between the client and the registry. BuildKit will detect this
+case and only create a new image manifest that contains the new layers and the
+old layers in the correct order.
+
+BuildKit can also avoid pulling down the base image entirely when `--link` is
+used and no other commands require access to the files in the base image. In
+that case BuildKit will only build the layers for the `COPY` commands and push
+them to the registry directly on top of the layers of the base image.
+
+### Incompatibilities with `--link=false`
+
+When using `--link` the `COPY/ADD` commands are not allowed to read any files
+from the previous state. This means that if in the previous state the destination
+directory was a path that contained a symlink, `COPY/ADD` cannot follow it.
+In the final image the destination path created with `--link` will always be a
+path containing only directories.
+
+If you don't rely on the behavior of following symlinks in the destination
+path, using `--link` is always recommended. The performance of `--link` is
+equivalent to or better than the default behavior, and it creates much better
+conditions for cache reuse.
+
+## ENTRYPOINT
+
+ENTRYPOINT has two forms:
+
+The *exec* form, which is the preferred form:
+
+```dockerfile
+ENTRYPOINT ["executable", "param1", "param2"]
+```
+
+The *shell* form:
+
+```dockerfile
+ENTRYPOINT command param1 param2
+```
+
+An `ENTRYPOINT` allows you to configure a container that will run as an executable.
+
+For example, the following starts nginx with its default content, listening
+on port 80:
+
+```console
+$ docker run -i -t --rm -p 80:80 nginx
+```
+
+Command line arguments to `docker run <image>` will be appended after all
+elements in an *exec* form `ENTRYPOINT`, and will override all elements specified
+using `CMD`.
+This allows arguments to be passed to the entry point, i.e., `docker run <image> -d`
+will pass the `-d` argument to the entry point.
+You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint`
+flag.
+
+The *shell* form prevents any `CMD` or `run` command line arguments from being
+used, but has the disadvantage that your `ENTRYPOINT` will be started as a
+subcommand of `/bin/sh -c`, which does not pass signals.
+This means that the executable will not be the container's `PID 1` - and
+will _not_ receive Unix signals - so your executable will not receive a
+`SIGTERM` from `docker stop <container>`.
+
+Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect.
+
+### Exec form ENTRYPOINT example
+
+You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands
+and arguments and then use either form of `CMD` to set additional defaults that
+are more likely to be changed. 
+
+```dockerfile
+FROM ubuntu
+ENTRYPOINT ["top", "-b"]
+CMD ["-c"]
+```
+
+When you run the container, you can see that `top` is the only process:
+
+```console
+$ docker run -it --rm --name test top -H
+
+top - 08:25:00 up  7:27,  0 users,  load average: 0.00, 0.01, 0.05
+Threads:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+%Cpu(s):  0.1 us,  0.1 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
+KiB Mem:   2056668 total,  1616832 used,   439836 free,    99352 buffers
+KiB Swap:  1441840 total,        0 used,  1441840 free.  1324440 cached Mem
+
+  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
+    1 root      20   0   19744   2336   2080 R   0.0  0.1   0:00.04 top
+```
+
+To examine the result further, you can use `docker exec`:
+
+```console
+$ docker exec -it test ps aux
+
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  2.6  0.1  19752  2352 ?        Ss+  08:24   0:00 top -b -H
+root         7  0.0  0.1  15572  2164 ?        R+   08:25   0:00 ps aux
+```
+
+And you can gracefully request `top` to shut down using `docker stop test`.
+
+The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the
+foreground (i.e., as `PID 1`):
+
+```dockerfile
+FROM debian:stable
+RUN apt-get update && apt-get install -y --force-yes apache2
+EXPOSE 80 443
+VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
+ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
+```
+
+If you need to write a starter script for a single executable, you can ensure that
+the final executable receives the Unix signals by using `exec` and `gosu`
+commands:
+
+```bash
+#!/usr/bin/env bash
+set -e
+
+if [ "$1" = 'postgres' ]; then
+    chown -R postgres "$PGDATA"
+
+    if [ -z "$(ls -A "$PGDATA")" ]; then
+        gosu postgres initdb
+    fi
+
+    exec gosu postgres "$@"
+fi
+
+exec "$@"
+```
+
+Lastly, if you need to do some extra cleanup (or communicate with other containers)
+on shutdown, or are coordinating more than one executable, you may need to ensure
+that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then
+does some more work:
+
+```bash
+#!/bin/sh
+# Note: I've written this using sh so it works in the busybox container too
+
+# USE the trap if you need to also do manual cleanup after the service is stopped,
+# or need to start multiple services in the one container
+trap "echo TRAPed signal" HUP INT QUIT TERM
+
+# start service in background here
+/usr/sbin/apachectl start
+
+echo "[hit enter key to exit] or run 'docker stop <container>'"
+read
+
+# stop service and clean up here
+echo "stopping apache"
+/usr/sbin/apachectl stop
+
+echo "exited $0"
+```
+
+If you run this image with `docker run -it --rm -p 80:80 --name test apache`,
+you can then examine the container's processes with `docker exec`, or `docker top`,
+and then ask the script to stop Apache:
+
+```console
+$ docker exec -it test ps aux
+
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  0.1  0.0   4448   692 ?        Ss+  00:42   0:00 /bin/sh /run.sh 123 cmd cmd2
+root        19  0.0  0.2  71304  4440 ?        Ss   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    20  0.2  0.2 360468  6004 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    21  0.2  0.2 360468  6000 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+root        81  0.0  0.1  15572  2140 ?        
R+ 00:44 0:00 ps aux + +$ docker top test + +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start + +$ /usr/bin/time docker stop test + +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note** +> +> You can override the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). + +> **Note** +> +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +Unlike the *shell* form, the *exec* form does not invoke a command shell. +This means that normal shell processing does not happen. For example, +`ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +If you want shell processing then either use the *shell* form or execute +a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. +When using the exec form and executing a shell directly, as in the case for +the shell form, it is the shell that is doing the environment variable +expansion, not docker. + +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. +This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. +To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + +```dockerfile +FROM ubuntu +ENTRYPOINT exec top -b +``` + +When you run this image, you'll see the single `PID 1` process: + +```console +$ docker run -it --rm --name test top + +Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached +CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq +Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b +``` + +Which exits cleanly on `docker stop`: + +```console +$ /usr/bin/time docker stop test + +test +real 0m 0.20s +user 0m 0.02s +sys 0m 0.04s +``` + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + +```dockerfile +FROM ubuntu +ENTRYPOINT top -b +CMD -- --ignored-param1 +``` + +You can then run it (giving it a name for the next step): + +```console +$ docker run -it --name test top --ignored-param2 + +top - 13:58:24 up 17 min, 0 users, load average: 0.00, 0.00, 0.00 +Tasks: 2 total, 1 running, 1 sleeping, 0 stopped, 0 zombie +%Cpu(s): 16.7 us, 33.3 sy, 0.0 ni, 50.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st +MiB Mem : 1990.8 total, 1354.6 free, 231.4 used, 404.7 buff/cache +MiB Swap: 1024.0 total, 1024.0 free, 0.0 used. 1639.8 avail Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 2612 604 536 S 0.0 0.0 0:00.02 sh + 6 root 20 0 5956 3188 2768 R 0.0 0.2 0:00.00 top +``` + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. 
+
+If you then run `docker stop test`, the container will not exit cleanly - the
+`stop` command will be forced to send a `SIGKILL` after the timeout:
+
+```console
+$ docker exec -it test ps waux
+
+USER       PID %CPU %MEM   VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  0.4  0.0  2612   604 pts/0    Ss+  13:58   0:00 /bin/sh -c top -b --ignored-param2
+root         6  0.0  0.1  5956  3188 pts/0    S+   13:58   0:00 top -b
+root         7  0.0  0.1  5884  2816 pts/1    Rs+  13:58   0:00 ps waux
+
+$ /usr/bin/time docker stop test
+
+test
+real 0m 10.19s
+user 0m 0.04s
+sys 0m 0.03s
+```
+
+### Understand how CMD and ENTRYPOINT interact
+
+Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container.
+There are a few rules that describe their co-operation.
+
+1. A Dockerfile should specify at least one `CMD` or `ENTRYPOINT` command.
+
+2. `ENTRYPOINT` should be defined when using the container as an executable.
+
+3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command
+or for executing an ad-hoc command in a container.
+
+4. `CMD` will be overridden when running the container with alternative arguments.
+
+The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations:
+
+|                                | No ENTRYPOINT              | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"]          |
+|:-------------------------------|:---------------------------|:-------------------------------|:-----------------------------------------------|
+| **No CMD**                     | *error, not allowed*       | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry                            |
+| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd            | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd            |
+| **CMD exec_cmd p1_cmd**        | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd |
+
+> **Note**
+>
+> If `CMD` is defined from the base image, setting `ENTRYPOINT` will
+> reset `CMD` to an empty value. In this scenario, `CMD` must be defined in the
+> current image to have a value.
+
+## VOLUME
+
+```dockerfile
+VOLUME ["/data"]
+```
+
+The `VOLUME` instruction creates a mount point with the specified name
+and marks it as holding externally mounted volumes from native host or other
+containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain
+string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log
+/var/db`. For more information/examples and mounting instructions via the
+Docker client, refer to
+[*Share Directories via Volumes*](https://docs.docker.com/storage/volumes/)
+documentation.
+
+The `docker run` command initializes the newly created volume with any data
+that exists at the specified location within the base image. For example,
+consider the following Dockerfile snippet:
+
+```dockerfile
+FROM ubuntu
+RUN mkdir /myvol
+RUN echo "hello world" > /myvol/greeting
+VOLUME /myvol
+```
+
+This Dockerfile results in an image that causes `docker run` to
+create a new mount point at `/myvol` and copy the `greeting` file
+into the newly created volume.
+
+### Notes about specifying volumes
+
+Keep the following things in mind about volumes in the `Dockerfile`. 
+
+- **Volumes on Windows-based containers**: When using Windows-based containers,
+  the destination of a volume inside the container must be one of:
+
+  - a non-existing or empty directory
+  - a drive other than `C:`
+
+- **Changing the volume from within the Dockerfile**: If any build steps change the
+  data within the volume after it has been declared, those changes will be discarded.
+
+- **JSON formatting**: The list is parsed as a JSON array.
+  You must enclose words with double quotes (`"`) rather than single quotes (`'`).
+
+- **The host directory is declared at container run-time**: The host directory
+  (the mountpoint) is, by its nature, host-dependent. This is to preserve image
+  portability, since a given host directory can't be guaranteed to be available
+  on all hosts. For this reason, you can't mount a host directory from
+  within the Dockerfile. The `VOLUME` instruction does not support specifying a `host-dir`
+  parameter. You must specify the mountpoint when you create or run the container.
+
+## USER
+
+```dockerfile
+USER <user>[:<group>]
+```
+
+or
+
+```dockerfile
+USER <UID>[:<GID>]
+```
+
+The `USER` instruction sets the user name (or UID) and optionally the user
+group (or GID) to use as the default user and group for the remainder of the
+current stage. The specified user is used for `RUN` instructions and at
+runtime, runs the relevant `ENTRYPOINT` and `CMD` commands.
+
+> Note that when specifying a group for the user, the user will have _only_ the
+> specified group membership. Any other configured group memberships will be ignored.
+
+> **Warning**
+>
+> When the user doesn't have a primary group then the image (or the next
+> instructions) will be run with the `root` group.
+>
+> On Windows, the user must be created first if it's not a built-in account.
+> This can be done with the `net user` command called as part of a Dockerfile.
+
+```dockerfile
+FROM microsoft/windowsservercore
+# Create Windows user in the container
+RUN net user /add patrick
+# Set it for subsequent commands
+USER patrick
+```
+
+## WORKDIR
+
+```dockerfile
+WORKDIR /path/to/workdir
+```
+
+The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`,
+`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`.
+If the `WORKDIR` doesn't exist, it will be created even if it's not used in any
+subsequent `Dockerfile` instruction.
+
+The `WORKDIR` instruction can be used multiple times in a `Dockerfile`. If a
+relative path is provided, it will be relative to the path of the previous
+`WORKDIR` instruction. For example:
+
+```dockerfile
+WORKDIR /a
+WORKDIR b
+WORKDIR c
+RUN pwd
+```
+
+The output of the final `pwd` command in this `Dockerfile` would be `/a/b/c`.
+
+The `WORKDIR` instruction can resolve environment variables previously set using
+`ENV`. You can only use environment variables explicitly set in the `Dockerfile`.
+For example:
+
+```dockerfile
+ENV DIRPATH=/path
+WORKDIR $DIRPATH/$DIRNAME
+RUN pwd
+```
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/path/$DIRNAME`.
+
+If not specified, the default working directory is `/`. In practice, if you
+aren't building a Dockerfile from scratch (`FROM scratch`), the `WORKDIR` is
+likely to be set by the base image you're using.
+
+Therefore, to avoid unintended operations in unknown directories, it is best
+practice to set your `WORKDIR` explicitly. 
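+
+For instance, a minimal sketch (the base image and paths here are illustrative
+assumptions, not part of the reference): declaring `WORKDIR` once near the top
+makes every later relative path unambiguous, regardless of what the base image
+set:
+
+```dockerfile
+FROM golang:1.19
+# every relative path below now resolves under /src
+WORKDIR /src
+COPY . .
+RUN go build -o /usr/local/bin/app .
+```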
+
+## ARG
+
+```dockerfile
+ARG <name>[=<default value>]
+```
+
+The `ARG` instruction defines a variable that users can pass at build-time to
+the builder with the `docker build` command using the `--build-arg <varname>=<value>`
+flag. If a user specifies a build argument that was not
+defined in the Dockerfile, the build outputs a warning.
+
+```console
+[Warning] One or more build-args [foo] were not consumed.
+```
+
+A Dockerfile may include one or more `ARG` instructions. For example,
+the following is a valid Dockerfile:
+
+```dockerfile
+FROM busybox
+ARG user1
+ARG buildno
+# ...
+```
+
+> **Warning:**
+>
+> It is not recommended to use build-time variables for passing secrets like
+> GitHub keys, user credentials etc. Build-time variable values are visible to
+> any user of the image with the `docker history` command.
+>
+> Refer to the [`RUN --mount=type=secret`](#run---mounttypesecret) section to
+> learn about secure ways to use secrets when building images.
+{:.warning}
+
+### Default values
+
+An `ARG` instruction can optionally include a default value:
+
+```dockerfile
+FROM busybox
+ARG user1=someuser
+ARG buildno=1
+# ...
+```
+
+If an `ARG` instruction has a default value and if there is no value passed
+at build-time, the builder uses the default.
+
+### Scope
+
+An `ARG` variable definition comes into effect from the line on which it is
+defined in the `Dockerfile`, not from the argument's use on the command-line or
+elsewhere. For example, consider this Dockerfile:
+
+```dockerfile
+FROM busybox
+USER ${user:-some_user}
+ARG user
+USER $user
+# ...
+```
+
+A user builds this file by calling:
+
+```console
+$ docker build --build-arg user=what_user .
+```
+
+The `USER` at line 2 evaluates to `some_user`, because the `user` variable is
+only defined on the subsequent line 3. The `USER` at line 4 evaluates to
+`what_user`, as `user` is defined and the `what_user` value was passed on the
+command line. Prior to its definition by an `ARG` instruction, any use of a
+variable results in an empty string.
+
+An `ARG` instruction goes out of scope at the end of the build
+stage where it was defined. To use an arg in multiple stages, each stage must
+include the `ARG` instruction.
+
+```dockerfile
+FROM busybox
+ARG SETTINGS
+RUN ./run/setup $SETTINGS
+
+FROM busybox
+ARG SETTINGS
+RUN ./run/other $SETTINGS
+```
+
+### Using ARG variables
+
+You can use an `ARG` or an `ENV` instruction to specify variables that are
+available to the `RUN` instruction. Environment variables defined using the
+`ENV` instruction always override an `ARG` instruction of the same name. Consider
+this Dockerfile with an `ENV` and `ARG` instruction.
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=v1.0.0
+RUN echo $CONT_IMG_VER
+```
+
+Then, assume this image is built with this command:
+
+```console
+$ docker build --build-arg CONT_IMG_VER=v2.0.1 .
+```
+
+In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting
+passed by the user: `v2.0.1`. This behavior is similar to a shell
+script where a locally scoped variable overrides the variables passed as
+arguments or inherited from environment, from its point of definition.
+
+Using the example above but with a different `ENV` specification, you can create
+more useful interactions between `ARG` and `ENV` instructions:
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=${CONT_IMG_VER:-v1.0.0}
+RUN echo $CONT_IMG_VER
+```
+
+Unlike an `ARG` instruction, `ENV` values are always persisted in the built
+image. 
Consider a docker build without the `--build-arg` flag: + +```console +$ docker build . +``` + +Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but +its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + +The variable expansion technique in this example allows you to pass arguments +from the command line and persist them in the final image by leveraging the +`ENV` instruction. Variable expansion is only supported for [a limited set of +Dockerfile instructions.](#environment-replacement) + +### Predefined ARGs + +Docker has a set of predefined `ARG` variables that you can use without a +corresponding `ARG` instruction in the Dockerfile. + +- `HTTP_PROXY` +- `http_proxy` +- `HTTPS_PROXY` +- `https_proxy` +- `FTP_PROXY` +- `ftp_proxy` +- `NO_PROXY` +- `no_proxy` +- `ALL_PROXY` +- `all_proxy` + +To use these, pass them on the command line using the `--build-arg` flag, for +example: + +```console +$ docker build --build-arg HTTPS_PROXY=https://my-proxy.example.com . +``` + +By default, these pre-defined variables are excluded from the output of +`docker history`. Excluding them reduces the risk of accidentally leaking +sensitive authentication information in an `HTTP_PROXY` variable. + +For example, consider building the following Dockerfile using +`--build-arg HTTP_PROXY=http://user:pass@proxy.lon.example.com` + +```dockerfile +FROM ubuntu +RUN echo "Hello World" +``` + +In this case, the value of the `HTTP_PROXY` variable is not available in the +`docker history` and is not cached. If you were to change location, and your +proxy server changed to `http://user:pass@proxy.sfo.example.com`, a subsequent +build does not result in a cache miss. + +If you need to override this behaviour then you may do so by adding an `ARG` +statement in the Dockerfile as follows: + +```dockerfile +FROM ubuntu +ARG HTTP_PROXY +RUN echo "Hello World" +``` + +When building this Dockerfile, the `HTTP_PROXY` is preserved in the +`docker history`, and changing its value invalidates the build cache. + +### Automatic platform ARGs in the global scope + +This feature is only available when using the [BuildKit](https://docs.docker.com/build/buildkit/) +backend. + +Docker predefines a set of `ARG` variables with information on the platform of +the node performing the build (build platform) and on the platform of the +resulting image (target platform). The target platform can be specified with +the `--platform` flag on `docker build`. + +The following `ARG` variables are set automatically: + +- `TARGETPLATFORM` - platform of the build result. Eg `linux/amd64`, `linux/arm/v7`, `windows/amd64`. +- `TARGETOS` - OS component of TARGETPLATFORM +- `TARGETARCH` - architecture component of TARGETPLATFORM +- `TARGETVARIANT` - variant component of TARGETPLATFORM +- `BUILDPLATFORM` - platform of the node performing the build. +- `BUILDOS` - OS component of BUILDPLATFORM +- `BUILDARCH` - architecture component of BUILDPLATFORM +- `BUILDVARIANT` - variant component of BUILDPLATFORM + +These arguments are defined in the global scope so are not automatically +available inside build stages or for your `RUN` commands. To expose one of +these arguments inside the build stage redefine it without value. 
+
+For example:
+
+```dockerfile
+FROM alpine
+ARG TARGETPLATFORM
+RUN echo "I'm building for $TARGETPLATFORM"
+```
+
+### BuildKit built-in build args
+
+| Arg                                   | Type   | Description |
+|---------------------------------------|--------|-------------|
+| `BUILDKIT_CACHE_MOUNT_NS`             | String | Set optional cache ID namespace. |
+| `BUILDKIT_CONTEXT_KEEP_GIT_DIR`       | Bool   | Trigger git context to keep the `.git` directory. |
+| `BUILDKIT_BUILDINFO`                  | Bool   | Enable build info (default `true`). |
+| `BUILDKIT_INLINE_BUILDINFO_ATTRS`[^2] | Bool   | Inline build info attributes in image config or not. |
+| `BUILDKIT_INLINE_CACHE`[^2]           | Bool   | Inline cache metadata to image config or not. |
+| `BUILDKIT_MULTI_PLATFORM`             | Bool   | Opt into deterministic output regardless of multi-platform output or not. |
+| `BUILDKIT_SANDBOX_HOSTNAME`           | String | Set the hostname (default `buildkitsandbox`). |
+| `BUILDKIT_SYNTAX`                     | String | Set frontend image. |
+| `SOURCE_DATE_EPOCH`                   | Int    | Set the UNIX timestamp for created image and layers. More info from [reproducible builds](https://reproducible-builds.org/docs/source-date-epoch/). Supported since Dockerfile 1.5, BuildKit 0.11. |
+
+> **Warning**
+>
+> Build information, along with the `BUILDKIT_BUILDINFO` and
+> `BUILDKIT_INLINE_BUILDINFO_ATTRS` build args, is deprecated and will be
+> removed in the next release. See the [BuildKit Deprecated features page](https://github.com/moby/buildkit/blob/master/docs/deprecated.md)
+> for status and recommended alternatives for this feature.
+
+#### Example: keep `.git` dir
+
+When using a Git context, the `.git` directory is not kept on git checkouts. It
+can be useful to keep it around if you want to retrieve git information during
+your build:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+WORKDIR /src
+RUN --mount=target=. \
+  make REVISION=$(git rev-parse HEAD) build
+```
+
+```console
+$ docker build --build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 https://github.com/user/repo.git#main
+```
+
+### Impact on build caching
+
+`ARG` variables are not persisted into the built image as `ENV` variables are.
+However, `ARG` variables do impact the build cache in similar ways. If a
+Dockerfile defines an `ARG` variable whose value is different from a previous
+build, then a "cache miss" occurs upon its first usage, not its definition. In
+particular, all `RUN` instructions following an `ARG` instruction use the `ARG`
+variable implicitly (as an environment variable), thus can cause a cache miss.
+All predefined `ARG` variables are exempt from caching unless there is a
+matching `ARG` statement in the `Dockerfile`.
+
+For example, consider these two Dockerfiles:
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+RUN echo $CONT_IMG_VER
+```
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+RUN echo hello
+```
+
+If you specify `--build-arg CONT_IMG_VER=<value>` on the command line, in both
+cases, the specification on line 2 does not cause a cache miss; line 3 does
+cause a cache miss. `ARG CONT_IMG_VER` causes the `RUN` line to be identified
+as the same as running `CONT_IMG_VER=<value> echo hello`, so if the `<value>`
+changes, we get a cache miss.
+
+Consider another example under the same command line:
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=$CONT_IMG_VER
+RUN echo $CONT_IMG_VER
+```
+
+In this example, the cache miss occurs on line 3. 
The miss happens because
+the variable's value in the `ENV` references the `ARG` variable and that
+variable is changed through the command line. In this example, the `ENV`
+command causes the image to include the value.
+
+If an `ENV` instruction overrides an `ARG` instruction of the same name, like
+this Dockerfile:
+
+```dockerfile
+FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=hello
+RUN echo $CONT_IMG_VER
+```
+
+Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a
+constant (`hello`). As a result, the environment variables and values used on
+the `RUN` (line 4) don't change between builds.
+
+## ONBUILD
+
+```dockerfile
+ONBUILD <INSTRUCTION>
+```
+
+The `ONBUILD` instruction adds to the image a *trigger* instruction to
+be executed at a later time, when the image is used as the base for
+another build. The trigger will be executed in the context of the
+downstream build, as if it had been inserted immediately after the
+`FROM` instruction in the downstream `Dockerfile`.
+
+Any build instruction can be registered as a trigger.
+
+This is useful if you are building an image which will be used as a base
+to build other images, for example an application build environment or a
+daemon which may be customized with user-specific configuration.
+
+For example, if your image is a reusable Python application builder, it
+will require application source code to be added in a particular
+directory, and it might require a build script to be called *after*
+that. You can't just call `ADD` and `RUN` now, because you don't yet
+have access to the application source code, and it will be different for
+each application build. You could simply provide application developers
+with a boilerplate `Dockerfile` to copy-paste into their application, but
+that is inefficient, error-prone and difficult to update because it
+mixes with application-specific code.
+
+The solution is to use `ONBUILD` to register instructions in advance, to
+run later, during the next build stage.
+
+Here's how it works:
+
+1. When it encounters an `ONBUILD` instruction, the builder adds a
+   trigger to the metadata of the image being built. The instruction
+   does not otherwise affect the current build.
+2. At the end of the build, a list of all triggers is stored in the
+   image manifest, under the key `OnBuild`. They can be inspected with
+   the `docker inspect` command.
+3. Later the image may be used as a base for a new build, using the
+   `FROM` instruction. As part of processing the `FROM` instruction,
+   the downstream builder looks for `ONBUILD` triggers, and executes
+   them in the same order they were registered. If any of the triggers
+   fail, the `FROM` instruction is aborted which in turn causes the
+   build to fail. If all triggers succeed, the `FROM` instruction
+   completes and the build continues as usual.
+4. Triggers are cleared from the final image after being executed. In
+   other words they are not inherited by "grand-children" builds.
+
+For example, you might add something like this:
+
+```dockerfile
+ONBUILD ADD . /app/src
+ONBUILD RUN /usr/local/bin/python-build --dir /app/src
+```
+
+> **Warning**
+>
+> Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed.
+
+> **Warning**
+>
+> The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions.
+
+## STOPSIGNAL
+
+```dockerfile
+STOPSIGNAL signal
+```
+
+The `STOPSIGNAL` instruction sets the system call signal that will be sent to the
+container to exit. 
This signal can be a signal name in the format `SIG<NAME>`,
+for instance `SIGKILL`, or an unsigned number that matches a position in the
+kernel's syscall table, for instance `9`. The default is `SIGTERM` if not
+defined.
+
+The image's default stopsignal can be overridden per container, using the
+`--stop-signal` flag on `docker run` and `docker create`.
+
+## HEALTHCHECK
+
+The `HEALTHCHECK` instruction has two forms:
+
+- `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container)
+- `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image)
+
+The `HEALTHCHECK` instruction tells Docker how to test a container to check that
+it is still working. This can detect cases such as a web server that is stuck in
+an infinite loop and unable to handle new connections, even though the server
+process is still running.
+
+When a container has a healthcheck specified, it has a _health status_ in
+addition to its normal status. This status is initially `starting`. Whenever a
+health check passes, it becomes `healthy` (whatever state it was previously in).
+After a certain number of consecutive failures, it becomes `unhealthy`.
+
+The options that can appear before `CMD` are:
+
+- `--interval=DURATION` (default: `30s`)
+- `--timeout=DURATION` (default: `30s`)
+- `--start-period=DURATION` (default: `0s`)
+- `--retries=N` (default: `3`)
+
+The health check will first run **interval** seconds after the container is
+started, and then again **interval** seconds after each previous check completes.
+
+If a single run of the check takes longer than **timeout** seconds then the check
+is considered to have failed.
+
+It takes **retries** consecutive failures of the health check for the container
+to be considered `unhealthy`.
+
+**start period** provides initialization time for containers that need time to bootstrap.
+Probe failure during that period will not be counted towards the maximum number of retries.
+However, if a health check succeeds during the start period, the container is considered
+started and all consecutive failures will be counted towards the maximum number of retries.
+
+There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list
+more than one then only the last `HEALTHCHECK` will take effect.
+
+The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK
+CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands;
+see e.g. `ENTRYPOINT` for details).
+
+The command's exit status indicates the health status of the container.
+The possible values are:
+
+- 0: success - the container is healthy and ready for use
+- 1: unhealthy - the container is not working correctly
+- 2: reserved - do not use this exit code
+
+For example, to check every five minutes or so that a web-server is able to
+serve the site's main page within three seconds:
+
+```dockerfile
+HEALTHCHECK --interval=5m --timeout=3s \
+  CMD curl -f http://localhost/ || exit 1
+```
+
+To help debug failing probes, any output text (UTF-8 encoded) that the command writes
+on stdout or stderr will be stored in the health status and can be queried with
+`docker inspect`. Such output should be kept short (only the first 4096 bytes
+are stored currently).
+
+When the health status of a container changes, a `health_status` event is
+generated with the new status. 
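+
+As a sketch of the _exec_ array form (the use of `curl` here is an
+assumption - it must exist in the image; wrapping the probe in `sh -c` keeps
+the documented `0`/`1` exit convention):
+
+```dockerfile
+FROM nginx
+# curl exits with its own non-zero codes on failure, so normalize them to 1
+HEALTHCHECK --interval=5m --timeout=3s --retries=3 \
+  CMD ["sh", "-c", "curl -f http://localhost/ || exit 1"]
+```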
+ + +## SHELL + +```dockerfile +SHELL ["executable", "parameters"] +``` + +The `SHELL` instruction allows the default shell used for the *shell* form of +commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on +Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON +form in a Dockerfile. + +The `SHELL` instruction is particularly useful on Windows where there are +two commonly used and quite different native shells: `cmd` and `powershell`, as +well as alternate shells available including `sh`. + +The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides +all previous `SHELL` instructions, and affects all subsequent instructions. For example: + +```dockerfile +FROM microsoft/windowsservercore + +# Executed as cmd /S /C echo default +RUN echo default + +# Executed as cmd /S /C powershell -command Write-Host default +RUN powershell -command Write-Host default + +# Executed as powershell -command Write-Host hello +SHELL ["powershell", "-command"] +RUN Write-Host hello + +# Executed as cmd /S /C echo hello +SHELL ["cmd", "/S", "/C"] +RUN echo hello +``` + +The following instructions can be affected by the `SHELL` instruction when the +*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. + +The following example is a common pattern found on Windows which can be +streamlined by using the `SHELL` instruction: + +```dockerfile +RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" +``` + +The command invoked by docker will be: + +```powershell +cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" +``` + +This is inefficient for two reasons. First, there is an un-necessary cmd.exe command +processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* +form requires an extra `powershell -command` prefixing the command. + +To make this more efficient, one of two mechanisms can be employed. One is to +use the JSON form of the RUN command such as: + +```dockerfile +RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] +``` + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping. The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + +```dockerfile +# escape=` + +FROM microsoft/nanoserver +SHELL ["powershell","-command"] +RUN New-Item -ItemType Directory C:\Example +ADD Execute-MyCmdlet.ps1 c:\example\ +RUN c:\example\Execute-MyCmdlet -sample 'hello world' +``` + +Resulting in: + +```console +PS E:\myproject> docker build -t shell . 
+
+Sending build context to Docker daemon 4.096 kB
+Step 1/5 : FROM microsoft/nanoserver
+ ---> 22738ff49c6d
+Step 2/5 : SHELL powershell -command
+ ---> Running in 6fcdb6855ae2
+ ---> 6331462d4300
+Removing intermediate container 6fcdb6855ae2
+Step 3/5 : RUN New-Item -ItemType Directory C:\Example
+ ---> Running in d0eef8386e97
+
+
+    Directory: C:\
+
+
+Mode                LastWriteTime         Length Name
+----                -------------         ------ ----
+d-----        10/28/2016  11:26 AM                Example
+
+
+ ---> 3f2fbf1395d9
+Removing intermediate container d0eef8386e97
+Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\
+ ---> a955b2621c31
+Removing intermediate container b825593d39fc
+Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world'
+ ---> Running in be6d8e63fe75
+hello world
+ ---> 8e559e9bf424
+Removing intermediate container be6d8e63fe75
+Successfully built 8e559e9bf424
+PS E:\myproject>
+```
+
+The `SHELL` instruction could also be used to modify the way in which
+a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed
+environment variable expansion semantics could be modified.
+
+The `SHELL` instruction can also be used on Linux should an alternate shell be
+required such as `zsh`, `csh`, `tcsh` and others.
+
+## Here-Documents
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.4`](#syntax)
+
+Here-documents allow redirection of subsequent Dockerfile lines to the input of
+`RUN` or `COPY` commands. If such a command contains a [here-document](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_07_04)
+the Dockerfile considers the next lines until the line only containing a
+here-doc delimiter as part of the same command.
+
+### Example: Running a multi-line script
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM debian
+RUN <<EOT bash
+  apt-get update
+  apt-get install -y vim
+EOT
+```
+
+If the command only contains a here-document, its contents are evaluated with
+the default shell:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM debian
+RUN <<EOT
+  mkdir -p foo/bar
+EOT
+```
+
+Alternatively, a shebang header can be used to define an interpreter:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM python:3.6
+RUN <<EOT
+#!/usr/bin/env python
+print("hello world")
+EOT
+```
+
+More complex examples may use multiple here-documents:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+RUN <<FILE1 cat > file1 && <<FILE2 cat > file2
+I am
+first
+FILE1
+I am
+second
+FILE2
+```
+
+### Example: Creating inline files
+
+In `COPY` commands, source parameters can be replaced with here-doc indicators.
+Regular here-doc [variable expansion and tab stripping rules](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_07_04) apply.
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+ARG FOO=bar
+COPY <<-EOT /app/foo
+	hello ${FOO}
+EOT
+```
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+COPY <<-"EOT" /app/script.sh
+	echo hello ${FOO}
+EOT
+RUN FOO=abc ash /app/script.sh
+```
+
+## Dockerfile examples
+
+For examples of Dockerfiles, refer to:
+
+- The ["build images" section](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/)
+- The ["get started" tutorial](https://docs.docker.com/get-started/)
+- The [language-specific getting started guides](https://docs.docker.com/language/)
+
+[^1]: Value required
+[^2]: For Docker-integrated [BuildKit](https://docs.docker.com/build/buildkit/#getting-started) and `docker buildx build`
diff --git a/frontend/dockerfile/docs/syntax.md b/frontend/dockerfile/docs/syntax.md
index 33ff1cfbb9a6..fc84bdaae508 100644
--- a/frontend/dockerfile/docs/syntax.md
+++ b/frontend/dockerfile/docs/syntax.md
@@ -1,376 +1,3 @@
 # Dockerfile frontend syntaxes
 
-This page documents new BuildKit-only commands added to the Dockerfile frontend.
-
-## Note for Docker users
-
-If you are using Docker v18.09 or later, BuildKit mode can be enabled by setting `export DOCKER_BUILDKIT=1` on the client side.
-
-[Docker Buildx](https://github.com/docker/buildx) always enables BuildKit. 
- -## Using external Dockerfile frontend - -BuildKit supports loading frontends dynamically from container images. Images for Dockerfile frontends are available at [`docker/dockerfile`](https://hub.docker.com/r/docker/dockerfile/tags/) repository. - -To use the external frontend, the first line of your Dockerfile needs to be `# syntax=docker/dockerfile:1.3` pointing to the -specific image you want to use. - -BuildKit also ships with Dockerfile frontend builtin but it is recommended to use an external image to make sure that all -users use the same version on the builder and to pick up bugfixes automatically without waiting for a new version of BuildKit -or Docker engine. - -The images are published on two channels: *latest* and *labs*. The latest channel uses semver versioning while labs uses an -[incrementing number](https://github.com/moby/buildkit/issues/528). This means the labs channel may remove a feature without -incrementing the major component of a version and you may want to pin the image to a specific revision. Even when syntaxes -change in between releases on labs channel, the old versions are guaranteed to be backward compatible. - - -## Linked copies `COPY --link`, `ADD --link` - -To use this flag set Dockerfile version to at least `1.4`. - -```dockerfile -# syntax=docker/dockerfile:1.4 -``` - -Enabling this flag in `COPY` or `ADD` commands allows you to copy files with enhanced semantics where your files remain independent on their own layer and don't get invalidated when commands on previous layers are changed. - -When `--link` is used your source files are copied into an empty destination directory. That directory is turned into a layer that is linked on top of your previous state. - -```dockerfile -# syntax=docker/dockerfile:1.4 -FROM alpine -COPY --link /foo /bar -``` - -Is equivalent of doing two builds: - -```dockerfile -FROM alpine -``` - -and - -```dockerfile -FROM scratch -COPY /foo /bar -``` - -and merging all the layers of both images together. - -#### Benefits of using `--link` - -Using `--link` allows to reuse already built layers in subsequent builds with `--cache-from` even if the previous layers have changed. This is especially important for multi-stage builds where a `COPY --from` statement would previously get invalidated if any previous commands in the same stage changed, causing the need to rebuild the intermediate stages again. With `--link` the layer the previous build generated is reused and merged on top of the new layers. This also means you can easily rebase your images when the base images receive updates, without having to execute the whole build again. In backends that support it, BuildKit can do this rebase action without the need to push or pull any layers between the client and the registry. BuildKit will detect this case and only create new image manifest that contains the new layers and old layers in correct order. - -The same behavior where BuildKit can avoid pulling down the base image can also happen when using `--link` and no other commands that would require access to the files in the base image. In that case BuildKit will only build the layers for the `COPY` commands and push them to the registry directly on top of the layers of the base image. - -#### Incompatibilities with `--link=false` - -When using `--link` the `COPY/ADD` commands are not allowed to read any files from the previous state. This means that if in previous state the destination directory was a path that contained a symlink, `COPY/ADD` can not follow it. 
In the final image the destination path created with `--link` will always be a path containing only directories. - -If you don't rely on the behavior of following symlinks in the destination path, using `--link` is always recommended. The performance of `--link` is equivalent or better than the default behavior and it creates much better conditions for cache reuse. - - -## Build Mounts `RUN --mount=...` - -To use this flag set Dockerfile version to at least `1.2` - -``` -# syntax=docker/dockerfile:1.3 -``` - -`RUN --mount` allows you to create mounts that process running as part of the build can access. This can be used to bind -files from other part of the build without copying, accessing build secrets or ssh-agent sockets, or creating cache -locations to speed up your build. - -### `RUN --mount=type=bind` (the default mount type) - -This mount type allows binding directories (read-only) in the context or in an image to the build container. - -|Option |Description| -|---------------------|-----------| -|`target` (required) | Mount path.| -|`source` | Source path in the `from`. Defaults to the root of the `from`.| -|`from` | Build stage or image name for the root of the source. Defaults to the build context.| -|`rw`,`readwrite` | Allow writes on the mount. Written data will be discarded.| - -### `RUN --mount=type=cache` - -This mount type allows the build container to cache directories for compilers and package managers. - -|Option |Description| -|---------------------|-----------| -|`id` | Optional ID to identify separate/different caches. Defaults to value of `target`. | -|`target` (required) | Mount path.| -|`ro`,`readonly` | Read-only if set.| -|`sharing` | One of `shared`, `private`, or `locked`. Defaults to `shared`. A `shared` cache mount can be used concurrently by multiple writers. `private` creates a new mount if there are multiple writers. `locked` pauses the second writer until the first one releases the mount.| -|`from` | Build stage to use as a base of the cache mount. Defaults to empty directory.| -|`source` | Subpath in the `from` to mount. Defaults to the root of the `from`.| -|`mode` | File mode for new cache directory in octal. Default 0755.| -|`uid` | User ID for new cache directory. Default 0.| -|`gid` | Group ID for new cache directory. Default 0.| - -Contents of the cache directories persists between builder invocations without invalidating the -instruction cache. Cache mounts should only be used for better performance. Your build should work -with any contents of the cache directory as another build may overwrite the files or GC may clean -it if more storage space is needed. - - -#### Example: cache Go packages - -```dockerfile -# syntax = docker/dockerfile:1.3 -FROM golang -... -RUN --mount=type=cache,target=/root/.cache/go-build go build ... -``` - -#### Example: cache apt packages - -```dockerfile -# syntax = docker/dockerfile:1.3 -FROM ubuntu -RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache -RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ - apt update && apt-get --no-install-recommends install -y gcc -``` - -### `RUN --mount=type=tmpfs` - -This mount type allows mounting tmpfs in the build container. 
- -|Option |Description| -|---------------------|-----------| -|`target` (required) | Mount path.| -|`size` | Specify an upper limit on the size of the filesystem.| - - -### `RUN --mount=type=secret` - -This mount type allows the build container to access secure files such as private keys without baking them into the image. - -|Option |Description| -|---------------------|-----------| -|`id` | ID of the secret. Defaults to basename of the target path.| -|`target` | Mount path. Defaults to `/run/secrets/` + `id`.| -|`required` | If set to `true`, the instruction errors out when the secret is unavailable. Defaults to `false`.| -|`mode` | File mode for secret file in octal. Default 0400.| -|`uid` | User ID for secret file. Default 0.| -|`gid` | Group ID for secret file. Default 0.| - - -#### Example: access to S3 - -```dockerfile -# syntax = docker/dockerfile:1.3 -FROM python:3 -RUN pip install awscli -RUN --mount=type=secret,id=aws,target=/root/.aws/credentials aws s3 cp s3://... ... -``` - -```console -$ docker build --secret id=aws,src=$HOME/.aws/credentials . -``` - -```console -$ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --secret id=aws,src=$HOME/.aws/credentials -``` - -### `RUN --mount=type=ssh` - -This mount type allows the build container to access SSH keys via SSH agents, with support for passphrases. - -|Option |Description| -|---------------------|-----------| -|`id` | ID of SSH agent socket or key. Defaults to "default".| -|`target` | SSH agent socket path. Defaults to `/run/buildkit/ssh_agent.${N}`.| -|`required` | If set to `true`, the instruction errors out when the key is unavailable. Defaults to `false`.| -|`mode` | File mode for socket in octal. Default 0600.| -|`uid` | User ID for socket. Default 0.| -|`gid` | Group ID for socket. Default 0.| - - -#### Example: access to Gitlab - -```dockerfile -# syntax = docker/dockerfile:1.3 -FROM alpine -RUN apk add --no-cache openssh-client -RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts -RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello -# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here -# with the type of build progress is defined as `plain`. -``` - -```console -$ eval $(ssh-agent) -$ ssh-add ~/.ssh/id_rsa -(Input your passphrase here) -$ docker build --ssh default=$SSH_AUTH_SOCK . -``` - -``` -$ buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. \ - --ssh default=$SSH_AUTH_SOCK -``` - -You can also specify a path to `*.pem` file on the host directly instead of `$SSH_AUTH_SOCK`. -However, pem files with passphrases are not supported. - - -## Network modes `RUN --network=none|host|default` - -``` -# syntax=docker/dockerfile:1.3 -``` - -`RUN --network` allows control over which networking environment the command is run in. - -The allowed values are: - -* `none` - The command is run with no network access (`lo` is still available, - but is isolated to this process) -* `host` - The command is run in the host's network environment (similar to - `docker build --network=host`, but on a per-instruction basis) -* `default` - Equivalent to not supplying a flag at all, the command is run in - the default network for the build - -The use of `--network=host` is protected by the `network.host` entitlement, -which needs to be enabled when starting the buildkitd daemon -(`--allow-insecure-entitlement network.host`) and on the build request -(`--allow network.host`). 
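For API users, the same entitlement gate applies when solving through the Go client instead of `buildctl`. A minimal, hypothetical sketch (the `client` and `entitlements` packages are from this repository; the daemon address is a placeholder and the solve request itself is omitted):

```go
package main

import (
	"context"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/entitlements"
)

func main() {
	ctx := context.Background()

	// Placeholder address; rootless and containerized setups differ.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Equivalent of `--allow network.host` on the build request; buildkitd
	// must also run with `--allow-insecure-entitlement network.host`.
	opt := client.SolveOpt{
		AllowedEntitlements: []entitlements.Entitlement{
			entitlements.EntitlementNetworkHost,
		},
	}
	_ = opt // pass as the SolveOpt argument to c.Solve / c.Build
}
```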
-
-#### Example: isolating external effects
-
-```dockerfile
-# syntax = docker/dockerfile:1.3
-FROM python:3.6
-ADD mypackage.tgz wheels/
-RUN --network=none pip install --find-links wheels mypackage
-```
-
-`pip` will only be able to install the packages provided in the tarfile, which
-can be controlled by an earlier build stage.
-
-## Here-Documents
-
-This feature is available since `docker/dockerfile:1.4.0` release.
-
-```
-# syntax=docker/dockerfile:1.4
-```
-
-Here-documents allow redirection of subsequent Dockerfile lines to the input of `RUN` or `COPY` commands.
-If such command contains a [here-document](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_07_04)
-Dockerfile will consider the next lines until the line only containing a here-doc delimiter as part of the same command.
-
-#### Example: running a multi-line script
-
-```dockerfile
-# syntax = docker/dockerfile:1.4
-FROM debian
-RUN <<FILE1 cat > file1 && <<FILE2 cat > file2
-I am
-first
-FILE1
-I am
-second
-FILE2
-```
-
-#### Example: creating inline files
-
-In `COPY` commands source parameters can be replaced with here-doc indicators.
-Regular here-doc [variable expansion and tab stripping rules](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_07_04) apply.
-
-```dockerfile
-# syntax = docker/dockerfile:1.4
-FROM alpine
-ARG FOO=bar
-COPY <<-eot /app/foo
-	hello ${FOO}
-eot
-```
-
-```dockerfile
-# syntax = docker/dockerfile:1.4
-FROM alpine
-COPY <<-"eot" /app/script.sh
-	echo hello ${FOO}
-eot
-RUN FOO=abc ash /app/script.sh
-```
-
-## Security context `RUN --security=insecure|sandbox`
-
-To use this flag, set Dockerfile version to `labs` channel.
-
-```
-# syntax=docker/dockerfile:1.3-labs
-```
-
-With `--security=insecure`, builder runs the command without sandbox in insecure mode,
-which allows to run flows requiring elevated privileges (e.g. containerd). This is equivalent
-to running `docker run --privileged`. In order to access this feature, entitlement
-`security.insecure` should be enabled when starting the buildkitd daemon
-(`--allow-insecure-entitlement security.insecure`) and for a build request
-(`--allow security.insecure`).
-
-Default sandbox mode can be activated via `--security=sandbox`, but that is no-op.
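The here-document examples above are recognized by the heredoc helpers in `frontend/dockerfile/parser`, which this diff also touches (`ParseHeredoc` and `ChompHeredocContent` appear in parser.go further down). A rough sketch of those helpers in isolation; the `Name` and `Chomp` field names are assumed from the current parser types and are not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	// Parse the heredoc word that opens e.g. `COPY <<-EOT /app/foo`.
	h, err := parser.ParseHeredoc("<<-EOT")
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Name)  // EOT
	fmt.Println(h.Chomp) // true: the `<<-` form strips leading tabs

	// ChompHeredocContent applies that tab-stripping to a heredoc body.
	fmt.Print(parser.ChompHeredocContent("\thello ${FOO}\n")) // hello ${FOO}
}
```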
- -#### Example: check entitlements - -```dockerfile -# syntax = docker/dockerfile:1.3-labs -FROM ubuntu -RUN --security=insecure cat /proc/self/status | grep CapEff -``` - -``` -#84 0.093 CapEff: 0000003fffffffff -``` - -## Built-in build args - -* `BUILDKIT_CACHE_MOUNT_NS=` set optional cache ID namespace -* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=` trigger git context to keep the `.git` directory -* `BUILDKIT_INLINE_BUILDINFO_ATTRS=`¹ inline build info attributes in image config or not -* `BUILDKIT_INLINE_CACHE=`¹ inline cache metadata to image config or not -* `BUILDKIT_MULTI_PLATFORM=` opt into determnistic output regardless of multi-platform output or not -* `BUILDKIT_SANDBOX_HOSTNAME=` set the hostname (default `buildkitsandbox`) -* `BUILDKIT_SYNTAX=` set frontend image - -> **¹** For Docker-integrated BuildKit (`DOCKER_BUILDKIT=1 docker build`) and `docker buildx` +This page has moved to [Dockerfile reference documentation](reference.md) diff --git a/frontend/dockerfile/errors_test.go b/frontend/dockerfile/errors_test.go index ac329040b51c..d021bd28aa15 100644 --- a/frontend/dockerfile/errors_test.go +++ b/frontend/dockerfile/errors_test.go @@ -2,7 +2,6 @@ package dockerfile import ( "fmt" - "os" "testing" "github.com/containerd/continuity/fs/fstest" @@ -76,11 +75,11 @@ env bar=baz`, for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { - dir, err := tmpdir( + dir, err := integration.Tmpdir( + t, fstest.CreateFile("Dockerfile", []byte(tc.dockerfile), 0600), ) require.NoError(t, err) - defer os.RemoveAll(dir) c, err := client.New(sb.Context(), sb.Address()) require.NoError(t, err) diff --git a/frontend/dockerfile/instructions/bflag.go b/frontend/dockerfile/instructions/bflag.go index 1cfbf760006a..a527175b7369 100644 --- a/frontend/dockerfile/instructions/bflag.go +++ b/frontend/dockerfile/instructions/bflag.go @@ -1,10 +1,10 @@ package instructions import ( - "fmt" "strings" "github.com/moby/buildkit/util/suggest" + "github.com/pkg/errors" ) // FlagType is the type of the build flag @@ -88,7 +88,7 @@ func (bf *BFlags) AddStrings(name string) *Flag { // Note, any error will be generated when Parse() is called (see Parse). func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + bf.Err = errors.Errorf("Duplicate flag defined: %s", name) return nil } @@ -123,7 +123,8 @@ func (bf *BFlags) Used() []string { func (fl *Flag) IsTrue() bool { if fl.flagType != boolType { // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + err := errors.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name) + panic(err) } return fl.Value == "true" } @@ -134,19 +135,21 @@ func (fl *Flag) IsTrue() bool { // compile time error so it doesn't matter too much when we stop our // processing as long as we do stop it, so this allows the code // around AddXXX() to be just: -// defFlag := AddString("description", "") +// +// defFlag := AddString("description", "") +// // w/o needing to add an if-statement around each one. 
func (bf *BFlags) Parse() error { // If there was an error while defining the possible flags // go ahead and bubble it back up here since we didn't do it // earlier in the processing if bf.Err != nil { - return fmt.Errorf("error setting up flags: %s", bf.Err) + return errors.Wrap(bf.Err, "error setting up flags") } for _, arg := range bf.Args { if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("arg should start with -- : %s", arg) + return errors.Errorf("arg should start with -- : %s", arg) } if arg == "--" { @@ -164,11 +167,12 @@ func (bf *BFlags) Parse() error { flag, ok := bf.flags[arg] if !ok { - return suggest.WrapError(fmt.Errorf("unknown flag: %s", arg), arg, allFlags(bf.flags), true) + err := errors.Errorf("unknown flag: %s", arg) + return suggest.WrapError(err, arg, allFlags(bf.flags), true) } if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { - return fmt.Errorf("duplicate flag specified: %s", arg) + return errors.Errorf("duplicate flag specified: %s", arg) } bf.used[arg] = flag @@ -177,7 +181,7 @@ func (bf *BFlags) Parse() error { case boolType: // value == "" is only ok if no "=" was specified if index >= 0 && value == "" { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } lower := strings.ToLower(value) @@ -186,18 +190,18 @@ func (bf *BFlags) Parse() error { } else if lower == "true" || lower == "false" { flag.Value = lower } else { - return fmt.Errorf("expecting boolean value for flag %s, not: %s", arg, value) + return errors.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: if index < 0 { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: if index < 0 { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) diff --git a/frontend/dockerfile/instructions/commands.go b/frontend/dockerfile/instructions/commands.go index 48ebf183a965..9ffbd457ab3f 100644 --- a/frontend/dockerfile/instructions/commands.go +++ b/frontend/dockerfile/instructions/commands.go @@ -9,7 +9,10 @@ import ( "github.com/pkg/errors" ) -// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) +// KeyValuePair represents an arbitrary named value. +// +// This is useful for commands containing key-value maps that want to preserve +// the order of insertion, instead of map[string]string which does not. type KeyValuePair struct { Key string Value string @@ -19,13 +22,17 @@ func (kvp *KeyValuePair) String() string { return kvp.Key + "=" + kvp.Value } -// KeyValuePairOptional is the same as KeyValuePair but Value is optional +// KeyValuePairOptional is identical to KeyValuePair, but allows for optional values. type KeyValuePairOptional struct { Key string Value *string Comment string } +func (kvpo *KeyValuePairOptional) String() string { + return kvpo.Key + "=" + kvpo.ValueString() +} + func (kvpo *KeyValuePairOptional) ValueString() string { v := "" if kvpo.Value != nil { @@ -34,7 +41,11 @@ func (kvpo *KeyValuePairOptional) ValueString() string { return v } -// Command is implemented by every command present in a dockerfile +// Command interface is implemented by every possible command in a Dockerfile. 
+//
+// The interface only exposes the minimal common elements shared between every
+// command, while more detailed information per-command can be extracted using
+// runtime type analysis, e.g. type-switches.
 type Command interface {
 	Name() string
 	Location() []parser.Range
@@ -68,17 +79,18 @@ func newWithNameAndCode(req parseRequest) withNameAndCode {
 	return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command, location: req.location}
 }
 
-// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
+// SingleWordExpander is a provider for variable expansion where a single word
+// corresponds to a single output.
 type SingleWordExpander func(word string) (string, error)
 
-// SupportsSingleWordExpansion interface marks a command as supporting variable
-// expansion
+// SupportsSingleWordExpansion interface allows a command to support variable
+// expansion.
 type SupportsSingleWordExpansion interface {
 	Expand(expander SingleWordExpander) error
 }
 
-// SupportsSingleWordExpansionRaw interface marks a command as supporting
-// variable expansion, while ensuring that quotes are preserved
+// SupportsSingleWordExpansionRaw interface allows a command to support
+// variable expansion, while ensuring that minimal transformations are applied
+// during expansion, so that quotes and other special characters are preserved.
 type SupportsSingleWordExpansionRaw interface {
 	ExpandRaw(expander SingleWordExpander) error
 }
@@ -121,18 +133,22 @@ func expandSliceInPlace(values []string, expander SingleWordExpander) error {
 	return nil
 }
 
-// EnvCommand : ENV key1 value1 [keyN valueN...]
+// EnvCommand allows setting a variable in the container's environment.
+//
+// ENV key1 value1 [keyN valueN...]
 type EnvCommand struct {
 	withNameAndCode
-	Env KeyValuePairs // kvp slice instead of map to preserve ordering
+	Env KeyValuePairs
 }
 
-// Expand variables
 func (c *EnvCommand) Expand(expander SingleWordExpander) error {
 	return expandKvpsInPlace(c.Env, expander)
 }
 
-// MaintainerCommand : MAINTAINER maintainer_name
+// MaintainerCommand (deprecated) allows specifying maintainer details for
+// the image.
+//
+// MAINTAINER maintainer_name
 type MaintainerCommand struct {
 	withNameAndCode
 	Maintainer string
@@ -154,17 +170,15 @@ func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand {
 	return cmd
 }
 
-// LabelCommand : LABEL some json data describing the image
-//
-// Sets the Label variable foo to bar,
+// LabelCommand sets an image label in the output
 //
+// LABEL some json data describing the image
 type LabelCommand struct {
 	withNameAndCode
-	Labels KeyValuePairs // kvp slice instead of map to preserve ordering
+	Labels   KeyValuePairs
 	noExpand bool
 }
 
-// Expand variables
 func (c *LabelCommand) Expand(expander SingleWordExpander) error {
 	if c.noExpand {
 		return nil
@@ -174,16 +188,16 @@ func (c *LabelCommand) Expand(expander SingleWordExpander) error {
 
 // SourceContent represents an anonymous file object
 type SourceContent struct {
-	Path   string
-	Data   string
-	Expand bool
+	Path   string // path to the file
+	Data   string // string content from the file
+	Expand bool   // whether to expand file contents
 }
 
 // SourcesAndDest represent a collection of sources and a destination
 type SourcesAndDest struct {
-	DestPath       string
-	SourcePaths    []string
-	SourceContents []SourceContent
+	DestPath       string          // destination to write output
+	SourcePaths    []string        // file path sources
+	SourceContents []SourceContent // anonymous file sources
 }
 
 func (s *SourcesAndDest) Expand(expander SingleWordExpander) error {
@@ -216,20 +230,22 @@ func (s *SourcesAndDest) ExpandRaw(expander SingleWordExpander) error {
 	return nil
 }
 
-// AddCommand : ADD foo /path
+// AddCommand adds files from the provided sources to the target destination.
 //
-// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling
-// exist here. If you do not wish to have this automatic handling, use COPY.
+// ADD foo /path
 //
+// ADD supports tarball and remote URL handling, which may not always be
+// desired - if you do not wish to have this automatic handling, use COPY.
 type AddCommand struct {
 	withNameAndCode
 	SourcesAndDest
-	Chown string
-	Chmod string
-	Link  bool
+	Chown      string
+	Chmod      string
+	Link       bool
+	KeepGitDir bool // whether to keep .git dir, only meaningful for git sources
+	Checksum   string
 }
 
-// Expand variables
 func (c *AddCommand) Expand(expander SingleWordExpander) error {
 	expandedChown, err := expander(c.Chown)
 	if err != nil {
@@ -237,13 +253,20 @@ func (c *AddCommand) Expand(expander SingleWordExpander) error {
 	}
 	c.Chown = expandedChown
 
+	expandedChecksum, err := expander(c.Checksum)
+	if err != nil {
+		return err
+	}
+	c.Checksum = expandedChecksum
+
 	return c.SourcesAndDest.Expand(expander)
 }
 
-// CopyCommand : COPY foo /path
+// CopyCommand copies files from the provided sources to the target destination.
 //
-// Same as 'ADD' but without the tar and remote url handling.
+// COPY foo /path
 //
+// Same as 'ADD' but without the magic additional tarball and remote URL handling.
 type CopyCommand struct {
 	withNameAndCode
 	SourcesAndDest
@@ -253,7 +276,6 @@ type CopyCommand struct {
 	Link  bool
 }
 
-// Expand variables
 func (c *CopyCommand) Expand(expander SingleWordExpander) error {
 	expandedChown, err := expander(c.Chown)
 	if err != nil {
@@ -264,22 +286,24 @@ func (c *CopyCommand) Expand(expander SingleWordExpander) error {
 	return c.SourcesAndDest.Expand(expander)
 }
 
-// OnbuildCommand : ONBUILD
+// OnbuildCommand allows specifying a command to be run on builds that use
+// the resulting image as a base image.
+// +// ONBUILD type OnbuildCommand struct { withNameAndCode Expression string } -// WorkdirCommand : WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. +// WorkdirCommand sets the current working directory for all future commands in +// the stage // +// WORKDIR /tmp type WorkdirCommand struct { withNameAndCode Path string } -// Expand variables func (c *WorkdirCommand) Expand(expander SingleWordExpander) error { p, err := expander(c.Path) if err != nil { @@ -303,16 +327,13 @@ type ShellDependantCmdLine struct { PrependShell bool } -// RunCommand : RUN some command yo +// RunCommand runs a command. // -// run a command and commit the image. Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: +// RUN "echo hi" # sh -c "echo hi" // -// RUN echo hi # sh -c echo hi (Linux) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi +// or // +// RUN ["echo", "hi"] # echo hi type RunCommand struct { withNameAndCode withExternalData @@ -327,60 +348,54 @@ func (c *RunCommand) Expand(expander SingleWordExpander) error { return nil } -// CmdCommand : CMD foo +// CmdCommand sets the default command to run in the container on start. +// +// CMD "echo hi" # sh -c "echo hi" // -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. +// or // +// CMD ["echo", "hi"] # echo hi type CmdCommand struct { withNameAndCode ShellDependantCmdLine } -// HealthCheckCommand : HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. +// HealthCheckCommand sets the default healthcheck command to run in the container. // +// HEALTHCHECK type HealthCheckCommand struct { withNameAndCode Health *container.HealthConfig } -// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// EntrypointCommand sets the default entrypoint of the container to use the +// provided command. // -// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint -// is initialized at newBuilder time instead of through argument parsing. +// ENTRYPOINT /usr/sbin/nginx // +// Entrypoint uses the default shell if not in JSON format. type EntrypointCommand struct { withNameAndCode ShellDependantCmdLine } -// ExposeCommand : EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// req.runConfig.ExposedPorts for runconfig. +// ExposeCommand marks a container port that can be exposed at runtime. // +// EXPOSE 6667/tcp 7000/tcp type ExposeCommand struct { withNameAndCode Ports []string } -// UserCommand : USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. +// UserCommand sets the user for the rest of the stage, and when starting the +// container at run-time. // +// USER user type UserCommand struct { withNameAndCode User string } -// Expand variables func (c *UserCommand) Expand(expander SingleWordExpander) error { p, err := expander(c.User) if err != nil { @@ -390,29 +405,26 @@ func (c *UserCommand) Expand(expander SingleWordExpander) error { return nil } -// VolumeCommand : VOLUME /foo -// -// Expose the volume /foo for use. 
Will also accept the JSON array form. +// VolumeCommand exposes the specified volume for use in the build environment. // +// VOLUME /foo type VolumeCommand struct { withNameAndCode Volumes []string } -// Expand variables func (c *VolumeCommand) Expand(expander SingleWordExpander) error { return expandSliceInPlace(c.Volumes, expander) } -// StopSignalCommand : STOPSIGNAL signal +// StopSignalCommand sets the signal that will be used to kill the container. // -// Set the signal that will be used to kill the container. +// STOPSIGNAL signal type StopSignalCommand struct { withNameAndCode Signal string } -// Expand variables func (c *StopSignalCommand) Expand(expander SingleWordExpander) error { p, err := expander(c.Signal) if err != nil { @@ -430,17 +442,16 @@ func (c *StopSignalCommand) CheckPlatform(platform string) error { return nil } -// ArgCommand : ARG name[=value] +// ArgCommand adds the specified variable to the list of variables that can be +// passed to the builder using the --build-arg flag for expansion and +// substitution. // -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. +// ARG name[=value] type ArgCommand struct { withNameAndCode Args []KeyValuePairOptional } -// Expand variables func (c *ArgCommand) Expand(expander SingleWordExpander) error { for i, v := range c.Args { p, err := expander(v.Key) @@ -460,32 +471,42 @@ func (c *ArgCommand) Expand(expander SingleWordExpander) error { return nil } -// ShellCommand : SHELL powershell -command +// ShellCommand sets a custom shell to use. // -// Set the non-default shell to use. +// SHELL bash -e -c type ShellCommand struct { withNameAndCode Shell strslice.StrSlice } -// Stage represents a single stage in a multi-stage build +// Stage represents a bundled collection of commands. +// +// Each stage begins with a FROM command (which is consumed into the Stage), +// indicating the source or stage to derive from, and ends either at the +// end-of-the file, or the start of the next stage. +// +// Stages can be named, and can be additionally configured to use a specific +// platform, in the case of a multi-arch base image. type Stage struct { - Name string - Commands []Command - BaseName string - SourceCode string - Platform string - Location []parser.Range - Comment string + Name string // name of the stage + Commands []Command // commands contained within the stage + BaseName string // name of the base stage or source + Platform string // platform of base source to use + + Comment string // doc-comment directly above the stage + + SourceCode string // contents of the defining FROM command + Location []parser.Range // location of the defining FROM command } -// AddCommand to the stage +// AddCommand appends a command to the stage. func (s *Stage) AddCommand(cmd Command) { // todo: validate cmd type s.Commands = append(s.Commands, cmd) } -// IsCurrentStage check if the stage name is the current stage +// IsCurrentStage returns true if the provided stage name is the name of the +// current stage, and false otherwise. func IsCurrentStage(s []Stage, name string) bool { if len(s) == 0 { return false @@ -493,7 +514,7 @@ func IsCurrentStage(s []Stage, name string) bool { return s[len(s)-1].Name == name } -// CurrentStage return the last stage in a slice +// CurrentStage returns the last stage from a list of stages. 
func CurrentStage(s []Stage) (*Stage, error) { if len(s) == 0 { return nil, errors.New("no build stage in current context") @@ -501,7 +522,7 @@ func CurrentStage(s []Stage) (*Stage, error) { return &s[len(s)-1], nil } -// HasStage looks for the presence of a given stage name +// HasStage looks for the presence of a given stage name from a list of stages. func HasStage(s []Stage, name string) (int, bool) { for i, stage := range s { // Stage name is case-insensitive by design diff --git a/frontend/dockerfile/instructions/commands_runmount.go b/frontend/dockerfile/instructions/commands_runmount.go index 517ded7d6788..e328b27bc7cc 100644 --- a/frontend/dockerfile/instructions/commands_runmount.go +++ b/frontend/dockerfile/instructions/commands_runmount.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - dockeropts "github.com/docker/docker/opts" + "github.com/docker/go-units" "github.com/moby/buildkit/util/suggest" "github.com/pkg/errors" ) @@ -231,11 +231,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } case "size": if m.Type == "tmpfs" { - tmpfsSize := new(dockeropts.MemBytes) - if err := tmpfsSize.Set(value); err != nil { + m.SizeLimit, err = units.RAMInBytes(value) + if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) } - m.SizeLimit = tmpfsSize.Value() } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } diff --git a/frontend/dockerfile/instructions/errors_unix.go b/frontend/dockerfile/instructions/errors_unix.go index 610aed7cc08b..7f1eaa5deb3d 100644 --- a/frontend/dockerfile/instructions/errors_unix.go +++ b/frontend/dockerfile/instructions/errors_unix.go @@ -3,8 +3,8 @@ package instructions -import "fmt" +import "github.com/pkg/errors" func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) + return errors.Errorf("%s requires the arguments to be in JSON form", command) } diff --git a/frontend/dockerfile/instructions/errors_windows.go b/frontend/dockerfile/instructions/errors_windows.go index a4843c5b6ab5..1eec9d126ce0 100644 --- a/frontend/dockerfile/instructions/errors_windows.go +++ b/frontend/dockerfile/instructions/errors_windows.go @@ -5,6 +5,8 @@ import ( "path/filepath" "regexp" "strings" + + "github.com/pkg/errors" ) func errNotJSON(command, original string) error { @@ -23,5 +25,5 @@ func errNotJSON(command, original string) error { strings.Contains(original, "]") { extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) + return errors.Errorf("%s requires the arguments to be in JSON form%s", command, extra) } diff --git a/frontend/dockerfile/instructions/parse.go b/frontend/dockerfile/instructions/parse.go index d3b7326ce250..6c362fc6fad6 100644 --- a/frontend/dockerfile/instructions/parse.go +++ b/frontend/dockerfile/instructions/parse.go @@ -1,3 +1,7 @@ +// The instructions package contains the definitions of the high-level +// Dockerfile commands, as well as low-level primitives for extracting these +// commands from a pre-parsed Abstract Syntax Tree. 
+ package instructions import ( @@ -37,7 +41,7 @@ func nodeArgs(node *parser.Node) []string { if len(arg.Children) == 0 { result = append(result, arg.Value) } else if len(arg.Children) == 1 { - //sub command + // sub command result = append(result, arg.Children[0].Value) result = append(result, nodeArgs(arg.Children[0])...) } @@ -281,6 +285,8 @@ func parseAdd(req parseRequest) (*AddCommand, error) { flChown := req.flags.AddString("chown", "") flChmod := req.flags.AddString("chmod", "") flLink := req.flags.AddBool("link", false) + flKeepGitDir := req.flags.AddBool("keep-git-dir", false) + flChecksum := req.flags.AddString("checksum", "") if err := req.flags.Parse(); err != nil { return nil, err } @@ -296,6 +302,8 @@ func parseAdd(req parseRequest) (*AddCommand, error) { Chown: flChown.Value, Chmod: flChmod.Value, Link: flLink.Value == "true", + KeepGitDir: flKeepGitDir.Value == "true", + Checksum: flChecksum.Value, }, nil } @@ -377,7 +385,7 @@ func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { case "ONBUILD": return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": - return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + return nil, errors.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") @@ -503,8 +511,11 @@ func parseOptInterval(f *Flag) (time.Duration, error) { if err != nil { return 0, err } + if d == 0 { + return 0, nil + } if d < container.MinimumDuration { - return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) + return 0, errors.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) } return d, nil } @@ -551,7 +562,7 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) default: - return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + return nil, errors.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) } interval, err := parseOptInterval(flInterval) @@ -577,8 +588,8 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { if err != nil { return nil, err } - if retries < 1 { - return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries) + if retries < 0 { + return nil, errors.Errorf("--retries cannot be negative (%d)", retries) } healthcheck.Retries = int(retries) } else { @@ -725,7 +736,7 @@ func errExactlyOneArgument(command string) error { } func errNoDestinationArgument(command string) error { - return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command) + return errors.Errorf("%s requires at least two arguments, but only one was provided. 
Destination could not be determined", command) } func errBadHeredoc(command string, option string) error { diff --git a/frontend/dockerfile/instructions/parse_test.go b/frontend/dockerfile/instructions/parse_test.go index bffbd37c2447..887efe5799fe 100644 --- a/frontend/dockerfile/instructions/parse_test.go +++ b/frontend/dockerfile/instructions/parse_test.go @@ -136,6 +136,10 @@ func TestParseOptInterval(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "cannot be less than 1ms") + flInterval.Value = "0ms" + _, err = parseOptInterval(flInterval) + require.NoError(t, err) + flInterval.Value = "1ms" _, err = parseOptInterval(flInterval) require.NoError(t, err) diff --git a/frontend/dockerfile/parser/directives.go b/frontend/dockerfile/parser/directives.go new file mode 100644 index 000000000000..db1668f252bf --- /dev/null +++ b/frontend/dockerfile/parser/directives.go @@ -0,0 +1,171 @@ +package parser + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +const ( + keySyntax = "syntax" + keyEscape = "escape" +) + +var validDirectives = map[string]struct{}{ + keySyntax: {}, + keyEscape: {}, +} + +type Directive struct { + Name string + Value string + Location []Range +} + +// DirectiveParser is a parser for Dockerfile directives that enforces the +// quirks of the directive parser. +type DirectiveParser struct { + line int + regexp *regexp.Regexp + seen map[string]struct{} + done bool +} + +func (d *DirectiveParser) setComment(comment string) { + d.regexp = regexp.MustCompile(fmt.Sprintf(`^%s\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`, comment)) +} + +func (d *DirectiveParser) ParseLine(line []byte) (*Directive, error) { + d.line++ + if d.done { + return nil, nil + } + if d.regexp == nil { + d.setComment("#") + } + + match := d.regexp.FindSubmatch(line) + if len(match) == 0 { + d.done = true + return nil, nil + } + + k := strings.ToLower(string(match[1])) + if _, ok := validDirectives[k]; !ok { + d.done = true + return nil, nil + } + if d.seen == nil { + d.seen = map[string]struct{}{} + } + if _, ok := d.seen[k]; ok { + return nil, errors.Errorf("only one %s parser directive can be used", k) + } + d.seen[k] = struct{}{} + + v := string(match[2]) + + directive := Directive{ + Name: k, + Value: v, + Location: []Range{{ + Start: Position{Line: d.line}, + End: Position{Line: d.line}, + }}, + } + return &directive, nil +} + +func (d *DirectiveParser) ParseAll(data []byte) ([]*Directive, error) { + scanner := bufio.NewScanner(bytes.NewReader(data)) + var directives []*Directive + for scanner.Scan() { + if d.done { + break + } + + d, err := d.ParseLine(scanner.Bytes()) + if err != nil { + return directives, err + } + if d != nil { + directives = append(directives, d) + } + } + return directives, nil +} + +// DetectSyntax returns the syntax of provided input. +// +// The traditional dockerfile directives '# syntax = ...' are used by default, +// however, the function will also fallback to c-style directives '// syntax = ...' +// and json-encoded directives '{ "syntax": "..." }'. Finally, starting lines +// with '#!' are treated as shebangs and ignored. +// +// This allows for a flexible range of input formats, and appropriate syntax +// selection. 
+func DetectSyntax(dt []byte) (string, string, []Range, bool) { + dt, hadShebang, err := discardShebang(dt) + if err != nil { + return "", "", nil, false + } + line := 0 + if hadShebang { + line++ + } + + // use default directive parser, and search for #syntax= + directiveParser := DirectiveParser{line: line} + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // use directive with different comment prefix, and search for //syntax= + directiveParser = DirectiveParser{line: line} + directiveParser.setComment("//") + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // search for possible json directives + var directive struct { + Syntax string `json:"syntax"` + } + if err := json.Unmarshal(dt, &directive); err == nil { + if directive.Syntax != "" { + loc := []Range{{ + Start: Position{Line: line}, + End: Position{Line: line}, + }} + return directive.Syntax, directive.Syntax, loc, true + } + } + + return "", "", nil, false +} + +func detectSyntaxFromParser(dt []byte, parser DirectiveParser) (string, string, []Range, bool) { + directives, _ := parser.ParseAll(dt) + for _, d := range directives { + // check for syntax directive before erroring out, since the error + // might have occurred *after* the syntax directive + if d.Name == keySyntax { + p, _, _ := strings.Cut(d.Value, " ") + return p, d.Value, d.Location, true + } + } + return "", "", nil, false +} + +func discardShebang(dt []byte) ([]byte, bool, error) { + line, rest, _ := bytes.Cut(dt, []byte("\n")) + if bytes.HasPrefix(line, []byte("#!")) { + return rest, true, nil + } + return dt, false, nil +} diff --git a/frontend/dockerfile/parser/directives_test.go b/frontend/dockerfile/parser/directives_test.go new file mode 100644 index 000000000000..31077977b2d2 --- /dev/null +++ b/frontend/dockerfile/parser/directives_test.go @@ -0,0 +1,105 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDirectives(t *testing.T) { + t.Parallel() + + dt := `#escape=\ +# key = FOO bar + +# smth +` + + parser := DirectiveParser{} + d, err := parser.ParseAll([]byte(dt)) + require.NoError(t, err) + require.Len(t, d, 1) + + require.Equal(t, d[0].Name, "escape") + require.Equal(t, d[0].Value, "\\") + + // for some reason Moby implementation in case insensitive for escape + dt = `# EScape=\ +# KEY = FOO bar + +# smth +` + + parser = DirectiveParser{} + d, err = parser.ParseAll([]byte(dt)) + require.NoError(t, err) + require.Len(t, d, 1) + + require.Equal(t, d[0].Name, "escape") + require.Equal(t, d[0].Value, "\\") +} + +func TestDetectSyntax(t *testing.T) { + t.Parallel() + + dt := `# syntax = dockerfile:experimental // opts +FROM busybox +` + ref, cmdline, loc, ok := DetectSyntax([]byte(dt)) + require.True(t, ok) + require.Equal(t, ref, "dockerfile:experimental") + require.Equal(t, cmdline, "dockerfile:experimental // opts") + require.Equal(t, 1, loc[0].Start.Line) + require.Equal(t, 1, loc[0].End.Line) + + dt = `#!/bin/sh +# syntax = dockerfile:experimental +FROM busybox +` + ref, _, loc, ok = DetectSyntax([]byte(dt)) + require.True(t, ok) + require.Equal(t, ref, "dockerfile:experimental") + require.Equal(t, 2, loc[0].Start.Line) + require.Equal(t, 2, loc[0].End.Line) + + dt = `#!/bin/sh + +# syntax = dockerfile:experimental +` + _, _, _, ok = DetectSyntax([]byte(dt)) + require.False(t, ok) + + dt = `FROM busybox +RUN ls +` + ref, cmdline, _, ok = 
DetectSyntax([]byte(dt)) + require.False(t, ok) + require.Equal(t, ref, "") + require.Equal(t, cmdline, "") + + dt = `//syntax=foo +//key=value` + ref, _, _, ok = DetectSyntax([]byte(dt)) + require.True(t, ok) + require.Equal(t, ref, "foo") + + dt = `#!/bin/sh +//syntax=x` + ref, _, _, ok = DetectSyntax([]byte(dt)) + require.True(t, ok) + require.Equal(t, ref, "x") + + dt = `{"syntax": "foo"}` + ref, _, _, ok = DetectSyntax([]byte(dt)) + require.True(t, ok) + require.Equal(t, ref, "foo") + + dt = `{"syntax": "foo"` + _, _, _, ok = DetectSyntax([]byte(dt)) + require.False(t, ok) + + dt = `{"syntax": "foo"} +# syntax=bar` + _, _, _, ok = DetectSyntax([]byte(dt)) + require.False(t, ok) +} diff --git a/frontend/dockerfile/parser/json_test.go b/frontend/dockerfile/parser/json_test.go index 5ffd9bd2572d..d25804cd04d4 100644 --- a/frontend/dockerfile/parser/json_test.go +++ b/frontend/dockerfile/parser/json_test.go @@ -16,12 +16,12 @@ var invalidJSONArraysOfStrings = []string{ } var validJSONArraysOfStrings = map[string][]string{ - `[]`: {}, - `[""]`: {""}, - `["a"]`: {"a"}, - `["a","b"]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, ` [ "a", "b" ] `: {"a", "b"}, `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, } diff --git a/frontend/dockerfile/parser/line_parsers.go b/frontend/dockerfile/parser/line_parsers.go index c0d0a55d1224..db8d0bda23d1 100644 --- a/frontend/dockerfile/parser/line_parsers.go +++ b/frontend/dockerfile/parser/line_parsers.go @@ -8,7 +8,6 @@ package parser import ( "encoding/json" - "fmt" "strings" "unicode" "unicode/utf8" @@ -34,7 +33,6 @@ func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) { // statement with sub-statements. // // ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil @@ -154,7 +152,7 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { if !strings.Contains(words[0], "=") { parts := reWhitespace.Split(rest, 2) if len(parts) < 2 { - return nil, fmt.Errorf(key + " must have two arguments") + return nil, errors.Errorf("%s must have two arguments", key) } return newKeyValueNode(parts[0], parts[1]), nil } @@ -163,7 +161,7 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { var prevNode *Node for _, word := range words { if !strings.Contains(word, "=") { - return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + return nil, errors.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) } parts := strings.SplitN(word, "=", 2) @@ -274,7 +272,7 @@ func parseString(rest string, d *directives) (*Node, map[string]bool, error) { func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) { rest = strings.TrimLeftFunc(rest, unicode.IsSpace) if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + return nil, nil, errors.Errorf("Error parsing %q as a JSON array", rest) } var myJSON []interface{} diff --git a/frontend/dockerfile/parser/parser.go b/frontend/dockerfile/parser/parser.go index 53165e0a481d..d6723635d4a8 100644 --- a/frontend/dockerfile/parser/parser.go +++ b/frontend/dockerfile/parser/parser.go @@ -1,4 +1,5 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. +// The parser package implements a parser that transforms a raw byte-stream +// into a low-level Abstract Syntax Tree. package parser import ( @@ -27,7 +28,6 @@ import ( // This data structure is frankly pretty lousy for handling complex languages, // but lucky for us the Dockerfile isn't very complicated. This structure // works a little more effectively than a "proper" parse tree for our needs. -// type Node struct { Value string // actual content Next *Node // the next item in the current sexp @@ -115,7 +115,6 @@ type Heredoc struct { var ( dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) reComment = regexp.MustCompile(`^#.*$`) reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) @@ -124,11 +123,6 @@ var ( // DefaultEscapeToken is the default escape token const DefaultEscapeToken = '\\' -var validDirectives = map[string]struct{}{ - "escape": {}, - "syntax": {}, -} - var ( // Directives allowed to contain heredocs heredocDirectives = map[string]bool{ @@ -143,13 +137,12 @@ var ( } ) -// directive is the structure used during a build run to hold the state of +// directives is the structure used during a build run to hold the state of // parsing directives. type directives struct { - escapeToken rune // Current escape token - lineContinuationRegex *regexp.Regexp // Current line continuation regex - done bool // Whether we are done looking for directives - seen map[string]struct{} // Whether the escape directive has been seen + parser DirectiveParser + escapeToken rune // Current escape token + lineContinuationRegex *regexp.Regexp // Current line continuation regex } // setEscapeToken sets the default token for escaping characters and as line- @@ -178,40 +171,19 @@ func (d *directives) setEscapeToken(s string) error { // Parser directives must precede any builder instruction or other comments, // and cannot be repeated. 
func (d *directives) possibleParserDirective(line string) error { - if d.done { - return nil - } - - match := reDirectives.FindStringSubmatch(line) - if len(match) == 0 { - d.done = true - return nil - } - - k := strings.ToLower(match[1]) - _, ok := validDirectives[k] - if !ok { - d.done = true - return nil - } - - if _, ok := d.seen[k]; ok { - return errors.Errorf("only one %s parser directive can be used", k) + directive, err := d.parser.ParseLine([]byte(line)) + if err != nil { + return err } - d.seen[k] = struct{}{} - - if k == "escape" { - return d.setEscapeToken(match[2]) + if directive != nil && directive.Name == keyEscape { + return d.setEscapeToken(directive.Value) } - return nil } // newDefaultDirectives returns a new directives structure with the default escapeToken token func newDefaultDirectives() *directives { - d := &directives{ - seen: map[string]struct{}{}, - } + d := &directives{} d.setEscapeToken(string(DefaultEscapeToken)) return d } @@ -274,13 +246,15 @@ func newNodeFromLine(line string, d *directives, comments []string) (*Node, erro }, nil } -// Result is the result of parsing a Dockerfile +// Result contains the bundled outputs from parsing a Dockerfile. type Result struct { AST *Node EscapeToken rune Warnings []Warning } +// Warning contains information to identify and locate a warning generated +// during parsing. type Warning struct { Short string Detail [][]byte @@ -301,8 +275,8 @@ func (r *Result) PrintWarnings(out io.Writer) { } } -// Parse reads lines from a Reader, parses the lines into an AST and returns -// the AST and escape token +// Parse consumes lines from a provided Reader, parses each line into an AST +// and returns the results of doing so. func Parse(rwc io.Reader) (*Result, error) { d := newDefaultDirectives() currentLine := 0 @@ -421,7 +395,7 @@ func Parse(rwc io.Reader) (*Result, error) { }, withLocation(handleScannerError(scanner.Err()), currentLine, 0) } -// Extracts a heredoc from a possible heredoc regex match +// heredocFromMatch extracts a heredoc from a possible heredoc regex match. func heredocFromMatch(match []string) (*Heredoc, error) { if len(match) == 0 { return nil, nil @@ -457,7 +431,7 @@ func heredocFromMatch(match []string) (*Heredoc, error) { return nil, err } if len(wordsRaw) != len(words) { - return nil, fmt.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) + return nil, errors.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) } word := words[0] @@ -475,9 +449,14 @@ func heredocFromMatch(match []string) (*Heredoc, error) { }, nil } +// ParseHeredoc parses a heredoc word from a target string, returning the +// components from the doc. func ParseHeredoc(src string) (*Heredoc, error) { return heredocFromMatch(reHeredoc.FindStringSubmatch(src)) } + +// MustParseHeredoc is a variant of ParseHeredoc that discards the error, if +// there was one present. func MustParseHeredoc(src string) *Heredoc { heredoc, _ := ParseHeredoc(src) return heredoc @@ -503,6 +482,7 @@ func heredocsFromLine(line string) ([]Heredoc, error) { return docs, nil } +// ChompHeredocContent chomps leading tabs from the heredoc. 
func ChompHeredocContent(src string) string { return reLeadingTabs.ReplaceAllString(src, "") } diff --git a/frontend/dockerfile/parser/parser_test.go b/frontend/dockerfile/parser/parser_test.go index 676188f1ea5f..70ae7e64b559 100644 --- a/frontend/dockerfile/parser/parser_test.go +++ b/frontend/dockerfile/parser/parser_test.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -56,7 +55,7 @@ func TestParseCases(t *testing.T) { result, err := Parse(df) require.NoError(t, err, dockerfile) - content, err := ioutil.ReadFile(resultfile) + content, err := os.ReadFile(resultfile) require.NoError(t, err, resultfile) if runtime.GOOS == "windows" { diff --git a/frontend/dockerfile/parser/testfiles/health/Dockerfile b/frontend/dockerfile/parser/testfiles/health/Dockerfile index 081e4428820a..f34fe9462167 100644 --- a/frontend/dockerfile/parser/testfiles/health/Dockerfile +++ b/frontend/dockerfile/parser/testfiles/health/Dockerfile @@ -8,3 +8,4 @@ HEALTHCHECK CMD HEALTHCHECK CMD a b HEALTHCHECK --timeout=3s CMD ["foo"] HEALTHCHECK CONNECT TCP 7000 +HEALTHCHECK --start-period=0s --interval=5s --timeout=0s --retries=0 CMD ["foo"] diff --git a/frontend/dockerfile/parser/testfiles/health/result b/frontend/dockerfile/parser/testfiles/health/result index 092924f88c5c..b4f69164e9af 100644 --- a/frontend/dockerfile/parser/testfiles/health/result +++ b/frontend/dockerfile/parser/testfiles/health/result @@ -7,3 +7,4 @@ (healthcheck "CMD" "a b") (healthcheck ["--timeout=3s"] "CMD" "foo") (healthcheck "CONNECT" "TCP 7000") +(healthcheck ["--start-period=0s" "--interval=5s" "--timeout=0s" "--retries=0"] "CMD" "foo") diff --git a/frontend/dockerfile/release/labs/tags b/frontend/dockerfile/release/labs/tags index 03dd8c3a5750..71a17cb3926a 100644 --- a/frontend/dockerfile/release/labs/tags +++ b/frontend/dockerfile/release/labs/tags @@ -1 +1 @@ -dfrunsecurity +dfrunsecurity dfaddgit dfaddchecksum diff --git a/frontend/dockerfile/shell/lex.go b/frontend/dockerfile/shell/lex.go index 23ab81f25cab..b930ab32601a 100644 --- a/frontend/dockerfile/shell/lex.go +++ b/frontend/dockerfile/shell/lex.go @@ -377,7 +377,7 @@ func (sw *shellWord) processDollar() (string, error) { } // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier + // can use it to determine what to do based on the modifier newValue, found := sw.getEnv(name) switch modifier { diff --git a/frontend/dockerfile/shell/lex_test.go b/frontend/dockerfile/shell/lex_test.go index b14307152228..d4775969c2f6 100644 --- a/frontend/dockerfile/shell/lex_test.go +++ b/frontend/dockerfile/shell/lex_test.go @@ -248,3 +248,40 @@ func TestProcessWithMatches(t *testing.T) { require.Equal(t, 0, len(matches)) } + +func TestProcessWithMatchesPlatform(t *testing.T) { + shlex := NewLex('\\') + + const ( + // corresponds to the filename convention used in https://github.com/moby/buildkit/releases + release = "something-${VERSION}.${TARGETOS}-${TARGETARCH}${TARGETVARIANT:+-${TARGETVARIANT}}.tar.gz" + version = "v1.2.3" + ) + + w, _, err := shlex.ProcessWordWithMatches(release, map[string]string{ + "VERSION": version, + "TARGETOS": "linux", + "TARGETARCH": "arm", + "TARGETVARIANT": "v7", + }) + require.NoError(t, err) + require.Equal(t, "something-v1.2.3.linux-arm-v7.tar.gz", w) + + w, _, err = shlex.ProcessWordWithMatches(release, map[string]string{ + "VERSION": version, + "TARGETOS": "linux", + "TARGETARCH": "arm64", + "TARGETVARIANT": "", + }) + require.NoError(t, err) + 
require.Equal(t, "something-v1.2.3.linux-arm64.tar.gz", w) + + w, _, err = shlex.ProcessWordWithMatches(release, map[string]string{ + "VERSION": version, + "TARGETOS": "linux", + "TARGETARCH": "arm64", + // No "TARGETVARIANT": "", + }) + require.NoError(t, err) + require.Equal(t, "something-v1.2.3.linux-arm64.tar.gz", w) +} diff --git a/frontend/frontend.go b/frontend/frontend.go index dedda54c6104..024ac802045c 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -6,10 +6,16 @@ import ( "github.com/moby/buildkit/client/llb" gw "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" digest "github.com/opencontainers/go-digest" ) +type Result = result.Result[solver.ResultProxy] + +type Attestation = result.Attestation[solver.ResultProxy] + type Frontend interface { Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (*Result, error) } diff --git a/frontend/frontend_test.go b/frontend/frontend_test.go index b1d061a265eb..c2dc94f66823 100644 --- a/frontend/frontend_test.go +++ b/frontend/frontend_test.go @@ -2,7 +2,6 @@ package frontend import ( "context" - "io/ioutil" "os" "path/filepath" "testing" @@ -19,7 +18,7 @@ import ( ) func init() { - if os.Getenv("TEST_DOCKERD") == "1" { + if integration.IsTestDockerd() { integration.InitDockerdWorker() } else { integration.InitOCIWorker() @@ -32,6 +31,7 @@ func TestFrontendIntegration(t *testing.T) { testRefReadFile, testRefReadDir, testRefStatFile, + testRefEvaluate, testReturnNil, )) } @@ -43,10 +43,6 @@ func testReturnNil(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) defer c.Close() - destDir, err := ioutil.TempDir("", "buildkit") - require.NoError(t, err) - defer os.RemoveAll(destDir) - frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { return nil, nil } @@ -72,10 +68,10 @@ func testRefReadFile(t *testing.T, sb integration.Sandbox) { testcontent := []byte(`foobar`) dir, err := tmpdir( + t, fstest.CreateFile("test", testcontent, 0666), ) require.NoError(t, err) - defer os.RemoveAll(dir) frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { def, err := llb.Local("mylocal").Marshal(ctx) @@ -135,6 +131,7 @@ func testRefReadDir(t *testing.T, sb integration.Sandbox) { defer c.Close() dir, err := tmpdir( + t, fstest.CreateDir("somedir", 0777), fstest.CreateFile("somedir/foo1.txt", []byte(`foo1`), 0666), fstest.CreateFile("somedir/foo2.txt", []byte{}, 0666), @@ -143,7 +140,6 @@ func testRefReadDir(t *testing.T, sb integration.Sandbox) { fstest.CreateDir("somedir/baz.dir", 0777), ) require.NoError(t, err) - defer os.RemoveAll(dir) expMap := make(map[string]*fstypes.Stat) @@ -253,10 +249,10 @@ func testRefStatFile(t *testing.T, sb integration.Sandbox) { testcontent := []byte(`foobar`) dir, err := tmpdir( + t, fstest.CreateFile("test", testcontent, 0666), ) require.NoError(t, err) - defer os.RemoveAll(dir) exp, err := fsutil.Stat(filepath.Join(dir, "test")) require.NoError(t, err) @@ -296,11 +292,57 @@ func testRefStatFile(t *testing.T, sb integration.Sandbox) { require.NoError(t, err) } -func tmpdir(appliers ...fstest.Applier) (string, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-frontend") - if err != nil { - return "", err +func testRefEvaluate(t *testing.T, sb integration.Sandbox) { + ctx := sb.Context() + + c, err := 
client.New(ctx, sb.Address()) + require.NoError(t, err) + defer c.Close() + + frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { + st := llb.Scratch().File(llb.Mkfile("/test", 0666, []byte{})) + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + res, err := c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + st = llb.Scratch().File(llb.Mkfile("/test/dir-does-not-exist", 0666, []byte{})) + def, err = st.Marshal(ctx) + if err != nil { + return nil, err + } + res, err = c.Solve(ctx, gateway.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref2, err := res.SingleRef() + if err != nil { + return nil, err + } + + require.NoError(t, ref.Evaluate(ctx)) + require.Error(t, ref2.Evaluate(ctx)) + return gateway.NewResult(), nil } + + _, err = c.Build(ctx, client.SolveOpt{}, "", frontend, nil) + require.NoError(t, err) +} + +func tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) { + tmpdir := t.TempDir() if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { return "", err } diff --git a/frontend/gateway/client/attestation.go b/frontend/gateway/client/attestation.go new file mode 100644 index 000000000000..5ffe67233c50 --- /dev/null +++ b/frontend/gateway/client/attestation.go @@ -0,0 +1,51 @@ +package client + +import ( + pb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func AttestationToPB[T any](a *result.Attestation[T]) (*pb.Attestation, error) { + if a.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") + } + + subjects := make([]*pb.InTotoSubject, len(a.InToto.Subjects)) + for i, subject := range a.InToto.Subjects { + subjects[i] = &pb.InTotoSubject{ + Kind: subject.Kind, + Name: subject.Name, + Digest: subject.Digest, + } + } + + return &pb.Attestation{ + Kind: a.Kind, + Metadata: a.Metadata, + Path: a.Path, + InTotoPredicateType: a.InToto.PredicateType, + InTotoSubjects: subjects, + }, nil +} + +func AttestationFromPB[T any](a *pb.Attestation) (*result.Attestation[T], error) { + subjects := make([]result.InTotoSubject, len(a.InTotoSubjects)) + for i, subject := range a.InTotoSubjects { + subjects[i] = result.InTotoSubject{ + Kind: subject.Kind, + Name: subject.Name, + Digest: subject.Digest, + } + } + + return &result.Attestation[T]{ + Kind: a.Kind, + Metadata: a.Metadata, + Path: a.Path, + InToto: result.InTotoAttestation{ + PredicateType: a.InTotoPredicateType, + Subjects: subjects, + }, + }, nil +} diff --git a/frontend/gateway/client/client.go b/frontend/gateway/client/client.go index 61bc018ff5df..7b6b9de132bc 100644 --- a/frontend/gateway/client/client.go +++ b/frontend/gateway/client/client.go @@ -7,12 +7,24 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/apicaps" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" fstypes "github.com/tonistiigi/fsutil/types" ) +type Result = result.Result[Reference] + +type Attestation = result.Attestation[Reference] + +type BuildFunc func(context.Context, Client) (*Result, error) + +func NewResult() *Result { + return &Result{} +} + type Client interface { Solve(ctx 
context.Context, req SolveRequest) (*Result, error) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) @@ -64,6 +76,8 @@ type StartRequest struct { Stdin io.ReadCloser Stdout, Stderr io.WriteCloser SecurityMode pb.SecurityMode + + RemoveMountStubsRecursive bool } // WinSize is same as executor.WinSize, copied here to prevent circular package @@ -82,6 +96,7 @@ type ContainerProcess interface { type Reference interface { ToState() (llb.State, error) + Evaluate(ctx context.Context) error ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error) ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error) @@ -114,6 +129,7 @@ type SolveRequest struct { FrontendOpt map[string]string FrontendInputs map[string]*pb.Definition CacheImports []CacheOptionsEntry + SourcePolicies []*spb.Policy } type CacheOptionsEntry struct { diff --git a/frontend/gateway/client/result.go b/frontend/gateway/client/result.go deleted file mode 100644 index bd5422847822..000000000000 --- a/frontend/gateway/client/result.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "context" - "sync" - - "github.com/pkg/errors" -) - -type BuildFunc func(context.Context, Client) (*Result, error) - -type Result struct { - mu sync.Mutex - Ref Reference - Refs map[string]Reference - Metadata map[string][]byte -} - -func NewResult() *Result { - return &Result{} -} - -func (r *Result) AddMeta(k string, v []byte) { - r.mu.Lock() - if r.Metadata == nil { - r.Metadata = map[string][]byte{} - } - r.Metadata[k] = v - r.mu.Unlock() -} - -func (r *Result) AddRef(k string, ref Reference) { - r.mu.Lock() - if r.Refs == nil { - r.Refs = map[string]Reference{} - } - r.Refs[k] = ref - r.mu.Unlock() -} - -func (r *Result) SetRef(ref Reference) { - r.Ref = ref -} - -func (r *Result) SingleRef() (Reference, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.Refs != nil && r.Ref == nil { - return nil, errors.Errorf("invalid map result") - } - - return r.Ref, nil -} diff --git a/frontend/gateway/container.go b/frontend/gateway/container.go index 824a503ffb55..d6161d1def93 100644 --- a/frontend/gateway/container.go +++ b/frontend/gateway/container.go @@ -298,14 +298,15 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques signal := make(chan syscall.Signal) procInfo := executor.ProcessInfo{ Meta: executor.Meta{ - Args: req.Args, - Env: req.Env, - User: req.User, - Cwd: req.Cwd, - Tty: req.Tty, - NetMode: gwCtr.netMode, - ExtraHosts: gwCtr.extraHosts, - SecurityMode: req.SecurityMode, + Args: req.Args, + Env: req.Env, + User: req.User, + Cwd: req.Cwd, + Tty: req.Tty, + NetMode: gwCtr.netMode, + ExtraHosts: gwCtr.extraHosts, + SecurityMode: req.SecurityMode, + RemoveMountStubsRecursive: req.RemoveMountStubsRecursive, }, Stdin: req.Stdin, Stdout: req.Stdout, @@ -361,6 +362,8 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques } func (gwCtr *gatewayContainer) Release(ctx context.Context) error { + gwCtr.mu.Lock() + defer gwCtr.mu.Unlock() gwCtr.cancel() err1 := gwCtr.errGroup.Wait() @@ -371,6 +374,7 @@ func (gwCtr *gatewayContainer) Release(ctx context.Context) error { err2 = err } } + gwCtr.cleanup = nil if err1 != nil { return stack.Enable(err1) diff --git a/frontend/gateway/forwarder/forward.go b/frontend/gateway/forwarder/forward.go index 2964ad02bd06..e13894ba37ed 100644 --- a/frontend/gateway/forwarder/forward.go +++ 
b/frontend/gateway/forwarder/forward.go @@ -17,6 +17,7 @@ import ( "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -26,16 +27,17 @@ import ( ) func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, w worker.Infos, sid string, sm *session.Manager) (*bridgeClient, error) { - return &bridgeClient{ + bc := &bridgeClient{ opts: opts, inputs: inputs, FrontendLLBBridge: llbBridge, sid: sid, sm: sm, workers: w, - final: map[*ref]struct{}{}, workerRefByID: make(map[string]*worker.WorkerRef), - }, nil + } + bc.buildOpts = bc.loadBuildOpts() + return bc, nil } type bridgeClient struct { @@ -43,12 +45,13 @@ type bridgeClient struct { mu sync.Mutex opts map[string]string inputs map[string]*opspb.Definition - final map[*ref]struct{} sid string sm *session.Manager refs []*ref workers worker.Infos workerRefByID map[string]*worker.WorkerRef + buildOpts client.BuildOpts + ctrs []client.Container } func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { @@ -59,42 +62,44 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli FrontendOpt: req.FrontendOpt, FrontendInputs: req.FrontendInputs, CacheImports: req.CacheImports, + SourcePolicies: req.SourcePolicies, }, c.sid) if err != nil { return nil, c.wrapSolveError(err) } - - cRes := &client.Result{} - c.mu.Lock() - for k, r := range res.Refs { - rr, err := c.newRef(r, session.NewGroup(c.sid)) - if err != nil { - return nil, err + for _, atts := range res.Attestations { + for _, att := range atts { + if att.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") + } } - c.refs = append(c.refs, rr) - cRes.AddRef(k, rr) } - if r := res.Ref; r != nil { + + c.mu.Lock() + cRes, err := result.ConvertResult(res, func(r solver.ResultProxy) (client.Reference, error) { rr, err := c.newRef(r, session.NewGroup(c.sid)) if err != nil { return nil, err } c.refs = append(c.refs, rr) - cRes.SetRef(rr) - } + return rr, nil + }) c.mu.Unlock() - cRes.Metadata = res.Metadata + if err != nil { + return nil, err + } return cRes, nil } -func (c *bridgeClient) BuildOpts() client.BuildOpts { - workers := make([]client.WorkerInfo, 0, len(c.workers.WorkerInfos())) - for _, w := range c.workers.WorkerInfos() { - workers = append(workers, client.WorkerInfo{ +func (c *bridgeClient) loadBuildOpts() client.BuildOpts { + wis := c.workers.WorkerInfos() + workers := make([]client.WorkerInfo, len(wis)) + for i, w := range wis { + workers[i] = client.WorkerInfo{ ID: w.ID, Labels: w.Labels, Platforms: w.Platforms, - }) + } } return client.BuildOpts{ @@ -107,6 +112,10 @@ func (c *bridgeClient) BuildOpts() client.BuildOpts { } } +func (c *bridgeClient) BuildOpts() client.BuildOpts { + return c.buildOpts +} + func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { inputs := make(map[string]llb.State) for key, def := range c.inputs { @@ -176,42 +185,43 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err if r == nil { return nil, nil } - - res := &frontend.Result{} - - if r.Refs != nil { - res.Refs = make(map[string]solver.ResultProxy, len(r.Refs)) - for k, r := range r.Refs 
{ - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) + for _, atts := range r.Attestations { + for _, att := range atts { + if att.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") } - c.final[rr] = struct{}{} - res.Refs[k] = rr.ResultProxy } } - if r := r.Ref; r != nil { + + res, err := result.ConvertResult(r, func(r client.Reference) (solver.ResultProxy, error) { rr, ok := r.(*ref) if !ok { return nil, errors.Errorf("invalid reference type for forward %T", r) } - c.final[rr] = struct{}{} - res.Ref = rr.ResultProxy + return rr.acquireResultProxy(), nil + }) + if err != nil { + return nil, err } - res.Metadata = r.Metadata - return res, nil } func (c *bridgeClient) discard(err error) { + for _, ctr := range c.ctrs { + ctr.Release(context.TODO()) + } + for id, workerRef := range c.workerRefByID { workerRef.ImmutableRef.Release(context.TODO()) delete(c.workerRefByID, id) } for _, r := range c.refs { if r != nil { - if _, ok := c.final[r]; !ok || err != nil { - r.Release(context.TODO()) + r.resultProxy.Release(context.TODO()) + if err != nil { + for _, clone := range r.resultProxyClones { + clone.Release(context.TODO()) + } } } } @@ -240,7 +250,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return errors.Errorf("unexpected Ref type: %T", m.Ref) } - res, err := refProxy.Result(ctx) + res, err := refProxy.resultProxy.Result(ctx) if err != nil { return err } @@ -292,27 +302,45 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer if err != nil { return nil, err } + c.ctrs = append(c.ctrs, ctr) return ctr, nil } +func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { + return &ref{resultProxy: r, session: s, c: c}, nil +} + type ref struct { - solver.ResultProxy + resultProxy solver.ResultProxy + resultProxyClones []solver.ResultProxy + session session.Group c *bridgeClient } -func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { - return &ref{ResultProxy: r, session: s, c: c}, nil +func (r *ref) acquireResultProxy() solver.ResultProxy { + s1, s2 := solver.SplitResultProxy(r.resultProxy) + r.resultProxy = s1 + r.resultProxyClones = append(r.resultProxyClones, s2) + return s2 } func (r *ref) ToState() (st llb.State, err error) { - defop, err := llb.NewDefinitionOp(r.Definition()) + defop, err := llb.NewDefinitionOp(r.resultProxy.Definition()) if err != nil { return st, err } return llb.NewState(defop), nil } +func (r *ref) Evaluate(ctx context.Context) error { + _, err := r.resultProxy.Result(ctx) + if err != nil { + return r.c.wrapSolveError(err) + } + return nil +} + func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { m, err := r.getMountable(ctx) if err != nil { @@ -351,7 +379,7 @@ func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.St } func (r *ref) getMountable(ctx context.Context) (snapshot.Mountable, error) { - rr, err := r.ResultProxy.Result(ctx) + rr, err := r.resultProxy.Result(ctx) if err != nil { return nil, r.c.wrapSolveError(err) } diff --git a/frontend/gateway/gateway.go b/frontend/gateway/gateway.go index 842e3252f377..79825d0b651a 100644 --- a/frontend/gateway/gateway.go +++ b/frontend/gateway/gateway.go @@ -39,7 +39,6 @@ import ( opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/bklog" - 
"github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/stack" "github.com/moby/buildkit/util/tracing" @@ -226,10 +225,11 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct) meta := executor.Meta{ - Env: env, - Args: args, - Cwd: cwd, - ReadonlyRootFS: readonly, + Env: env, + Args: args, + Cwd: cwd, + ReadonlyRootFS: readonly, + RemoveMountStubsRecursive: true, } if v, ok := img.Config.Labels["moby.buildkit.frontend.network.none"]; ok { @@ -278,7 +278,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten err = w.Executor().Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) if err != nil { - if errdefs.IsCanceled(err) && lbf.isErrServerClosed { + if errdefs.IsCanceled(ctx, err) && lbf.isErrServerClosed { err = errors.Errorf("frontend grpc server closed unexpectedly") } // An existing error (set via Return rpc) takes @@ -345,26 +345,27 @@ func (b *bindMount) IdentityMapping() *idtools.IdentityMapping { func (lbf *llbBridgeForwarder) Discard() { lbf.mu.Lock() defer lbf.mu.Unlock() + + for ctr := range lbf.ctrs { + lbf.ReleaseContainer(context.TODO(), &pb.ReleaseContainerRequest{ + ContainerID: ctr, + }) + } + for id, workerRef := range lbf.workerRefByID { - workerRef.ImmutableRef.Release(context.TODO()) + workerRef.Release(context.TODO()) delete(lbf.workerRefByID, id) } - for id, r := range lbf.refs { - if lbf.err == nil && lbf.result != nil { - keep := false - lbf.result.EachRef(func(r2 solver.ResultProxy) error { - if r == r2 { - keep = true - } - return nil - }) - if keep { - continue - } - } + if lbf.err != nil && lbf.result != nil { + lbf.result.EachRef(func(r solver.ResultProxy) error { + r.Release(context.TODO()) + return nil + }) + } + for _, r := range lbf.refs { r.Release(context.TODO()) - delete(lbf.refs, id) } + lbf.refs = map[string]solver.ResultProxy{} } func (lbf *llbBridgeForwarder) Done() <-chan struct{} { @@ -540,9 +541,14 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R } } dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: req.ResolveMode, - LogName: req.LogName, + ResolverType: llb.ResolverType(req.ResolverType), + Platform: platform, + ResolveMode: req.ResolveMode, + LogName: req.LogName, + Store: llb.ResolveImageConfigOptStore{ + SessionID: req.SessionID, + StoreID: req.StoreID, + }, }) if err != nil { return nil, err @@ -553,20 +559,6 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R }, nil } -func translateLegacySolveRequest(req *pb.SolveRequest) error { - // translates ImportCacheRefs to new CacheImports (v0.4.0) - for _, legacyImportRef := range req.ImportCacheRefsDeprecated { - im := &pb.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{"ref": legacyImportRef}, - } - // FIXME(AkihiroSuda): skip append if already exists - req.CacheImports = append(req.CacheImports, im) - } - req.ImportCacheRefsDeprecated = nil - return nil -} - func (lbf *llbBridgeForwarder) wrapSolveError(solveErr error) error { var ( ee *llberrdefs.ExecError @@ -621,9 +613,6 @@ func (lbf *llbBridgeForwarder) registerResultIDs(results ...solver.Result) (ids } func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) 
(*pb.SolveResponse, error) { - if err := translateLegacySolveRequest(req); err != nil { - return nil, err - } var cacheImports []frontend.CacheOptionsEntry for _, e := range req.CacheImports { cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ @@ -640,6 +629,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) FrontendOpt: req.FrontendOpt, FrontendInputs: req.FrontendInputs, CacheImports: cacheImports, + SourcePolicies: req.SourcePolicies, }, lbf.sid) if err != nil { return nil, lbf.wrapSolveError(err) @@ -656,6 +646,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) var defaultID string lbf.mu.Lock() + if res.Refs != nil { ids := make(map[string]string, len(res.Refs)) defs := make(map[string]*opspb.Definition, len(res.Refs)) @@ -664,16 +655,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), ref.BuildSources()) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - pbRes.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi - } lbf.refs[id] = ref } ids[k] = id @@ -697,16 +678,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, exptypes.ExporterBuildInfo, ref.BuildSources()) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - pbRes.Metadata[exptypes.ExporterBuildInfo] = dtbi - } def = ref.Definition() lbf.refs[id] = ref } @@ -718,6 +689,31 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id} } } + + if res.Attestations != nil { + pbRes.Attestations = map[string]*pb.Attestations{} + for k, atts := range res.Attestations { + for _, att := range atts { + pbAtt, err := gwclient.AttestationToPB(&att) + if err != nil { + return nil, err + } + + if att.Ref != nil { + id := identity.NewID() + def := att.Ref.Definition() + lbf.refs[id] = att.Ref + pbAtt.Ref = &pb.Ref{Id: id, Def: def} + } + + if pbRes.Attestations[k] == nil { + pbRes.Attestations[k] = &pb.Attestations{} + } + pbRes.Attestations[k].Attestation = append(pbRes.Attestations[k].Attestation, pbAtt) + } + } + } + lbf.mu.Unlock() // compatibility mode for older clients @@ -750,15 +746,15 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) return resp, nil } -func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id, path string) (cache.ImmutableRef, error) { +func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id string) (cache.ImmutableRef, error) { lbf.mu.Lock() ref, ok := lbf.refs[id] lbf.mu.Unlock() if !ok { - return nil, errors.Errorf("no such ref: %v", id) + return nil, errors.Errorf("no such ref: %s", id) } if ref == nil { - return nil, errors.Wrap(os.ErrNotExist, path) + return nil, errors.Errorf("empty ref: %s", id) } r, err := ref.Result(ctx) @@ -777,7 +773,7 @@ func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id, path str func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, 
lbf.callCtx) - ref, err := lbf.getImmutableRef(ctx, req.Ref, req.FilePath) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } @@ -792,9 +788,12 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq } } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } dt, err := cacheutil.ReadFile(ctx, m, newReq) @@ -808,7 +807,7 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirRequest) (*pb.ReadDirResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - ref, err := lbf.getImmutableRef(ctx, req.Ref, req.DirPath) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } @@ -817,9 +816,12 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque Path: req.DirPath, IncludePattern: req.IncludePattern, } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } entries, err := cacheutil.ReadDir(ctx, m, newReq) if err != nil { @@ -832,13 +834,16 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileRequest) (*pb.StatFileResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - ref, err := lbf.getImmutableRef(ctx, req.Ref, req.Path) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } st, err := cacheutil.StatFile(ctx, m, req.Path) if err != nil { @@ -848,6 +853,16 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq return &pb.StatFileResponse{Stat: st}, nil } +func (lbf *llbBridgeForwarder) Evaluate(ctx context.Context, req *pb.EvaluateRequest) (*pb.EvaluateResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + + _, err := lbf.getImmutableRef(ctx, req.Ref) + if err != nil { + return nil, err + } + return &pb.EvaluateResponse{}, nil +} + func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { workers := lbf.workers.WorkerInfos() pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers)) @@ -880,38 +895,54 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) switch res := in.Result.Result.(type) { case *pb.Result_RefDeprecated: - ref, err := lbf.convertRef(res.RefDeprecated) + ref, err := lbf.cloneRef(res.RefDeprecated) if err != nil { return nil, err } - r.Ref = ref + r.SetRef(ref) case *pb.Result_RefsDeprecated: - m := map[string]solver.ResultProxy{} for k, id := range res.RefsDeprecated.Refs { - ref, err := lbf.convertRef(id) + ref, err := lbf.cloneRef(id) if err != nil { return nil, err } - m[k] = ref + r.AddRef(k, ref) } - r.Refs = m case *pb.Result_Ref: - ref, err := lbf.convertRef(res.Ref.Id) + ref, err := lbf.cloneRef(res.Ref.Id) if err != nil { return nil, err } - r.Ref = 
ref + r.SetRef(ref) case *pb.Result_Refs: - m := map[string]solver.ResultProxy{} for k, ref := range res.Refs.Refs { - ref, err := lbf.convertRef(ref.Id) + ref, err := lbf.cloneRef(ref.Id) if err != nil { return nil, err } - m[k] = ref + r.AddRef(k, ref) } - r.Refs = m } + + if in.Result.Attestations != nil { + for k, pbAtts := range in.Result.Attestations { + for _, pbAtt := range pbAtts.Attestation { + att, err := gwclient.AttestationFromPB[solver.ResultProxy](pbAtt) + if err != nil { + return nil, err + } + if pbAtt.Ref != nil { + ref, err := lbf.cloneRef(pbAtt.Ref.Id) + if err != nil { + return nil, err + } + att.Ref = ref + } + r.AddAttestation(k, *att) + } + } + } + return lbf.setResult(r, nil) } @@ -1252,15 +1283,16 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e pios[pid] = pio proc, err := ctr.Start(initCtx, gwclient.StartRequest{ - Args: init.Meta.Args, - Env: init.Meta.Env, - User: init.Meta.User, - Cwd: init.Meta.Cwd, - Tty: init.Tty, - Stdin: pio.processReaders[0], - Stdout: pio.processWriters[1], - Stderr: pio.processWriters[2], - SecurityMode: init.Security, + Args: init.Meta.Args, + Env: init.Meta.Env, + User: init.Meta.User, + Cwd: init.Meta.Cwd, + Tty: init.Tty, + Stdin: pio.processReaders[0], + Stdout: pio.processWriters[1], + Stderr: pio.processWriters[2], + SecurityMode: init.Security, + RemoveMountStubsRecursive: init.Meta.RemoveMountStubsRecursive, }) if err != nil { return stack.Enable(err) @@ -1399,10 +1431,27 @@ func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) if !ok { return nil, errors.Errorf("return reference %s not found", id) } - return r, nil } +func (lbf *llbBridgeForwarder) cloneRef(id string) (solver.ResultProxy, error) { + if id == "" { + return nil, nil + } + + lbf.mu.Lock() + defer lbf.mu.Unlock() + + r, ok := lbf.refs[id] + if !ok { + return nil, errors.Errorf("return reference %s not found", id) + } + + s1, s2 := solver.SplitResultProxy(r) + lbf.refs[id] = s1 + return s2, nil +} + func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { go func() { <-ctx.Done() diff --git a/frontend/gateway/grpcclient/client.go b/frontend/gateway/grpcclient/client.go index d8e2799ff0dc..1b000a816e37 100644 --- a/frontend/gateway/grpcclient/client.go +++ b/frontend/gateway/grpcclient/client.go @@ -115,7 +115,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro req := &pb.ReturnRequest{} if retError == nil { if res == nil { - res = &client.Result{} + res = client.NewResult() } pbRes := &pb.Result{ Metadata: res.Metadata, @@ -160,6 +160,31 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro } } } + + if res.Attestations != nil { + attestations := map[string]*pb.Attestations{} + for k, as := range res.Attestations { + for _, a := range as { + pbAtt, err := client.AttestationToPB(&a) + if err != nil { + retError = err + continue + } + pbRef, err := convertRef(a.Ref) + if err != nil { + retError = err + continue + } + pbAtt.Ref = pbRef + if attestations[k] == nil { + attestations[k] = &pb.Attestations{} + } + attestations[k].Attestation = append(attestations[k].Attestation, pbAtt) + } + } + pbRes.Attestations = attestations + } + if retError == nil { req.Result = pbRes } @@ -323,22 +348,12 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * } } } - var ( - // old API - legacyRegistryCacheImports []string - // new API (CapImportCaches) - cacheImports []*pb.CacheOptionsEntry - ) - 
supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil + var cacheImports []*pb.CacheOptionsEntry for _, im := range creq.CacheImports { - if !supportCapImportCaches && im.Type == "registry" { - legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"]) - } else { - cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ - Type: im.Type, - Attrs: im.Attrs, - }) - } + cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) } // these options are added by go client in solve() @@ -366,10 +381,8 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * FrontendInputs: creq.FrontendInputs, AllowResultReturn: true, AllowResultArrayRef: true, - // old API - ImportCacheRefsDeprecated: legacyRegistryCacheImports, - // new API - CacheImports: cacheImports, + CacheImports: cacheImports, + SourcePolicies: creq.SourcePolicies, } // backwards compatibility with inline return @@ -381,30 +394,15 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil { req.Evaluate = creq.Evaluate } else { - // If evaluate is not supported, fallback to running Stat(".") in order to - // trigger an evaluation of the result. + // If evaluate is not supported, fallback to running Stat(".") in + // order to trigger an evaluation of the result. defer func() { if res == nil { return } - - var ( - id string - ref client.Reference - ) - ref, err = res.SingleRef() - if err != nil { - for refID := range res.Refs { - id = refID - break - } - } else { - id = ref.(*reference).id - } - - _, err = c.client.StatFile(ctx, &pb.StatFileRequest{ - Ref: id, - Path: ".", + err = res.EachRef(func(ref client.Reference) error { + _, err := ref.StatFile(ctx, client.StatRequest{Path: "."}) + return err }) }() } @@ -415,7 +413,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * return nil, err } - res = &client.Result{} + res = client.NewResult() if resp.Result == nil { if id := resp.Ref; id != "" { c.requests[id] = req @@ -456,6 +454,25 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * res.AddRef(k, ref) } } + + if resp.Result.Attestations != nil { + for p, as := range resp.Result.Attestations { + for _, a := range as.Attestation { + att, err := client.AttestationFromPB[client.Reference](a) + if err != nil { + return nil, err + } + if a.Ref.Id != "" { + ref, err := newReference(c, a.Ref) + if err != nil { + return nil, err + } + att.Ref = ref + } + res.AddAttestation(p, *att) + } + } + } } return res, nil @@ -472,7 +489,15 @@ func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb OSFeatures: platform.OSFeatures, } } - resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName}) + resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{ + ResolverType: int32(opt.ResolverType), + Ref: ref, + Platform: p, + ResolveMode: opt.ResolveMode, + LogName: opt.LogName, + SessionID: opt.Store.SessionID, + StoreID: opt.Store.StoreID, + }) if err != nil { return "", nil, err } @@ -806,6 +831,7 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien Tty: req.Tty, Security: req.SecurityMode, } + init.Meta.RemoveMountStubsRecursive = req.RemoveMountStubsRecursive if req.Stdin != nil { init.Fds = append(init.Fds, 0) } @@ -1036,6 +1062,15 @@ func (r 
*reference) ToState() (st llb.State, err error) { return llb.NewState(defop), nil } +func (r *reference) Evaluate(ctx context.Context) error { + req := &pb.EvaluateRequest{Ref: r.id} + _, err := r.c.client.Evaluate(ctx, req) + if err != nil { + return err + } + return nil +} + func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id} if r := req.Range; r != nil { diff --git a/frontend/gateway/pb/caps.go b/frontend/gateway/pb/caps.go index c4af39f3f0b9..deb192dc116e 100644 --- a/frontend/gateway/pb/caps.go +++ b/frontend/gateway/pb/caps.go @@ -56,8 +56,14 @@ const ( // errors. CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate" + CapGatewayEvaluate apicaps.CapID = "gateway.evaluate" + // CapGatewayWarnings is the capability to log warnings from frontend CapGatewayWarnings apicaps.CapID = "gateway.warnings" + + // CapAttestations is the capability to indicate that attestation + // references will be attached to results + CapAttestations apicaps.CapID = "reference.attestations" ) func init() { @@ -194,10 +200,24 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapGatewayEvaluate, + Name: "gateway evaluate", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapGatewayWarnings, Name: "logging warnings", Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapAttestations, + Name: "reference attestations", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/frontend/gateway/pb/gateway.pb.go b/frontend/gateway/pb/gateway.pb.go index e8e797ca7e10..da36afdd140b 100644 --- a/frontend/gateway/pb/gateway.pb.go +++ b/frontend/gateway/pb/gateway.pb.go @@ -11,7 +11,8 @@ import ( proto "github.com/gogo/protobuf/proto" types1 "github.com/moby/buildkit/api/types" pb "github.com/moby/buildkit/solver/pb" - pb1 "github.com/moby/buildkit/util/apicaps/pb" + pb1 "github.com/moby/buildkit/sourcepolicy/pb" + pb2 "github.com/moby/buildkit/util/apicaps/pb" github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" types "github.com/tonistiigi/fsutil/types" grpc "google.golang.org/grpc" @@ -33,17 +34,70 @@ var _ = math.Inf // proto package needs to be updated. 
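// Note: this file is generated from gateway.proto; the AttestationKind,
// InTotoSubject, Attestations, and EvaluateRequest/EvaluateResponse
// messages below mirror the new definitions in that proto file.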
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type AttestationKind int32 + +const ( + AttestationKindInToto AttestationKind = 0 + AttestationKindBundle AttestationKind = 1 +) + +var AttestationKind_name = map[int32]string{ + 0: "InToto", + 1: "Bundle", +} + +var AttestationKind_value = map[string]int32{ + "InToto": 0, + "Bundle": 1, +} + +func (x AttestationKind) String() string { + return proto.EnumName(AttestationKind_name, int32(x)) +} + +func (AttestationKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{0} +} + +type InTotoSubjectKind int32 + +const ( + InTotoSubjectKindSelf InTotoSubjectKind = 0 + InTotoSubjectKindRaw InTotoSubjectKind = 1 +) + +var InTotoSubjectKind_name = map[int32]string{ + 0: "Self", + 1: "Raw", +} + +var InTotoSubjectKind_value = map[string]int32{ + "Self": 0, + "Raw": 1, +} + +func (x InTotoSubjectKind) String() string { + return proto.EnumName(InTotoSubjectKind_name, int32(x)) +} + +func (InTotoSubjectKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{1} +} + type Result struct { // Types that are valid to be assigned to Result: + // // *Result_RefDeprecated // *Result_RefsDeprecated // *Result_Ref // *Result_Refs - Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Result isResult_Result `protobuf_oneof:"result"` + Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // 11 was used during development and is reserved for old attestation format + Attestations map[string]*Attestations `protobuf:"bytes,12,rep,name=attestations,proto3" json:"attestations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Result) Reset() { *m = Result{} } @@ -145,6 +199,13 @@ func (m *Result) GetMetadata() map[string][]byte { return nil } +func (m *Result) GetAttestations() map[string]*Attestations { + if m != nil { + return m.Attestations + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
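// It returns the wrapper types for the Result oneof (Result_RefDeprecated,
// Result_RefsDeprecated, Result_Ref, Result_Refs) so the proto runtime can
// marshal and unmarshal whichever variant is set.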
func (*Result) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -304,6 +365,196 @@ func (m *RefMap) GetRefs() map[string]*Ref { return nil } +type Attestations struct { + Attestation []*Attestation `protobuf:"bytes,1,rep,name=attestation,proto3" json:"attestation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestations) Reset() { *m = Attestations{} } +func (m *Attestations) String() string { return proto.CompactTextString(m) } +func (*Attestations) ProtoMessage() {} +func (*Attestations) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{4} +} +func (m *Attestations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestations.Merge(m, src) +} +func (m *Attestations) XXX_Size() int { + return m.Size() +} +func (m *Attestations) XXX_DiscardUnknown() { + xxx_messageInfo_Attestations.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestations proto.InternalMessageInfo + +func (m *Attestations) GetAttestation() []*Attestation { + if m != nil { + return m.Attestation + } + return nil +} + +type Attestation struct { + Kind AttestationKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.AttestationKind" json:"kind,omitempty"` + Metadata map[string][]byte `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + InTotoPredicateType string `protobuf:"bytes,5,opt,name=inTotoPredicateType,proto3" json:"inTotoPredicateType,omitempty"` + InTotoSubjects []*InTotoSubject `protobuf:"bytes,6,rep,name=inTotoSubjects,proto3" json:"inTotoSubjects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestation) Reset() { *m = Attestation{} } +func (m *Attestation) String() string { return proto.CompactTextString(m) } +func (*Attestation) ProtoMessage() {} +func (*Attestation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5} +} +func (m *Attestation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestation.Merge(m, src) +} +func (m *Attestation) XXX_Size() int { + return m.Size() +} +func (m *Attestation) XXX_DiscardUnknown() { + xxx_messageInfo_Attestation.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestation proto.InternalMessageInfo + +func (m *Attestation) GetKind() AttestationKind { + if m != nil { + return m.Kind + } + return AttestationKindInToto +} + +func (m *Attestation) GetMetadata() map[string][]byte { + if m 
!= nil { + return m.Metadata + } + return nil +} + +func (m *Attestation) GetRef() *Ref { + if m != nil { + return m.Ref + } + return nil +} + +func (m *Attestation) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Attestation) GetInTotoPredicateType() string { + if m != nil { + return m.InTotoPredicateType + } + return "" +} + +func (m *Attestation) GetInTotoSubjects() []*InTotoSubject { + if m != nil { + return m.InTotoSubjects + } + return nil +} + +type InTotoSubject struct { + Kind InTotoSubjectKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.InTotoSubjectKind" json:"kind,omitempty"` + Digest []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InTotoSubject) Reset() { *m = InTotoSubject{} } +func (m *InTotoSubject) String() string { return proto.CompactTextString(m) } +func (*InTotoSubject) ProtoMessage() {} +func (*InTotoSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{6} +} +func (m *InTotoSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InTotoSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InTotoSubject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InTotoSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_InTotoSubject.Merge(m, src) +} +func (m *InTotoSubject) XXX_Size() int { + return m.Size() +} +func (m *InTotoSubject) XXX_DiscardUnknown() { + xxx_messageInfo_InTotoSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_InTotoSubject proto.InternalMessageInfo + +func (m *InTotoSubject) GetKind() InTotoSubjectKind { + if m != nil { + return m.Kind + } + return InTotoSubjectKindSelf +} + +func (m *InTotoSubject) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type ReturnRequest struct { Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` Error *rpc.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` @@ -316,7 +567,7 @@ func (m *ReturnRequest) Reset() { *m = ReturnRequest{} } func (m *ReturnRequest) String() string { return proto.CompactTextString(m) } func (*ReturnRequest) ProtoMessage() {} func (*ReturnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{4} + return fileDescriptor_f1a937782ebbded5, []int{7} } func (m *ReturnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -369,7 +620,7 @@ func (m *ReturnResponse) Reset() { *m = ReturnResponse{} } func (m *ReturnResponse) String() string { return proto.CompactTextString(m) } func (*ReturnResponse) ProtoMessage() {} func (*ReturnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{5} + return fileDescriptor_f1a937782ebbded5, []int{8} } func (m *ReturnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -408,7 +659,7 @@ func (m *InputsRequest) Reset() { *m = InputsRequest{} } func (m *InputsRequest) String() string { return proto.CompactTextString(m) } func (*InputsRequest) ProtoMessage() {} func (*InputsRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_f1a937782ebbded5, []int{6} + return fileDescriptor_f1a937782ebbded5, []int{9} } func (m *InputsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -448,7 +699,7 @@ func (m *InputsResponse) Reset() { *m = InputsResponse{} } func (m *InputsResponse) String() string { return proto.CompactTextString(m) } func (*InputsResponse) ProtoMessage() {} func (*InputsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{7} + return fileDescriptor_f1a937782ebbded5, []int{10} } func (m *InputsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -489,6 +740,9 @@ type ResolveImageConfigRequest struct { Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` + SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -498,7 +752,7 @@ func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigReq func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } func (*ResolveImageConfigRequest) ProtoMessage() {} func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{8} + return fileDescriptor_f1a937782ebbded5, []int{11} } func (m *ResolveImageConfigRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,6 +809,27 @@ func (m *ResolveImageConfigRequest) GetLogName() string { return "" } +func (m *ResolveImageConfigRequest) GetResolverType() int32 { + if m != nil { + return m.ResolverType + } + return 0 +} + +func (m *ResolveImageConfigRequest) GetSessionID() string { + if m != nil { + return m.SessionID + } + return "" +} + +func (m *ResolveImageConfigRequest) GetStoreID() string { + if m != nil { + return m.StoreID + } + return "" +} + type ResolveImageConfigResponse struct { Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` @@ -567,7 +842,7 @@ func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigRe func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } func (*ResolveImageConfigResponse) ProtoMessage() {} func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{9} + return fileDescriptor_f1a937782ebbded5, []int{12} } func (m *ResolveImageConfigResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -607,13 +882,9 @@ type SolveRequest struct { Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // ImportCacheRefsDeprecated is 
deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportCacheRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importCacheRef}} - // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) - ImportCacheRefsDeprecated []string `protobuf:"bytes,4,rep,name=ImportCacheRefsDeprecated,proto3" json:"ImportCacheRefsDeprecated,omitempty"` - AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` - AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` + // 4 was removed in BuildKit v0.11.0. + AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` + AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` // apicaps.CapSolveInlineReturn deprecated Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` @@ -623,6 +894,7 @@ type SolveRequest struct { // apicaps:CapFrontendInputs FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Evaluate bool `protobuf:"varint,14,opt,name=Evaluate,proto3" json:"Evaluate,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,15,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -632,7 +904,7 @@ func (m *SolveRequest) Reset() { *m = SolveRequest{} } func (m *SolveRequest) String() string { return proto.CompactTextString(m) } func (*SolveRequest) ProtoMessage() {} func (*SolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{10} + return fileDescriptor_f1a937782ebbded5, []int{13} } func (m *SolveRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -682,13 +954,6 @@ func (m *SolveRequest) GetFrontendOpt() map[string]string { return nil } -func (m *SolveRequest) GetImportCacheRefsDeprecated() []string { - if m != nil { - return m.ImportCacheRefsDeprecated - } - return nil -} - func (m *SolveRequest) GetAllowResultReturn() bool { if m != nil { return m.AllowResultReturn @@ -738,6 +1003,13 @@ func (m *SolveRequest) GetEvaluate() bool { return false } +func (m *SolveRequest) GetSourcePolicies() []*pb1.Policy { + if m != nil { + return m.SourcePolicies + } + return nil +} + // CacheOptionsEntry corresponds to the control.CacheOptionsEntry type CacheOptionsEntry struct { Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` @@ -751,7 +1023,7 @@ func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } func (*CacheOptionsEntry) ProtoMessage() {} func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{11} + return fileDescriptor_f1a937782ebbded5, []int{14} } func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +1080,7 @@ func (m *SolveResponse) Reset() { *m = SolveResponse{} } func (m *SolveResponse) String() string { return proto.CompactTextString(m) } func (*SolveResponse) ProtoMessage() 
{} func (*SolveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{12} + return fileDescriptor_f1a937782ebbded5, []int{15} } func (m *SolveResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +1136,7 @@ func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } func (*ReadFileRequest) ProtoMessage() {} func (*ReadFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{13} + return fileDescriptor_f1a937782ebbded5, []int{16} } func (m *ReadFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -926,7 +1198,7 @@ func (m *FileRange) Reset() { *m = FileRange{} } func (m *FileRange) String() string { return proto.CompactTextString(m) } func (*FileRange) ProtoMessage() {} func (*FileRange) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{14} + return fileDescriptor_f1a937782ebbded5, []int{17} } func (m *FileRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -980,7 +1252,7 @@ func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } func (*ReadFileResponse) ProtoMessage() {} func (*ReadFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{15} + return fileDescriptor_f1a937782ebbded5, []int{18} } func (m *ReadFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1301,7 @@ func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } func (*ReadDirRequest) ProtoMessage() {} func (*ReadDirRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{16} + return fileDescriptor_f1a937782ebbded5, []int{19} } func (m *ReadDirRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1090,7 +1362,7 @@ func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) } func (*ReadDirResponse) ProtoMessage() {} func (*ReadDirResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{17} + return fileDescriptor_f1a937782ebbded5, []int{20} } func (m *ReadDirResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1138,7 +1410,7 @@ func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } func (*StatFileRequest) ProtoMessage() {} func (*StatFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{18} + return fileDescriptor_f1a937782ebbded5, []int{21} } func (m *StatFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1192,7 +1464,7 @@ func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } func (*StatFileResponse) ProtoMessage() {} func (*StatFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{19} + return fileDescriptor_f1a937782ebbded5, []int{22} } func (m *StatFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,24 +1500,25 @@ func (m *StatFileResponse) GetStat() *types.Stat { return nil } -type PingRequest struct { +type EvaluateRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` 
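+	// Ref identifies a reference previously returned by Solve; the new
+	// Evaluate RPC forces that result to be fully built. Sketch of gating
+	// this on its capability (assumes c is a gateway client.Client and ref
+	// came from a prior c.Solve):
+	//
+	//	if c.BuildOpts().Caps.Supports(pb.CapGatewayEvaluate) == nil {
+	//		err = ref.Evaluate(ctx)
+	//	} else { // older daemons: Stat(".") also triggers evaluation
+	//		_, err = ref.StatFile(ctx, client.StatRequest{Path: "."})
+	//	}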
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *PingRequest) Reset() { *m = PingRequest{} } -func (m *PingRequest) String() string { return proto.CompactTextString(m) } -func (*PingRequest) ProtoMessage() {} -func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{20} +func (m *EvaluateRequest) Reset() { *m = EvaluateRequest{} } +func (m *EvaluateRequest) String() string { return proto.CompactTextString(m) } +func (*EvaluateRequest) ProtoMessage() {} +func (*EvaluateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{23} } -func (m *PingRequest) XXX_Unmarshal(b []byte) error { +func (m *EvaluateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EvaluateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_EvaluateRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1255,39 +1528,43 @@ func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *PingRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PingRequest.Merge(m, src) +func (m *EvaluateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvaluateRequest.Merge(m, src) } -func (m *PingRequest) XXX_Size() int { +func (m *EvaluateRequest) XXX_Size() int { return m.Size() } -func (m *PingRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PingRequest.DiscardUnknown(m) +func (m *EvaluateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvaluateRequest.DiscardUnknown(m) } -var xxx_messageInfo_PingRequest proto.InternalMessageInfo +var xxx_messageInfo_EvaluateRequest proto.InternalMessageInfo -type PongResponse struct { - FrontendAPICaps []pb1.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` - LLBCaps []pb1.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` - Workers []*types1.WorkerRecord `protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *EvaluateRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" } -func (m *PongResponse) Reset() { *m = PongResponse{} } -func (m *PongResponse) String() string { return proto.CompactTextString(m) } -func (*PongResponse) ProtoMessage() {} -func (*PongResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{21} +type EvaluateResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PongResponse) XXX_Unmarshal(b []byte) error { + +func (m *EvaluateResponse) Reset() { *m = EvaluateResponse{} } +func (m *EvaluateResponse) String() string { return proto.CompactTextString(m) } +func (*EvaluateResponse) ProtoMessage() {} +func (*EvaluateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{24} +} +func (m *EvaluateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PongResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EvaluateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return 
xxx_messageInfo_PongResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_EvaluateResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1297,26 +1574,107 @@ func (m *PongResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *PongResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PongResponse.Merge(m, src) +func (m *EvaluateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvaluateResponse.Merge(m, src) } -func (m *PongResponse) XXX_Size() int { +func (m *EvaluateResponse) XXX_Size() int { return m.Size() } -func (m *PongResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PongResponse.DiscardUnknown(m) +func (m *EvaluateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EvaluateResponse.DiscardUnknown(m) } -var xxx_messageInfo_PongResponse proto.InternalMessageInfo +var xxx_messageInfo_EvaluateResponse proto.InternalMessageInfo -func (m *PongResponse) GetFrontendAPICaps() []pb1.APICap { - if m != nil { - return m.FrontendAPICaps +type PingRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{25} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return m.Size() +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +type PongResponse struct { + FrontendAPICaps []pb2.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` + LLBCaps []pb2.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` + Workers []*types1.WorkerRecord `protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PongResponse) Reset() { *m = PongResponse{} } +func (m *PongResponse) String() string { return proto.CompactTextString(m) } +func (*PongResponse) ProtoMessage() {} +func (*PongResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{26} +} +func (m *PongResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PongResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PongResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PongResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PongResponse.Merge(m, src) +} +func (m *PongResponse) XXX_Size() int { + return m.Size() +} +func (m *PongResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_PongResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PongResponse proto.InternalMessageInfo + +func (m *PongResponse) GetFrontendAPICaps() []pb2.APICap { + if m != nil { + return m.FrontendAPICaps } return nil } -func (m *PongResponse) GetLLBCaps() []pb1.APICap { +func (m *PongResponse) GetLLBCaps() []pb2.APICap { if m != nil { return m.LLBCaps } @@ -1347,7 +1705,7 @@ func (m *WarnRequest) Reset() { *m = WarnRequest{} } func (m *WarnRequest) String() string { return proto.CompactTextString(m) } func (*WarnRequest) ProtoMessage() {} func (*WarnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{22} + return fileDescriptor_f1a937782ebbded5, []int{27} } func (m *WarnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1428,7 +1786,7 @@ func (m *WarnResponse) Reset() { *m = WarnResponse{} } func (m *WarnResponse) String() string { return proto.CompactTextString(m) } func (*WarnResponse) ProtoMessage() {} func (*WarnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{23} + return fileDescriptor_f1a937782ebbded5, []int{28} } func (m *WarnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1474,7 +1832,7 @@ func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } func (*NewContainerRequest) ProtoMessage() {} func (*NewContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{24} + return fileDescriptor_f1a937782ebbded5, []int{29} } func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1555,7 +1913,7 @@ func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } func (*NewContainerResponse) ProtoMessage() {} func (*NewContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{25} + return fileDescriptor_f1a937782ebbded5, []int{30} } func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1595,7 +1953,7 @@ func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest func (m *ReleaseContainerRequest) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerRequest) ProtoMessage() {} func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{26} + return fileDescriptor_f1a937782ebbded5, []int{31} } func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1641,7 +1999,7 @@ func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerRespon func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerResponse) ProtoMessage() {} func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{27} + return fileDescriptor_f1a937782ebbded5, []int{32} } func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1673,6 +2031,7 @@ var xxx_messageInfo_ReleaseContainerResponse proto.InternalMessageInfo type ExecMessage struct { ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` // Types that are valid to be assigned to Input: + // // *ExecMessage_Init // *ExecMessage_File // *ExecMessage_Resize @@ -1690,7 +2049,7 @@ func (m *ExecMessage) Reset() { *m = ExecMessage{} } 
func (m *ExecMessage) String() string { return proto.CompactTextString(m) } func (*ExecMessage) ProtoMessage() {} func (*ExecMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{28} + return fileDescriptor_f1a937782ebbded5, []int{33} } func (m *ExecMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1846,7 +2205,7 @@ func (m *InitMessage) Reset() { *m = InitMessage{} } func (m *InitMessage) String() string { return proto.CompactTextString(m) } func (*InitMessage) ProtoMessage() {} func (*InitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{29} + return fileDescriptor_f1a937782ebbded5, []int{34} } func (m *InitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1922,7 +2281,7 @@ func (m *ExitMessage) Reset() { *m = ExitMessage{} } func (m *ExitMessage) String() string { return proto.CompactTextString(m) } func (*ExitMessage) ProtoMessage() {} func (*ExitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{30} + return fileDescriptor_f1a937782ebbded5, []int{35} } func (m *ExitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1975,7 +2334,7 @@ func (m *StartedMessage) Reset() { *m = StartedMessage{} } func (m *StartedMessage) String() string { return proto.CompactTextString(m) } func (*StartedMessage) ProtoMessage() {} func (*StartedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{31} + return fileDescriptor_f1a937782ebbded5, []int{36} } func (m *StartedMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2014,7 +2373,7 @@ func (m *DoneMessage) Reset() { *m = DoneMessage{} } func (m *DoneMessage) String() string { return proto.CompactTextString(m) } func (*DoneMessage) ProtoMessage() {} func (*DoneMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{32} + return fileDescriptor_f1a937782ebbded5, []int{37} } func (m *DoneMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2056,7 +2415,7 @@ func (m *FdMessage) Reset() { *m = FdMessage{} } func (m *FdMessage) String() string { return proto.CompactTextString(m) } func (*FdMessage) ProtoMessage() {} func (*FdMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{33} + return fileDescriptor_f1a937782ebbded5, []int{38} } func (m *FdMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2118,7 +2477,7 @@ func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } func (*ResizeMessage) ProtoMessage() {} func (*ResizeMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{34} + return fileDescriptor_f1a937782ebbded5, []int{39} } func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2174,7 +2533,7 @@ func (m *SignalMessage) Reset() { *m = SignalMessage{} } func (m *SignalMessage) String() string { return proto.CompactTextString(m) } func (*SignalMessage) ProtoMessage() {} func (*SignalMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{35} + return fileDescriptor_f1a937782ebbded5, []int{40} } func (m *SignalMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2211,13 +2570,20 @@ func (m *SignalMessage) GetName() string { } func init() { + proto.RegisterEnum("moby.buildkit.v1.frontend.AttestationKind", AttestationKind_name, AttestationKind_value) + 
proto.RegisterEnum("moby.buildkit.v1.frontend.InTotoSubjectKind", InTotoSubjectKind_name, InTotoSubjectKind_value) proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") + proto.RegisterMapType((map[string]*Attestations)(nil), "moby.buildkit.v1.frontend.Result.AttestationsEntry") proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") + proto.RegisterType((*Attestations)(nil), "moby.buildkit.v1.frontend.Attestations") + proto.RegisterType((*Attestation)(nil), "moby.buildkit.v1.frontend.Attestation") + proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Attestation.MetadataEntry") + proto.RegisterType((*InTotoSubject)(nil), "moby.buildkit.v1.frontend.InTotoSubject") proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") @@ -2238,6 +2604,8 @@ func init() { proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") + proto.RegisterType((*EvaluateRequest)(nil), "moby.buildkit.v1.frontend.EvaluateRequest") + proto.RegisterType((*EvaluateResponse)(nil), "moby.buildkit.v1.frontend.EvaluateResponse") proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") proto.RegisterType((*WarnRequest)(nil), "moby.buildkit.v1.frontend.WarnRequest") @@ -2259,137 +2627,161 @@ func init() { func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 2078 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x8a, 0x94, 0x48, 0x3e, 0xfe, 0xb1, 0x32, 0x4e, 0x53, 0x7a, 0x11, 0x38, 0xca, 0x36, - 0x55, 0x69, 0x47, 0x59, 0xa6, 0x72, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0xfa, 0x07, 0x29, 0x91, 0x64, - 0x75, 0x94, 0xc2, 0x40, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xc2, 0xab, 0x9d, 0xed, 0xec, 0xd0, - 0xb2, 0x92, 0x4b, 0x7b, 0xeb, 0xb1, 0x40, 0x81, 0x5e, 0x0b, 0xf4, 0x13, 0xf4, 0x13, 0xf4, 0x9c, - 0x63, 0x8f, 0x45, 0x0f, 0x41, 0xe1, 0xcf, 0x50, 0x14, 0xe8, 0x2d, 0x78, 0x33, 0xb3, 0xe4, 0x92, - 0xa2, 0x96, 0x24, 0x7c, 0xe2, 0xcc, 0xdb, 0xf7, 0x7b, 0xf3, 0xfe, 0xcd, 0x7b, 0x6f, 0x08, 0xf5, - 0x9e, 0x27, 0xd9, 0xa5, 0x77, 0xe5, 0xc6, 0x82, 0x4b, 0x4e, 0xee, 0x5c, 0xf0, 0xf3, 0x2b, 0xf7, - 0xbc, 0x1f, 0x84, 0xfe, 0xf3, 0x40, 0xba, 0x2f, 0x7e, 0xee, 0x76, 0x05, 0x8f, 0x24, 0x8b, 0x7c, - 0xfb, 0x83, 0x5e, 0x20, 0x9f, 0xf5, 0xcf, 0xdd, 0x0e, 0xbf, 0x68, 0xf7, 0x78, 0x8f, 0xb7, 0x15, - 0xe2, 0xbc, 0xdf, 0x55, 0x3b, 0xb5, 0x51, 0x2b, 0x2d, 0xc9, 0xde, 0x18, 0x67, 0xef, 0x71, 0xde, - 0x0b, 0x99, 0x17, 0x07, 0x89, 0x59, 0xb6, 0x45, 0xdc, 0x69, 0x27, 0xd2, 0x93, 0xfd, 
0xc4, 0x60, - 0xd6, 0x33, 0x18, 0x54, 0xa4, 0x9d, 0x2a, 0xd2, 0x4e, 0x78, 0xf8, 0x82, 0x89, 0x76, 0x7c, 0xde, - 0xe6, 0x71, 0xca, 0xdd, 0xbe, 0x91, 0xdb, 0x8b, 0x83, 0xb6, 0xbc, 0x8a, 0x59, 0xd2, 0xbe, 0xe4, - 0xe2, 0x39, 0x13, 0x06, 0xf0, 0xe0, 0x46, 0x40, 0x5f, 0x06, 0x21, 0xa2, 0x3a, 0x5e, 0x9c, 0xe0, - 0x21, 0xf8, 0x6b, 0x40, 0x59, 0xb3, 0x25, 0x8f, 0x82, 0x44, 0x06, 0x41, 0x2f, 0x68, 0x77, 0x13, - 0x85, 0xd1, 0xa7, 0xa0, 0x11, 0x9a, 0xdd, 0xf9, 0x63, 0x01, 0x96, 0x29, 0x4b, 0xfa, 0xa1, 0x24, - 0x6b, 0x50, 0x17, 0xac, 0xbb, 0xcb, 0x62, 0xc1, 0x3a, 0x9e, 0x64, 0x7e, 0xd3, 0x5a, 0xb5, 0x5a, - 0x95, 0x83, 0x05, 0x3a, 0x4a, 0x26, 0xbf, 0x86, 0x86, 0x60, 0xdd, 0x24, 0xc3, 0xb8, 0xb8, 0x6a, - 0xb5, 0xaa, 0x1b, 0xef, 0xbb, 0x37, 0x06, 0xc3, 0xa5, 0xac, 0x7b, 0xec, 0xc5, 0x43, 0xc8, 0xc1, - 0x02, 0x1d, 0x13, 0x42, 0x36, 0xa0, 0x20, 0x58, 0xb7, 0x59, 0x50, 0xb2, 0xee, 0xe6, 0xcb, 0x3a, - 0x58, 0xa0, 0xc8, 0x4c, 0x36, 0xa1, 0x88, 0x52, 0x9a, 0x45, 0x05, 0x7a, 0x77, 0xaa, 0x02, 0x07, - 0x0b, 0x54, 0x01, 0xc8, 0x17, 0x50, 0xbe, 0x60, 0xd2, 0xf3, 0x3d, 0xe9, 0x35, 0x61, 0xb5, 0xd0, - 0xaa, 0x6e, 0xb4, 0x73, 0xc1, 0xe8, 0x20, 0xf7, 0xd8, 0x20, 0xf6, 0x22, 0x29, 0xae, 0xe8, 0x40, - 0x80, 0xfd, 0x08, 0xea, 0x23, 0x9f, 0xc8, 0x0a, 0x14, 0x9e, 0xb3, 0x2b, 0xed, 0x3f, 0x8a, 0x4b, - 0xf2, 0x26, 0x2c, 0xbd, 0xf0, 0xc2, 0x3e, 0x53, 0xae, 0xaa, 0x51, 0xbd, 0xd9, 0x5a, 0x7c, 0x68, - 0x6d, 0x97, 0x61, 0x59, 0x28, 0xf1, 0xce, 0x5f, 0x2c, 0x58, 0x19, 0xf7, 0x13, 0x39, 0x34, 0x16, - 0x5a, 0x4a, 0xc9, 0x8f, 0xe7, 0x70, 0x31, 0x12, 0x12, 0xad, 0xaa, 0x12, 0x61, 0x6f, 0x42, 0x65, - 0x40, 0x9a, 0xa6, 0x62, 0x25, 0xa3, 0xa2, 0xb3, 0x09, 0x05, 0xca, 0xba, 0xa4, 0x01, 0x8b, 0x81, - 0x49, 0x0a, 0xba, 0x18, 0xf8, 0x64, 0x15, 0x0a, 0x3e, 0xeb, 0x9a, 0xe0, 0x37, 0xdc, 0xf8, 0xdc, - 0xdd, 0x65, 0xdd, 0x20, 0x0a, 0x64, 0xc0, 0x23, 0x8a, 0x9f, 0x9c, 0xbf, 0x59, 0x98, 0x5c, 0xa8, - 0x16, 0xf9, 0x6c, 0xc4, 0x8e, 0xe9, 0xa9, 0x72, 0x4d, 0xfb, 0xa7, 0xf9, 0xda, 0x7f, 0x94, 0xd5, - 0x7e, 0x6a, 0xfe, 0x64, 0xad, 0x93, 0x50, 0xa7, 0x4c, 0xf6, 0x45, 0x44, 0xd9, 0xef, 0xfa, 0x2c, - 0x91, 0xe4, 0x17, 0x69, 0x44, 0x94, 0xfc, 0x69, 0x69, 0x85, 0x8c, 0xd4, 0x00, 0x48, 0x0b, 0x96, - 0x98, 0x10, 0x5c, 0x18, 0x2d, 0x88, 0xab, 0x2b, 0x87, 0x2b, 0xe2, 0x8e, 0x7b, 0xa6, 0x2a, 0x07, - 0xd5, 0x0c, 0xce, 0x0a, 0x34, 0xd2, 0x53, 0x93, 0x98, 0x47, 0x09, 0x73, 0x6e, 0x41, 0xfd, 0x30, - 0x8a, 0xfb, 0x32, 0x31, 0x7a, 0x38, 0xff, 0xb0, 0xa0, 0x91, 0x52, 0x34, 0x0f, 0xf9, 0x1a, 0xaa, - 0x43, 0x1f, 0xa7, 0xce, 0xdc, 0xca, 0xd1, 0x6f, 0x14, 0x9f, 0x09, 0x90, 0xf1, 0x6d, 0x56, 0x9c, - 0x7d, 0x02, 0x2b, 0xe3, 0x0c, 0x13, 0x3c, 0xfd, 0xde, 0xa8, 0xa7, 0xc7, 0x03, 0x9f, 0xf1, 0xec, - 0x9f, 0x2d, 0xb8, 0x43, 0x99, 0x2a, 0x85, 0x87, 0x17, 0x5e, 0x8f, 0xed, 0xf0, 0xa8, 0x1b, 0xf4, - 0x52, 0x37, 0xaf, 0xa8, 0xac, 0x4a, 0x25, 0x63, 0x82, 0xb5, 0xa0, 0x7c, 0x1a, 0x7a, 0xb2, 0xcb, - 0xc5, 0x85, 0x11, 0x5e, 0x43, 0xe1, 0x29, 0x8d, 0x0e, 0xbe, 0x92, 0x55, 0xa8, 0x1a, 0xc1, 0xc7, - 0xdc, 0x67, 0xaa, 0x66, 0x54, 0x68, 0x96, 0x44, 0x9a, 0x50, 0x3a, 0xe2, 0xbd, 0x13, 0xef, 0x82, - 0xa9, 0xe2, 0x50, 0xa1, 0xe9, 0xd6, 0xf9, 0xbd, 0x05, 0xf6, 0x24, 0xad, 0x8c, 0x8b, 0x3f, 0x87, - 0xe5, 0xdd, 0xa0, 0xc7, 0x12, 0x1d, 0xfd, 0xca, 0xf6, 0xc6, 0x77, 0xdf, 0xbf, 0xb3, 0xf0, 0xef, - 0xef, 0xdf, 0xb9, 0x9f, 0xa9, 0xab, 0x3c, 0x66, 0x51, 0x87, 0x47, 0xd2, 0x0b, 0x22, 0x26, 0xb0, - 0x3d, 0x7c, 0xe0, 0x2b, 0x88, 0xab, 0x91, 0xd4, 0x48, 0x20, 0x6f, 0xc1, 0xb2, 0x96, 0x6e, 0xae, - 0xbd, 0xd9, 0x39, 0xff, 0x5d, 0x82, 0xda, 0x19, 0x2a, 0x90, 0xfa, 0xc2, 0x05, 0x18, 0xba, 0xd0, - 0xa4, 0xdd, 
0xb8, 0x63, 0x33, 0x1c, 0xc4, 0x86, 0xf2, 0xbe, 0x09, 0xb1, 0xb9, 0xae, 0x83, 0x3d, - 0xf9, 0x0a, 0xaa, 0xe9, 0xfa, 0x49, 0x2c, 0x9b, 0x05, 0x95, 0x23, 0x0f, 0x73, 0x72, 0x24, 0xab, - 0x89, 0x9b, 0x81, 0x9a, 0x0c, 0xc9, 0x50, 0xc8, 0x27, 0x70, 0xe7, 0xf0, 0x22, 0xe6, 0x42, 0xee, - 0x78, 0x9d, 0x67, 0x8c, 0x8e, 0x76, 0x81, 0xe2, 0x6a, 0xa1, 0x55, 0xa1, 0x37, 0x33, 0x90, 0x75, - 0x78, 0xc3, 0x0b, 0x43, 0x7e, 0x69, 0x2e, 0x8d, 0x4a, 0xff, 0xe6, 0xd2, 0xaa, 0xd5, 0x2a, 0xd3, - 0xeb, 0x1f, 0xc8, 0x87, 0x70, 0x3b, 0x43, 0x7c, 0x2c, 0x84, 0x77, 0x85, 0xf9, 0xb2, 0xac, 0xf8, - 0x27, 0x7d, 0xc2, 0x0a, 0xb6, 0x1f, 0x44, 0x5e, 0xd8, 0x04, 0xc5, 0xa3, 0x37, 0xc4, 0x81, 0xda, - 0xde, 0x4b, 0x54, 0x89, 0x89, 0xc7, 0x52, 0x8a, 0x66, 0x55, 0x85, 0x62, 0x84, 0x46, 0x4e, 0xa1, - 0xa6, 0x14, 0xd6, 0xba, 0x27, 0xcd, 0x9a, 0x72, 0xda, 0x7a, 0x8e, 0xd3, 0x14, 0xfb, 0x93, 0x38, - 0x73, 0x95, 0x46, 0x24, 0x90, 0x0e, 0x34, 0x52, 0xc7, 0xe9, 0x3b, 0xd8, 0xac, 0x2b, 0x99, 0x8f, - 0xe6, 0x0d, 0x84, 0x46, 0xeb, 0x23, 0xc6, 0x44, 0x62, 0x1a, 0xec, 0xe1, 0x75, 0xf3, 0x24, 0x6b, - 0x36, 0x94, 0xcd, 0x83, 0xbd, 0xfd, 0x29, 0xac, 0x8c, 0xc7, 0x72, 0x9e, 0xa2, 0x6f, 0xff, 0x0a, - 0x6e, 0x4f, 0x50, 0xe1, 0xb5, 0xea, 0xc1, 0xdf, 0x2d, 0x78, 0xe3, 0x9a, 0xdf, 0x08, 0x81, 0xe2, - 0x97, 0x57, 0x31, 0x33, 0x22, 0xd5, 0x9a, 0x1c, 0xc3, 0x12, 0xc6, 0x25, 0x69, 0x2e, 0x2a, 0xa7, - 0x6d, 0xce, 0x13, 0x08, 0x57, 0x21, 0xb5, 0xc3, 0xb4, 0x14, 0xfb, 0x21, 0xc0, 0x90, 0x38, 0x57, - 0xeb, 0xfb, 0x1a, 0xea, 0x26, 0x2a, 0xa6, 0x3c, 0xac, 0xe8, 0x29, 0xc5, 0x80, 0x71, 0x06, 0x19, - 0xb6, 0x8b, 0xc2, 0x9c, 0xed, 0xc2, 0xf9, 0x16, 0x6e, 0x51, 0xe6, 0xf9, 0xfb, 0x41, 0xc8, 0x6e, - 0xae, 0x8a, 0x78, 0xd7, 0x83, 0x90, 0x9d, 0x7a, 0xf2, 0xd9, 0xe0, 0xae, 0x9b, 0x3d, 0xd9, 0x82, - 0x25, 0xea, 0x45, 0x3d, 0x66, 0x8e, 0x7e, 0x2f, 0xe7, 0x68, 0x75, 0x08, 0xf2, 0x52, 0x0d, 0x71, - 0x1e, 0x41, 0x65, 0x40, 0xc3, 0x4a, 0xf5, 0xa4, 0xdb, 0x4d, 0x98, 0xae, 0x7a, 0x05, 0x6a, 0x76, - 0x48, 0x3f, 0x62, 0x51, 0xcf, 0x1c, 0x5d, 0xa0, 0x66, 0xe7, 0xac, 0xe1, 0xa8, 0x92, 0x6a, 0x6e, - 0x5c, 0x43, 0xa0, 0xb8, 0x8b, 0xf3, 0x94, 0xa5, 0x2e, 0x98, 0x5a, 0x3b, 0x3e, 0xb6, 0x39, 0xcf, - 0xdf, 0x0d, 0xc4, 0xcd, 0x06, 0x36, 0xa1, 0xb4, 0x1b, 0x88, 0x8c, 0x7d, 0xe9, 0x96, 0xac, 0x61, - 0x03, 0xec, 0x84, 0x7d, 0x1f, 0xad, 0x95, 0x4c, 0x44, 0xa6, 0xd2, 0x8f, 0x51, 0x9d, 0xcf, 0xb4, - 0x1f, 0xd5, 0x29, 0x46, 0x99, 0x75, 0x28, 0xb1, 0x48, 0x8a, 0x80, 0xa5, 0x5d, 0x92, 0xb8, 0x7a, - 0x04, 0x76, 0xd5, 0x08, 0xac, 0xba, 0x31, 0x4d, 0x59, 0x9c, 0x4d, 0xb8, 0x85, 0x84, 0xfc, 0x40, - 0x10, 0x28, 0x66, 0x94, 0x54, 0x6b, 0x67, 0x0b, 0x56, 0x86, 0x40, 0x73, 0xf4, 0x1a, 0x14, 0x71, - 0xc0, 0x36, 0x65, 0x7c, 0xd2, 0xb9, 0xea, 0xbb, 0x53, 0x87, 0xea, 0x69, 0x10, 0xa5, 0xfd, 0xd0, - 0x79, 0x65, 0x41, 0xed, 0x94, 0x47, 0xc3, 0x4e, 0x74, 0x0a, 0xb7, 0xd2, 0x1b, 0xf8, 0xf8, 0xf4, - 0x70, 0xc7, 0x8b, 0x53, 0x53, 0x56, 0xaf, 0x87, 0xd9, 0xbc, 0x05, 0x5c, 0xcd, 0xb8, 0x5d, 0xc4, - 0xa6, 0x45, 0xc7, 0xe1, 0xe4, 0x97, 0x50, 0x3a, 0x3a, 0xda, 0x56, 0x92, 0x16, 0xe7, 0x92, 0x94, - 0xc2, 0xc8, 0xa7, 0x50, 0x7a, 0xaa, 0x9e, 0x28, 0x89, 0x69, 0x2c, 0x13, 0x52, 0x4e, 0x1b, 0xaa, - 0xd9, 0x28, 0xeb, 0x70, 0xe1, 0xd3, 0x14, 0xe4, 0xfc, 0xcf, 0x82, 0xea, 0x53, 0x6f, 0x38, 0x6b, - 0x7d, 0x0e, 0xcb, 0xfe, 0x6b, 0x77, 0x5b, 0xbd, 0xc5, 0x5b, 0x1c, 0xb2, 0x17, 0x2c, 0x34, 0xa9, - 0xaa, 0x37, 0x48, 0x4d, 0x9e, 0x71, 0xa1, 0x6f, 0x67, 0x8d, 0xea, 0x0d, 0xe6, 0xb5, 0xcf, 0xa4, - 0x17, 0x84, 0xaa, 0x6b, 0xd5, 0xa8, 0xd9, 0x61, 0xd4, 0xfb, 0x22, 0x54, 0x4d, 0xa9, 0x42, 0x71, - 0x49, 0x1c, 0x28, 0x06, 0x51, 0x97, 
0xab, 0xbe, 0x63, 0xaa, 0xdb, 0x19, 0xef, 0x8b, 0x0e, 0x3b, - 0x8c, 0xba, 0x9c, 0xaa, 0x6f, 0xe4, 0x5d, 0x58, 0x16, 0x78, 0x8d, 0x92, 0x66, 0x49, 0x39, 0xa5, - 0x82, 0x5c, 0xfa, 0xb2, 0x99, 0x0f, 0x4e, 0x03, 0x6a, 0xda, 0x6e, 0x33, 0xed, 0xfd, 0x69, 0x11, - 0x6e, 0x9f, 0xb0, 0xcb, 0x9d, 0xd4, 0xae, 0xd4, 0x21, 0xab, 0x50, 0x1d, 0xd0, 0x0e, 0x77, 0x4d, - 0xfa, 0x65, 0x49, 0x78, 0xd8, 0x31, 0xef, 0x47, 0x32, 0x8d, 0xa1, 0x3a, 0x4c, 0x51, 0xa8, 0xf9, - 0x40, 0x7e, 0x0a, 0xa5, 0x13, 0x26, 0xf1, 0x2d, 0xa9, 0xac, 0x6e, 0x6c, 0x54, 0x91, 0xe7, 0x84, - 0x49, 0x1c, 0x8d, 0x68, 0xfa, 0x0d, 0xe7, 0xad, 0x38, 0x9d, 0xb7, 0x8a, 0x93, 0xe6, 0xad, 0xf4, - 0x2b, 0xd9, 0x84, 0x6a, 0x87, 0x47, 0x89, 0x14, 0x5e, 0x80, 0x07, 0x2f, 0x29, 0xe6, 0x1f, 0x21, - 0xb3, 0x0e, 0xec, 0xce, 0xf0, 0x23, 0xcd, 0x72, 0x92, 0xfb, 0x00, 0xec, 0xa5, 0x14, 0xde, 0x01, - 0x4f, 0x64, 0xd2, 0x5c, 0x56, 0x0a, 0x03, 0xe2, 0x90, 0x70, 0x78, 0x4a, 0x33, 0x5f, 0x9d, 0xb7, - 0xe0, 0xcd, 0x51, 0x8f, 0x18, 0x57, 0x3d, 0x82, 0x1f, 0x53, 0x16, 0x32, 0x2f, 0x61, 0xf3, 0x7b, - 0xcb, 0xb1, 0xa1, 0x79, 0x1d, 0x6c, 0x04, 0xff, 0xbf, 0x00, 0xd5, 0xbd, 0x97, 0xac, 0x73, 0xcc, - 0x92, 0xc4, 0xeb, 0x31, 0xf2, 0x36, 0x54, 0x4e, 0x05, 0xef, 0xb0, 0x24, 0x19, 0xc8, 0x1a, 0x12, - 0xc8, 0x27, 0x50, 0x3c, 0x8c, 0x02, 0x69, 0xda, 0xdc, 0x5a, 0xee, 0xd0, 0x1d, 0x48, 0x23, 0x13, - 0x1f, 0x9c, 0xb8, 0x25, 0x5b, 0x50, 0xc4, 0x22, 0x31, 0x4b, 0xa1, 0xf6, 0x33, 0x58, 0xc4, 0x90, - 0x6d, 0xf5, 0x44, 0x0f, 0xbe, 0x61, 0x26, 0x4a, 0xad, 0xfc, 0x0e, 0x13, 0x7c, 0xc3, 0x86, 0x12, - 0x0c, 0x92, 0xec, 0x41, 0xe9, 0x4c, 0x7a, 0x02, 0xe7, 0x34, 0x1d, 0xbd, 0x7b, 0x79, 0x83, 0x88, - 0xe6, 0x1c, 0x4a, 0x49, 0xb1, 0xe8, 0x84, 0xbd, 0x97, 0x81, 0x34, 0xb7, 0x21, 0xcf, 0x09, 0xc8, - 0x96, 0x31, 0x04, 0xb7, 0x88, 0xde, 0xe5, 0x11, 0x6b, 0x96, 0xa6, 0xa2, 0x91, 0x2d, 0x83, 0xc6, - 0x2d, 0xba, 0xe1, 0x2c, 0xe8, 0xe1, 0x7c, 0x57, 0x9e, 0xea, 0x06, 0xcd, 0x98, 0x71, 0x83, 0x26, - 0x6c, 0x97, 0x60, 0x49, 0x4d, 0x33, 0xce, 0x5f, 0x2d, 0xa8, 0x66, 0xe2, 0x34, 0xc3, 0xbd, 0x7b, - 0x1b, 0x8a, 0xf8, 0xca, 0x37, 0xf1, 0x2f, 0xab, 0x5b, 0xc7, 0xa4, 0x47, 0x15, 0x15, 0x0b, 0xc7, - 0xbe, 0xaf, 0x8b, 0x62, 0x9d, 0xe2, 0x12, 0x29, 0x5f, 0xca, 0x2b, 0x15, 0xb2, 0x32, 0xc5, 0x25, - 0x59, 0x87, 0xf2, 0x19, 0xeb, 0xf4, 0x45, 0x20, 0xaf, 0x54, 0x10, 0x1a, 0x1b, 0x2b, 0xaa, 0x9c, - 0x18, 0x9a, 0xba, 0x9c, 0x03, 0x0e, 0xe7, 0x0b, 0x4c, 0xce, 0xa1, 0x82, 0x04, 0x8a, 0x3b, 0xf8, - 0xd6, 0x41, 0xcd, 0xea, 0x54, 0xad, 0xf1, 0xb9, 0xb9, 0x37, 0xed, 0xb9, 0xb9, 0x97, 0x3e, 0x37, - 0x47, 0x83, 0x8a, 0xdd, 0x27, 0xe3, 0x64, 0xe7, 0x31, 0x54, 0x06, 0x89, 0x87, 0x2f, 0xfd, 0x7d, - 0xdf, 0x9c, 0xb4, 0xb8, 0xef, 0xa3, 0x29, 0x7b, 0x4f, 0xf6, 0xd5, 0x29, 0x65, 0x8a, 0xcb, 0x41, - 0xaf, 0x2f, 0x64, 0x7a, 0xfd, 0x26, 0x3e, 0xa4, 0x33, 0xd9, 0x87, 0x4c, 0x94, 0x5f, 0x26, 0xa9, - 0xca, 0xb8, 0xd6, 0x66, 0x84, 0x89, 0x92, 0xa5, 0xcc, 0x08, 0x13, 0xe7, 0x27, 0x50, 0x1f, 0x89, - 0x17, 0x32, 0xa9, 0x97, 0x9b, 0x19, 0x09, 0x71, 0xbd, 0xf1, 0xaf, 0x0a, 0x54, 0x8e, 0x8e, 0xb6, - 0xb7, 0x45, 0xe0, 0xf7, 0x18, 0xf9, 0x83, 0x05, 0xe4, 0xfa, 0x23, 0x8e, 0x7c, 0x94, 0x7f, 0x33, - 0x26, 0xbf, 0x44, 0xed, 0x8f, 0xe7, 0x44, 0x99, 0xfe, 0xfc, 0x15, 0x2c, 0xa9, 0xd9, 0x90, 0xfc, - 0x6c, 0xc6, 0x99, 0xde, 0x6e, 0x4d, 0x67, 0x34, 0xb2, 0x3b, 0x50, 0x4e, 0xe7, 0x2b, 0x72, 0x3f, - 0x57, 0xbd, 0x91, 0xf1, 0xd1, 0x7e, 0x7f, 0x26, 0x5e, 0x73, 0xc8, 0x6f, 0xa1, 0x64, 0xc6, 0x26, - 0x72, 0x6f, 0x0a, 0x6e, 0x38, 0xc0, 0xd9, 0xf7, 0x67, 0x61, 0x1d, 0x9a, 0x91, 0x8e, 0x47, 0xb9, - 0x66, 0x8c, 0x0d, 0x5f, 0xb9, 0x66, 0x5c, 0x9b, 0xb7, 0x9e, 
0x42, 0x11, 0xe7, 0x28, 0x92, 0x57, - 0x4f, 0x32, 0x83, 0x96, 0x9d, 0x17, 0xae, 0x91, 0x01, 0xec, 0x37, 0x58, 0x77, 0xd5, 0x5b, 0x34, - 0xbf, 0xe2, 0x66, 0xfe, 0x3c, 0xb2, 0xef, 0xcd, 0xc0, 0x39, 0x14, 0x6f, 0xde, 0x71, 0xad, 0x19, - 0xfe, 0xc1, 0x99, 0x2e, 0x7e, 0xec, 0xbf, 0x22, 0x0e, 0xb5, 0x6c, 0x3b, 0x25, 0x6e, 0x0e, 0x74, - 0xc2, 0x24, 0x62, 0xb7, 0x67, 0xe6, 0x37, 0x07, 0x7e, 0x8b, 0x6f, 0x82, 0xd1, 0x56, 0x4b, 0x36, - 0x72, 0xdd, 0x31, 0xb1, 0xa9, 0xdb, 0x0f, 0xe6, 0xc2, 0x98, 0xc3, 0x3d, 0xdd, 0xca, 0x4d, 0xbb, - 0x26, 0xf9, 0x9d, 0x69, 0xd0, 0xf2, 0xed, 0x19, 0xf9, 0x5a, 0xd6, 0x87, 0x16, 0xe6, 0x19, 0x8e, - 0x70, 0xb9, 0xb2, 0x33, 0xb3, 0x6d, 0x6e, 0x9e, 0x65, 0x67, 0xc1, 0xed, 0xda, 0x77, 0xaf, 0xee, - 0x5a, 0xff, 0x7c, 0x75, 0xd7, 0xfa, 0xcf, 0xab, 0xbb, 0xd6, 0xf9, 0xb2, 0xfa, 0x63, 0xfe, 0xc1, - 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0x5d, 0x25, 0xb8, 0xea, 0x18, 0x00, 0x00, + // 2452 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x8a, 0x14, 0x25, 0x3d, 0x52, 0x14, 0x3d, 0x76, 0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, + 0x63, 0x45, 0x56, 0x9c, 0xa5, 0xbf, 0xb2, 0x0d, 0xb9, 0x76, 0xeb, 0xc4, 0xfa, 0x05, 0x29, 0x96, + 0x6c, 0x76, 0xe4, 0xc2, 0x45, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xd6, 0xab, 0xdd, 0xed, 0xee, + 0xd0, 0x32, 0x93, 0x4b, 0x7b, 0x28, 0x50, 0xe4, 0xd4, 0x53, 0x6f, 0x41, 0x81, 0x16, 0xe8, 0xb9, + 0xfd, 0x03, 0xda, 0x73, 0x80, 0x5e, 0x7a, 0xee, 0x21, 0x28, 0xfc, 0x0f, 0xf4, 0x56, 0xa0, 0xb7, + 0xe2, 0xcd, 0xcc, 0x92, 0xc3, 0x1f, 0x5a, 0x92, 0xf5, 0x89, 0x33, 0x6f, 0xde, 0x8f, 0x79, 0xef, + 0xcd, 0x7b, 0xf3, 0x99, 0x25, 0x2c, 0xb5, 0x1c, 0xce, 0xce, 0x9c, 0x8e, 0x1d, 0xc5, 0x21, 0x0f, + 0xc9, 0xe5, 0xd3, 0xf0, 0xa4, 0x63, 0x9f, 0xb4, 0x3d, 0xdf, 0x7d, 0xe9, 0x71, 0xfb, 0xd5, 0xff, + 0xdb, 0xcd, 0x38, 0x0c, 0x38, 0x0b, 0x5c, 0xf3, 0xe3, 0x96, 0xc7, 0x5f, 0xb4, 0x4f, 0xec, 0x46, + 0x78, 0x5a, 0x6b, 0x85, 0xad, 0xb0, 0x26, 0x24, 0x4e, 0xda, 0x4d, 0x31, 0x13, 0x13, 0x31, 0x92, + 0x9a, 0xcc, 0x8d, 0x41, 0xf6, 0x56, 0x18, 0xb6, 0x7c, 0xe6, 0x44, 0x5e, 0xa2, 0x86, 0xb5, 0x38, + 0x6a, 0xd4, 0x12, 0xee, 0xf0, 0x76, 0xa2, 0x64, 0x6e, 0x6a, 0x32, 0xb8, 0x91, 0x5a, 0xba, 0x91, + 0x5a, 0x12, 0xfa, 0xaf, 0x58, 0x5c, 0x8b, 0x4e, 0x6a, 0x61, 0x94, 0x72, 0xd7, 0xce, 0xe5, 0x76, + 0x22, 0xaf, 0xc6, 0x3b, 0x11, 0x4b, 0x6a, 0x67, 0x61, 0xfc, 0x92, 0xc5, 0x4a, 0xe0, 0xf6, 0xb9, + 0x02, 0x6d, 0xee, 0xf9, 0x28, 0xd5, 0x70, 0xa2, 0x04, 0x8d, 0xe0, 0xaf, 0x12, 0xd2, 0xdd, 0xe6, + 0x61, 0xe0, 0x25, 0xdc, 0xf3, 0x5a, 0x5e, 0xad, 0x99, 0x08, 0x19, 0x69, 0x05, 0x9d, 0x50, 0xec, + 0x77, 0x33, 0x5c, 0x68, 0xc7, 0x0d, 0x16, 0x85, 0xbe, 0xd7, 0xe8, 0xa0, 0x0d, 0x39, 0x92, 0x62, + 0xd6, 0xdf, 0xf2, 0x50, 0xa0, 0x2c, 0x69, 0xfb, 0x9c, 0xac, 0xc2, 0x52, 0xcc, 0x9a, 0x3b, 0x2c, + 0x8a, 0x59, 0xc3, 0xe1, 0xcc, 0xad, 0x1a, 0x2b, 0xc6, 0xda, 0xe2, 0xfe, 0x0c, 0xed, 0x27, 0x93, + 0x1f, 0x41, 0x39, 0x66, 0xcd, 0x44, 0x63, 0x9c, 0x5d, 0x31, 0xd6, 0x8a, 0x1b, 0x1f, 0xd9, 0xe7, + 0xe6, 0xd0, 0xa6, 0xac, 0x79, 0xe4, 0x44, 0x3d, 0x91, 0xfd, 0x19, 0x3a, 0xa0, 0x84, 0x6c, 0x40, + 0x2e, 0x66, 0xcd, 0x6a, 0x4e, 0xe8, 0xba, 0x92, 0xad, 0x6b, 0x7f, 0x86, 0x22, 0x33, 0xd9, 0x84, + 0x3c, 0x6a, 0xa9, 0xe6, 0x85, 0xd0, 0xd5, 0xb1, 0x1b, 0xd8, 0x9f, 0xa1, 0x42, 0x80, 0x3c, 0x86, + 0x85, 0x53, 0xc6, 0x1d, 0xd7, 0xe1, 0x4e, 0x15, 0x56, 0x72, 0x6b, 0xc5, 0x8d, 0x5a, 0xa6, 0x30, + 0x06, 0xc8, 0x3e, 0x52, 0x12, 0xbb, 0x01, 0x8f, 0x3b, 0xb4, 0xab, 0x80, 0x3c, 0x87, 0x92, 0xc3, + 0x39, 0xc3, 0x64, 0x78, 0x61, 0x90, 0x54, 0x4b, 
0x42, 0xe1, 0xed, 0xf1, 0x0a, 0x1f, 0x69, 0x52, + 0x52, 0x69, 0x9f, 0x22, 0xf3, 0x01, 0x2c, 0xf5, 0xd9, 0x24, 0x15, 0xc8, 0xbd, 0x64, 0x1d, 0x99, + 0x18, 0x8a, 0x43, 0x72, 0x09, 0xe6, 0x5e, 0x39, 0x7e, 0x9b, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0xdc, + 0x9f, 0xbd, 0x67, 0x98, 0x2f, 0xe0, 0xc2, 0x90, 0xfe, 0x11, 0x0a, 0x7e, 0xa0, 0x2b, 0x28, 0x6e, + 0x7c, 0x98, 0xb1, 0x6b, 0x5d, 0x9d, 0x66, 0x69, 0x6b, 0x01, 0x0a, 0xb1, 0x70, 0xc8, 0xfa, 0xad, + 0x01, 0x95, 0xc1, 0x54, 0x93, 0x03, 0x95, 0x24, 0x43, 0x84, 0xe5, 0xee, 0x14, 0xa7, 0x04, 0x09, + 0x2a, 0x30, 0x42, 0x85, 0xb9, 0x09, 0x8b, 0x5d, 0xd2, 0xb8, 0x60, 0x2c, 0x6a, 0x5b, 0xb4, 0x36, + 0x21, 0x47, 0x59, 0x93, 0x94, 0x61, 0xd6, 0x53, 0xe7, 0x9a, 0xce, 0x7a, 0x2e, 0x59, 0x81, 0x9c, + 0xcb, 0x9a, 0xca, 0xf5, 0xb2, 0x1d, 0x9d, 0xd8, 0x3b, 0xac, 0xe9, 0x05, 0x1e, 0xba, 0x48, 0x71, + 0xc9, 0xfa, 0xbd, 0x81, 0xf5, 0x81, 0xdb, 0x22, 0x9f, 0xf4, 0xf9, 0x31, 0xfe, 0xb4, 0x0f, 0xed, + 0xfe, 0x79, 0xf6, 0xee, 0xef, 0xf4, 0x67, 0x62, 0x4c, 0x09, 0xe8, 0xde, 0xfd, 0x18, 0x4a, 0x7a, + 0x6e, 0xc8, 0x3e, 0x14, 0xb5, 0x73, 0xa4, 0x36, 0xbc, 0x3a, 0x59, 0x66, 0xa9, 0x2e, 0x6a, 0xfd, + 0x31, 0x07, 0x45, 0x6d, 0x91, 0x3c, 0x84, 0xfc, 0x4b, 0x2f, 0x90, 0x21, 0x2c, 0x6f, 0xac, 0x4f, + 0xa6, 0xf2, 0xb1, 0x17, 0xb8, 0x54, 0xc8, 0x91, 0xba, 0x56, 0x77, 0xb3, 0x62, 0x5b, 0x77, 0x26, + 0xd3, 0x71, 0x6e, 0xf1, 0xdd, 0x9a, 0xa2, 0x6d, 0xc8, 0xa6, 0x41, 0x20, 0x1f, 0x39, 0xfc, 0x85, + 0x68, 0x1a, 0x8b, 0x54, 0x8c, 0xc9, 0x2d, 0xb8, 0xe8, 0x05, 0xcf, 0x42, 0x1e, 0xd6, 0x63, 0xe6, + 0x7a, 0x78, 0xf8, 0x9e, 0x75, 0x22, 0x56, 0x9d, 0x13, 0x2c, 0xa3, 0x96, 0x48, 0x1d, 0xca, 0x92, + 0x7c, 0xdc, 0x3e, 0xf9, 0x19, 0x6b, 0xf0, 0xa4, 0x5a, 0x10, 0xfe, 0xac, 0x65, 0x6c, 0xe1, 0x40, + 0x17, 0xa0, 0x03, 0xf2, 0x6f, 0x55, 0xed, 0xd6, 0x9f, 0x0d, 0x58, 0xea, 0x53, 0x4f, 0x3e, 0xed, + 0x4b, 0xd5, 0xcd, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0x28, 0xb8, 0x5e, 0x8b, 0x25, 0x5c, 0xa4, + 0x6a, 0x71, 0x6b, 0xe3, 0xdb, 0xef, 0xde, 0x9f, 0xf9, 0xc7, 0x77, 0xef, 0xaf, 0x6b, 0x57, 0x4d, + 0x18, 0xb1, 0xa0, 0x11, 0x06, 0xdc, 0xf1, 0x02, 0x16, 0xe3, 0x05, 0xfb, 0xb1, 0x14, 0xb1, 0x77, + 0xc4, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0x81, 0x73, 0xca, 0x44, 0x9e, 0x16, 0xa9, 0x18, 0x5b, 0x1c, + 0x96, 0x28, 0xe3, 0xed, 0x38, 0xa0, 0xec, 0xe7, 0x6d, 0x64, 0xfa, 0x5e, 0xda, 0x48, 0xc4, 0xa6, + 0xc7, 0x35, 0x74, 0x64, 0xa4, 0x4a, 0x80, 0xac, 0xc1, 0x1c, 0x8b, 0xe3, 0x30, 0x56, 0xc5, 0x43, + 0x6c, 0x79, 0xd5, 0xdb, 0x71, 0xd4, 0xb0, 0x8f, 0xc5, 0x55, 0x4f, 0x25, 0x83, 0x55, 0x81, 0x72, + 0x6a, 0x35, 0x89, 0xc2, 0x20, 0x61, 0xd6, 0x32, 0x86, 0x2e, 0x6a, 0xf3, 0x44, 0xed, 0xc3, 0xfa, + 0xab, 0x01, 0xe5, 0x94, 0x22, 0x79, 0xc8, 0x17, 0x50, 0xec, 0xb5, 0x86, 0xb4, 0x07, 0xdc, 0xcf, + 0x0c, 0xaa, 0x2e, 0xaf, 0xf5, 0x15, 0xd5, 0x12, 0x74, 0x75, 0xe6, 0x13, 0xa8, 0x0c, 0x32, 0x8c, + 0xc8, 0xfe, 0x07, 0xfd, 0x0d, 0x62, 0xb0, 0x5f, 0x69, 0xa7, 0xe1, 0x5f, 0x06, 0x5c, 0xa6, 0x4c, + 0x60, 0x97, 0x83, 0x53, 0xa7, 0xc5, 0xb6, 0xc3, 0xa0, 0xe9, 0xb5, 0xd2, 0x30, 0x57, 0x44, 0x33, + 0x4c, 0x35, 0x63, 0x5f, 0x5c, 0x83, 0x85, 0xba, 0xef, 0xf0, 0x66, 0x18, 0x9f, 0x2a, 0xe5, 0x25, + 0x54, 0x9e, 0xd2, 0x68, 0x77, 0x95, 0xac, 0x40, 0x51, 0x29, 0x3e, 0x0a, 0xdd, 0x34, 0x9d, 0x3a, + 0x89, 0x54, 0x61, 0xfe, 0x30, 0x6c, 0x3d, 0xc1, 0x64, 0xcb, 0x0a, 0x4b, 0xa7, 0xc4, 0x82, 0x92, + 0x62, 0x8c, 0xbb, 0xd5, 0x35, 0x47, 0xfb, 0x68, 0xe4, 0x3d, 0x58, 0x3c, 0x66, 0x49, 0xe2, 0x85, + 0xc1, 0xc1, 0x4e, 0xb5, 0x20, 0xe4, 0x7b, 0x04, 0xd4, 0x7d, 0xcc, 0xc3, 0x98, 0x1d, 0xec, 0x54, + 0xe7, 0xa5, 0x6e, 0x35, 0xb5, 0x7e, 0x61, 0x80, 0x39, 0xca, 0x63, 0x95, 
0xbe, 0xcf, 0xa0, 0x20, + 0x0f, 0xa4, 0xf4, 0xfa, 0x7f, 0x3b, 0xca, 0xf2, 0x97, 0xbc, 0x0b, 0x05, 0xa9, 0x5d, 0x55, 0xa1, + 0x9a, 0x59, 0xbf, 0x2a, 0x40, 0xe9, 0x18, 0x37, 0x90, 0xc6, 0xd9, 0x06, 0xe8, 0xa5, 0x47, 0x1d, + 0xe9, 0xc1, 0xa4, 0x69, 0x1c, 0xc4, 0x84, 0x85, 0x3d, 0x75, 0x7c, 0xd4, 0x0d, 0xd6, 0x9d, 0x93, + 0xcf, 0xa1, 0x98, 0x8e, 0x9f, 0x46, 0xbc, 0x9a, 0x13, 0xe7, 0xef, 0x5e, 0xc6, 0xf9, 0xd3, 0x77, + 0x62, 0x6b, 0xa2, 0xea, 0xf4, 0x69, 0x14, 0x72, 0x13, 0x2e, 0x38, 0xbe, 0x1f, 0x9e, 0xa9, 0x92, + 0x12, 0xc5, 0x21, 0x92, 0xb3, 0x40, 0x87, 0x17, 0xb0, 0x55, 0x6a, 0xc4, 0x47, 0x71, 0xec, 0x74, + 0xf0, 0x34, 0x15, 0x04, 0xff, 0xa8, 0x25, 0xec, 0x5a, 0x7b, 0x5e, 0xe0, 0xf8, 0x55, 0x10, 0x3c, + 0x72, 0x82, 0xa7, 0x61, 0xf7, 0x75, 0x14, 0xc6, 0x9c, 0xc5, 0x8f, 0x38, 0x8f, 0xab, 0x45, 0x11, + 0xcc, 0x3e, 0x1a, 0xa9, 0x43, 0x69, 0xdb, 0x69, 0xbc, 0x60, 0x07, 0xa7, 0x48, 0x4c, 0x91, 0x55, + 0x56, 0x2f, 0x13, 0xec, 0x4f, 0x23, 0x1d, 0x52, 0xe9, 0x1a, 0x48, 0x03, 0xca, 0xa9, 0xeb, 0xb2, + 0x42, 0xab, 0x4b, 0x42, 0xe7, 0x83, 0x69, 0x43, 0x29, 0xa5, 0xa5, 0x89, 0x01, 0x95, 0x98, 0xc8, + 0x5d, 0x2c, 0x46, 0x87, 0xb3, 0x6a, 0x59, 0xf8, 0xdc, 0x9d, 0x93, 0x23, 0x28, 0x1f, 0x0b, 0x40, + 0x5e, 0x47, 0x18, 0xee, 0xb1, 0xa4, 0xba, 0x2c, 0x36, 0x70, 0x7d, 0x78, 0x03, 0x3a, 0x70, 0xb7, + 0x05, 0x7b, 0x87, 0x0e, 0x08, 0x9b, 0x0f, 0xa1, 0x32, 0x98, 0xdc, 0x69, 0x80, 0x91, 0xf9, 0x43, + 0xb8, 0x38, 0xc2, 0xa3, 0xb7, 0x6a, 0x3e, 0x7f, 0x32, 0xe0, 0xc2, 0x50, 0x1a, 0xf0, 0x02, 0x10, + 0x45, 0x2f, 0x55, 0x8a, 0x31, 0x39, 0x82, 0x39, 0x4c, 0x73, 0xa2, 0xa0, 0xc0, 0xe6, 0x34, 0x79, + 0xb5, 0x85, 0xa4, 0x8c, 0xbf, 0xd4, 0x62, 0xde, 0x03, 0xe8, 0x11, 0xa7, 0x82, 0x87, 0x5f, 0xc0, + 0x92, 0x4a, 0xb2, 0xea, 0x17, 0x15, 0x89, 0x2a, 0x94, 0x30, 0xa2, 0x86, 0xde, 0xdd, 0x94, 0x9b, + 0xf2, 0x6e, 0xb2, 0xbe, 0x82, 0x65, 0xca, 0x1c, 0x77, 0xcf, 0xf3, 0xd9, 0xf9, 0x2d, 0x18, 0x8b, + 0xdf, 0xf3, 0x59, 0x1d, 0x91, 0x49, 0x5a, 0xfc, 0x6a, 0x4e, 0xee, 0xc3, 0x1c, 0x75, 0x82, 0x16, + 0x53, 0xa6, 0x3f, 0xc8, 0x30, 0x2d, 0x8c, 0x20, 0x2f, 0x95, 0x22, 0xd6, 0x03, 0x58, 0xec, 0xd2, + 0xb0, 0x75, 0x3d, 0x6d, 0x36, 0x13, 0x26, 0xdb, 0x60, 0x8e, 0xaa, 0x19, 0xd2, 0x0f, 0x59, 0xd0, + 0x52, 0xa6, 0x73, 0x54, 0xcd, 0xac, 0x55, 0x84, 0xf3, 0xe9, 0xce, 0x55, 0x68, 0x08, 0xe4, 0x77, + 0x10, 0xbe, 0x19, 0xa2, 0x5e, 0xc5, 0xd8, 0x72, 0xf1, 0x4e, 0x75, 0xdc, 0x1d, 0x2f, 0x3e, 0xdf, + 0xc1, 0x2a, 0xcc, 0xef, 0x78, 0xb1, 0xe6, 0x5f, 0x3a, 0x25, 0xab, 0x78, 0xdb, 0x36, 0xfc, 0xb6, + 0x8b, 0xde, 0x72, 0x16, 0x07, 0xea, 0x5a, 0x19, 0xa0, 0x5a, 0x9f, 0xc8, 0x38, 0x0a, 0x2b, 0x6a, + 0x33, 0x37, 0x61, 0x9e, 0x05, 0x3c, 0xc6, 0x32, 0x92, 0x57, 0x32, 0xb1, 0xe5, 0x03, 0xd9, 0x16, + 0x0f, 0x64, 0x71, 0xf5, 0xd3, 0x94, 0xc5, 0xda, 0x84, 0x65, 0x24, 0x64, 0x27, 0x82, 0x40, 0x5e, + 0xdb, 0xa4, 0x18, 0x5b, 0xf7, 0xa1, 0xd2, 0x13, 0x54, 0xa6, 0x57, 0x21, 0x8f, 0xd8, 0x54, 0xf5, + 0xf5, 0x51, 0x76, 0xc5, 0xba, 0x75, 0x0d, 0x96, 0xd3, 0xe2, 0x3f, 0xd7, 0xa8, 0x45, 0xa0, 0xd2, + 0x63, 0x52, 0xb0, 0x64, 0x09, 0x8a, 0x75, 0x2f, 0x48, 0x6f, 0x6d, 0xeb, 0x8d, 0x01, 0xa5, 0x7a, + 0x18, 0xf4, 0xee, 0xb4, 0x3a, 0x2c, 0xa7, 0xa5, 0xfb, 0xa8, 0x7e, 0xb0, 0xed, 0x44, 0x69, 0x0c, + 0x56, 0x86, 0xcf, 0x87, 0xfa, 0xc4, 0x60, 0x4b, 0xc6, 0xad, 0x3c, 0x5e, 0x7f, 0x74, 0x50, 0x9c, + 0x7c, 0x0a, 0xf3, 0x87, 0x87, 0x5b, 0x42, 0xd3, 0xec, 0x54, 0x9a, 0x52, 0x31, 0xf2, 0x10, 0xe6, + 0x9f, 0x8b, 0x2f, 0x1f, 0x89, 0xba, 0xa2, 0x46, 0x9c, 0x55, 0x19, 0x21, 0xc9, 0x46, 0x59, 0x23, + 0x8c, 0x5d, 0x9a, 0x0a, 0x59, 0xff, 0x36, 0xa0, 0xf8, 0xdc, 0xe9, 0x21, 0xc2, 0x1e, 0x04, 0x7d, + 
0x8b, 0x7b, 0x5b, 0x41, 0xd0, 0x4b, 0x30, 0xe7, 0xb3, 0x57, 0xcc, 0x57, 0x67, 0x5c, 0x4e, 0x90, + 0x9a, 0xbc, 0x08, 0x63, 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x2e, 0xe3, 0x8e, 0xe7, 0x57, + 0xf3, 0x2b, 0x39, 0xbc, 0xe3, 0xe5, 0x0c, 0x33, 0xd7, 0x8e, 0x7d, 0xf5, 0x2e, 0xc0, 0x21, 0xb1, + 0x20, 0xef, 0x05, 0xcd, 0x50, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0x7d, 0x10, 0x34, 0x43, 0x2a, + 0xd6, 0xc8, 0x55, 0x28, 0xc4, 0x58, 0x7f, 0x49, 0x75, 0x5e, 0x04, 0x65, 0x11, 0xb9, 0x64, 0x95, + 0xaa, 0x05, 0xab, 0x0c, 0x25, 0xe9, 0xb7, 0x4a, 0xfe, 0x6f, 0x66, 0xe1, 0xe2, 0x13, 0x76, 0xb6, + 0x9d, 0xfa, 0x95, 0x06, 0x64, 0x05, 0x8a, 0x5d, 0xda, 0xc1, 0x8e, 0x3a, 0x42, 0x3a, 0x09, 0x8d, + 0x1d, 0x85, 0xed, 0x80, 0xa7, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x1d, 0xe6, 0x9f, + 0x30, 0x7e, 0x16, 0xc6, 0x2f, 0x85, 0xd7, 0xe5, 0x8d, 0x22, 0xf2, 0x3c, 0x61, 0x1c, 0x01, 0x1c, + 0x4d, 0xd7, 0x10, 0x15, 0x46, 0x29, 0x2a, 0xcc, 0x8f, 0x42, 0x85, 0xe9, 0x2a, 0xd9, 0x84, 0x62, + 0x23, 0x0c, 0x12, 0x1e, 0x3b, 0x1e, 0x1a, 0x9e, 0x13, 0xcc, 0xef, 0x20, 0xb3, 0x4c, 0xec, 0x76, + 0x6f, 0x91, 0xea, 0x9c, 0x64, 0x1d, 0x80, 0xbd, 0xe6, 0xb1, 0xb3, 0x1f, 0x26, 0xdd, 0x17, 0x14, + 0xa0, 0x1c, 0x12, 0x0e, 0xea, 0x54, 0x5b, 0xb5, 0xde, 0x85, 0x4b, 0xfd, 0x11, 0x51, 0xa1, 0x7a, + 0x00, 0xff, 0x47, 0x99, 0xcf, 0x9c, 0x84, 0x4d, 0x1f, 0x2d, 0xcb, 0x84, 0xea, 0xb0, 0xb0, 0x52, + 0xfc, 0x9f, 0x1c, 0x14, 0x77, 0x5f, 0xb3, 0xc6, 0x11, 0x4b, 0x12, 0xa7, 0x25, 0xb0, 0x69, 0x3d, + 0x0e, 0x1b, 0x2c, 0x49, 0xba, 0xba, 0x7a, 0x04, 0xf2, 0x7d, 0xc8, 0x1f, 0x04, 0x1e, 0x57, 0xf7, + 0xe3, 0x6a, 0xe6, 0xd3, 0xc0, 0xe3, 0x4a, 0xe7, 0xfe, 0x0c, 0x15, 0x52, 0xe4, 0x3e, 0xe4, 0xb1, + 0xbb, 0x4c, 0xd2, 0xe1, 0x5d, 0x4d, 0x16, 0x65, 0xc8, 0x96, 0xf8, 0x84, 0xe7, 0x7d, 0xc9, 0x54, + 0x96, 0xd6, 0xb2, 0xaf, 0x26, 0xef, 0x4b, 0xd6, 0xd3, 0xa0, 0x24, 0xc9, 0x2e, 0x22, 0x6b, 0x27, + 0xe6, 0xcc, 0x55, 0xd9, 0xbb, 0x91, 0x05, 0x88, 0x24, 0x67, 0x4f, 0x4b, 0x2a, 0x8b, 0x41, 0xd8, + 0x7d, 0xed, 0x71, 0x55, 0x0d, 0x59, 0x41, 0x40, 0x36, 0xcd, 0x11, 0x9c, 0xa2, 0xf4, 0x4e, 0x18, + 0x30, 0x81, 0xed, 0xb3, 0xa5, 0x91, 0x4d, 0x93, 0xc6, 0x29, 0x86, 0xe1, 0xd8, 0x6b, 0x21, 0xce, + 0x5c, 0x18, 0x1b, 0x06, 0xc9, 0xa8, 0x85, 0x41, 0x12, 0xb6, 0xe6, 0x61, 0x4e, 0xc0, 0x20, 0xeb, + 0x77, 0x06, 0x14, 0xb5, 0x3c, 0x4d, 0x50, 0x77, 0xef, 0x41, 0x1e, 0x9f, 0xef, 0x2a, 0xff, 0x0b, + 0xa2, 0xea, 0x18, 0x77, 0xa8, 0xa0, 0x62, 0xe3, 0xd8, 0x73, 0x65, 0x53, 0x5c, 0xa2, 0x38, 0x44, + 0xca, 0x33, 0xde, 0x11, 0x29, 0x5b, 0xa0, 0x38, 0x24, 0x37, 0x61, 0xe1, 0x98, 0x35, 0xda, 0xb1, + 0xc7, 0x3b, 0x22, 0x09, 0xe5, 0x8d, 0x8a, 0x68, 0x27, 0x8a, 0x26, 0x8a, 0xb3, 0xcb, 0x61, 0x3d, + 0xc6, 0xc3, 0xd9, 0xdb, 0x20, 0x81, 0xfc, 0x36, 0xbe, 0xc8, 0x70, 0x67, 0x4b, 0x54, 0x8c, 0xf1, + 0x51, 0xbc, 0x3b, 0xee, 0x51, 0xbc, 0x9b, 0x3e, 0x8a, 0xfb, 0x93, 0x8a, 0xb7, 0x8f, 0x16, 0x64, + 0xeb, 0x11, 0x2c, 0x76, 0x0f, 0x1e, 0x29, 0xc3, 0xec, 0x9e, 0xab, 0x2c, 0xcd, 0xee, 0xb9, 0xe8, + 0xca, 0xee, 0xd3, 0x3d, 0x61, 0x65, 0x81, 0xe2, 0xb0, 0x0b, 0x12, 0x72, 0x1a, 0x48, 0xd8, 0xc4, + 0xe7, 0xbe, 0x76, 0xfa, 0x90, 0x89, 0x86, 0x67, 0x49, 0xba, 0x65, 0x1c, 0x4b, 0x37, 0xfc, 0x44, + 0xe8, 0x12, 0x6e, 0xf8, 0x89, 0x75, 0x0d, 0x96, 0xfa, 0xf2, 0x85, 0x4c, 0xe2, 0x7d, 0xa9, 0xb0, + 0x24, 0x8e, 0xd7, 0x19, 0x2c, 0x0f, 0x7c, 0x72, 0x22, 0xd7, 0xa1, 0x20, 0x3f, 0x6d, 0x54, 0x66, + 0xcc, 0xcb, 0x5f, 0x7f, 0xb3, 0xf2, 0xce, 0x00, 0x83, 0x5c, 0x44, 0xb6, 0xad, 0x76, 0xe0, 0xfa, + 0xac, 0x62, 0x8c, 0x64, 0x93, 0x8b, 0x66, 0xfe, 0xd7, 0x7f, 0xb8, 0x32, 0xb3, 0xee, 0xc0, 0x85, + 0xa1, 0xcf, 0x25, 0xe4, 
0x1a, 0xe4, 0x8f, 0x99, 0xdf, 0x4c, 0xcd, 0x0c, 0x31, 0xe0, 0x22, 0xb9, + 0x0a, 0x39, 0xea, 0x9c, 0x55, 0x0c, 0xb3, 0xfa, 0xf5, 0x37, 0x2b, 0x97, 0x86, 0xbf, 0xb9, 0x38, + 0x67, 0xd2, 0xc4, 0xc6, 0x5f, 0x00, 0x16, 0x0f, 0x0f, 0xb7, 0xb6, 0x62, 0xcf, 0x6d, 0x31, 0xf2, + 0x4b, 0x03, 0xc8, 0xf0, 0xc3, 0x96, 0xdc, 0xc9, 0xae, 0xf1, 0xd1, 0x2f, 0x7f, 0xf3, 0xee, 0x94, + 0x52, 0x0a, 0x69, 0x7c, 0x0e, 0x73, 0x02, 0x1e, 0x93, 0x0f, 0x27, 0x7c, 0x25, 0x99, 0x6b, 0xe3, + 0x19, 0x95, 0xee, 0x06, 0x2c, 0xa4, 0x10, 0x93, 0xac, 0x67, 0x6e, 0xaf, 0x0f, 0x41, 0x9b, 0x1f, + 0x4d, 0xc4, 0xab, 0x8c, 0xfc, 0x14, 0xe6, 0x15, 0x72, 0x24, 0x37, 0xc6, 0xc8, 0xf5, 0x30, 0xac, + 0xb9, 0x3e, 0x09, 0x6b, 0xcf, 0x8d, 0x14, 0x21, 0x66, 0xba, 0x31, 0x80, 0x3f, 0x33, 0xdd, 0x18, + 0x82, 0x9c, 0x8d, 0xde, 0xbb, 0x32, 0xd3, 0xc8, 0x00, 0xde, 0xcc, 0x34, 0x32, 0x08, 0x3b, 0xc9, + 0x73, 0xc8, 0x23, 0xec, 0x24, 0x59, 0xed, 0x57, 0xc3, 0xa5, 0x66, 0xd6, 0x99, 0xe8, 0xc3, 0xab, + 0x3f, 0xc1, 0x6b, 0x4a, 0x7c, 0x42, 0xc8, 0xbe, 0xa0, 0xb4, 0x2f, 0x82, 0xe6, 0x8d, 0x09, 0x38, + 0x7b, 0xea, 0xd5, 0xf3, 0x7b, 0x6d, 0x82, 0xcf, 0x72, 0xe3, 0xd5, 0x0f, 0x7c, 0x00, 0x0c, 0xa1, + 0xa4, 0xa3, 0x0f, 0x62, 0x67, 0x88, 0x8e, 0x00, 0x6e, 0x66, 0x6d, 0x62, 0x7e, 0x65, 0xf0, 0x2b, + 0x7c, 0x7b, 0xf5, 0x23, 0x13, 0xb2, 0x91, 0x19, 0x8e, 0x91, 0x18, 0xc8, 0xbc, 0x3d, 0x95, 0x8c, + 0x32, 0xee, 0x48, 0xe4, 0xa3, 0xd0, 0x0d, 0xc9, 0xbe, 0xc8, 0xbb, 0x08, 0xc9, 0x9c, 0x90, 0x6f, + 0xcd, 0xb8, 0x65, 0xe0, 0x39, 0x43, 0xc4, 0x9b, 0xa9, 0x5b, 0x7b, 0x0a, 0x64, 0x9e, 0x33, 0x1d, + 0x3a, 0x6f, 0x95, 0xbe, 0x7d, 0x73, 0xc5, 0xf8, 0xfb, 0x9b, 0x2b, 0xc6, 0x3f, 0xdf, 0x5c, 0x31, + 0x4e, 0x0a, 0xe2, 0x7f, 0xce, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x91, 0xe5, 0xca, + 0x70, 0x1e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2414,6 +2806,8 @@ type LLBBridgeClient interface { ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) // apicaps:CapStatFile StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) + // apicaps:CapGatewayEvaluate + Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) // apicaps:CapFrontendInputs @@ -2478,6 +2872,15 @@ func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opt return out, nil } +func (c *lLBBridgeClient) Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) { + out := new(EvaluateResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Evaluate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { out := new(PongResponse) err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) 
@@ -2575,6 +2978,8 @@ type LLBBridgeServer interface { ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) // apicaps:CapStatFile StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) + // apicaps:CapGatewayEvaluate + Evaluate(context.Context, *EvaluateRequest) (*EvaluateResponse, error) Ping(context.Context, *PingRequest) (*PongResponse, error) Return(context.Context, *ReturnRequest) (*ReturnResponse, error) // apicaps:CapFrontendInputs @@ -2605,6 +3010,9 @@ func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRe func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") } +func (*UnimplementedLLBBridgeServer) Evaluate(ctx context.Context, req *EvaluateRequest) (*EvaluateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Evaluate not implemented") +} func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } @@ -2721,6 +3129,24 @@ func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _LLBBridge_Evaluate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EvaluateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Evaluate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Evaluate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Evaluate(ctx, req.(*EvaluateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PingRequest) if err := dec(in); err != nil { @@ -2879,6 +3305,10 @@ var _LLBBridge_serviceDesc = grpc.ServiceDesc{ MethodName: "StatFile", Handler: _LLBBridge_StatFile_Handler, }, + { + MethodName: "Evaluate", + Handler: _LLBBridge_Evaluate_Handler, + }, { MethodName: "Ping", Handler: _LLBBridge_Ping_Handler, @@ -2939,6 +3369,32 @@ func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Attestations) > 0 { + for k := range m.Attestations { + v := m.Attestations[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } if len(m.Metadata) > 0 { for k := range m.Metadata { v := m.Metadata[k] @@ -3194,7 +3650,7 @@ func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { +func (m *Attestations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3204,12 +3660,12 @@ func (m *ReturnRequest) Marshal() 
(dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Attestations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Attestations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3218,34 +3674,24 @@ func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Attestation) > 0 { + for iNdEx := len(m.Attestation) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attestation[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { +func (m *Attestation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3255,12 +3701,12 @@ func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *Attestation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Attestation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3269,10 +3715,76 @@ func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.InTotoSubjects) > 0 { + for iNdEx := len(m.InTotoSubjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InTotoSubjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.InTotoPredicateType) > 0 { + i -= len(m.InTotoPredicateType) + copy(dAtA[i:], m.InTotoPredicateType) + i = encodeVarintGateway(dAtA, i, uint64(len(m.InTotoPredicateType))) + i-- + dAtA[i] = 0x2a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 + } + if m.Ref != nil { + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Kind != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, 
nil } -func (m *InputsRequest) Marshal() (dAtA []byte, err error) { +func (m *InTotoSubject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3282,12 +3794,12 @@ func (m *InputsRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *InTotoSubject) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InTotoSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3296,18 +3808,144 @@ func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil -} - -func (m *InputsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Digest) > 0 { + for iNdEx := len(m.Digest) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Digest[iNdEx]) + copy(dAtA[i:], m.Digest[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Kind != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InputsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InputsRequest) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InputsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() @@ -3376,6 +4014,25 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.StoreID) > 0 { + i -= len(m.StoreID) + copy(dAtA[i:], m.StoreID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.StoreID))) + i-- + dAtA[i] = 0x3a + } + if len(m.SessionID) > 0 { + i -= len(m.SessionID) + copy(dAtA[i:], m.SessionID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.SessionID))) + i-- + dAtA[i] = 0x32 + } + if m.ResolverType != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.ResolverType)) + i-- + dAtA[i] = 0x28 + } if len(m.LogName) > 0 { i -= len(m.LogName) copy(dAtA[i:], m.LogName) @@ -3477,6 +4134,20 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.SourcePolicies) > 0 { + for iNdEx := len(m.SourcePolicies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SourcePolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } if m.Evaluate { i-- if m.Evaluate { @@ -3564,15 +4235,6 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - if len(m.ImportCacheRefsDeprecated) > 0 { - for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportCacheRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if len(m.FrontendOpt) > 0 { for k := range m.FrontendOpt { v := m.FrontendOpt[k] @@ -4006,6 +4668,67 @@ func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *EvaluateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvaluateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvaluateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvaluateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvaluateResponse) MarshalTo(dAtA []byte) (int, error) 
{ + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvaluateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + func (m *PingRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4626,20 +5349,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if len(m.Fds) > 0 { - dAtA26 := make([]byte, len(m.Fds)*10) - var j25 int + dAtA28 := make([]byte, len(m.Fds)*10) + var j27 int for _, num := range m.Fds { for num >= 1<<7 { - dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + dAtA28[j27] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j25++ + j27++ } - dAtA26[j25] = uint8(num) - j25++ + dAtA28[j27] = uint8(num) + j27++ } - i -= j25 - copy(dAtA[i:], dAtA26[:j25]) - i = encodeVarintGateway(dAtA, i, uint64(j25)) + i -= j27 + copy(dAtA[i:], dAtA28[:j27]) + i = encodeVarintGateway(dAtA, i, uint64(j27)) i-- dAtA[i] = 0x1a } @@ -4915,6 +5638,19 @@ func (m *Result) Size() (n int) { n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) } } + if len(m.Attestations) > 0 { + for k, v := range m.Attestations { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5032,19 +5768,17 @@ func (m *RefMap) Size() (n int) { return n } -func (m *ReturnRequest) Size() (n int) { +func (m *Attestations) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.Error != nil { - l = m.Error.Size() - n += 1 + l + sovGateway(uint64(l)) + if len(m.Attestation) > 0 { + for _, e := range m.Attestation { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -5052,26 +5786,116 @@ func (m *ReturnRequest) Size() (n int) { return n } -func (m *ReturnResponse) Size() (n int) { +func (m *Attestation) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Kind != 0 { + n += 1 + sovGateway(uint64(m.Kind)) } - return n -} - -func (m *InputsRequest) Size() (n int) { - if m == nil { - return 0 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovGateway(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Ref != nil { + l = m.Ref.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.InTotoPredicateType) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.InTotoSubjects) > 0 { + for _, e := range m.InTotoSubjects { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InTotoSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovGateway(uint64(m.Kind)) + } + if len(m.Digest) > 0 { + for _, s := range 
m.Digest { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InputsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } return n } @@ -5123,6 +5947,17 @@ func (m *ResolveImageConfigRequest) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + if m.ResolverType != 0 { + n += 1 + sovGateway(uint64(m.ResolverType)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.StoreID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5171,12 +6006,6 @@ func (m *SolveRequest) Size() (n int) { n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) } } - if len(m.ImportCacheRefsDeprecated) > 0 { - for _, s := range m.ImportCacheRefsDeprecated { - l = len(s) - n += 1 + l + sovGateway(uint64(l)) - } - } if m.AllowResultReturn { n += 2 } @@ -5212,6 +6041,12 @@ func (m *SolveRequest) Size() (n int) { if m.Evaluate { n += 2 } + if len(m.SourcePolicies) > 0 { + for _, e := range m.SourcePolicies { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5398,6 +6233,34 @@ func (m *StatFileResponse) Size() (n int) { return n } +func (m *EvaluateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EvaluateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PingRequest) Size() (n int) { if m == nil { return 0 @@ -6109,6 +6972,135 @@ func (m *Result) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attestations == nil { + m.Attestations = make(map[string]*Attestations) + } + var mapkey string + var mapvalue *Attestations + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Attestations{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attestations[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -6608,7 +7600,7 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReturnRequest) Unmarshal(dAtA []byte) error { +func (m *Attestations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6631,15 +7623,15 @@ func (m *ReturnRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Attestations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Attestations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attestation", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6666,15 +7658,566 @@ func (m *ReturnRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Result{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Attestation = append(m.Attestation, &Attestation{}) + if err := m.Attestation[len(m.Attestation)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attestation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attestation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attestation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= AttestationKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGateway + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGateway + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + 
copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ref == nil { + m.Ref = &Ref{} + } + if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InTotoPredicateType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InTotoPredicateType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InTotoSubjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InTotoSubjects = append(m.InTotoSubjects, &InTotoSubject{}) + if err := m.InTotoSubjects[len(m.InTotoSubjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InTotoSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InTotoSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InTotoSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= InTotoSubjectKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = append(m.Digest, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReturnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Result{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int @@ -7026,25 +8569,93 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = 
&pb.Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7072,13 +8683,13 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + m.ResolveMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -7088,31 +8699,46 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Platform == nil { - m.Platform = &pb.Platform{} + m.LogName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResolverType", wireType) } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ResolverType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResolverType |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7140,11 +8766,11 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResolveMode = string(dAtA[iNdEx:postIndex]) + m.SessionID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StoreID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7172,7 +8798,7 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LogName = string(dAtA[iNdEx:postIndex]) + m.StoreID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -7537,38 +9163,6 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } m.FrontendOpt[mapkey] = mapvalue iNdEx = postIndex - case 4: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefsDeprecated", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImportCacheRefsDeprecated = append(m.ImportCacheRefsDeprecated, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) @@ -7846,6 +9440,40 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } } m.Evaluate = bool(v != 0) + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePolicies = append(m.SourcePolicies, &pb1.Policy{}) + if err := m.SourcePolicies[len(m.SourcePolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -8956,6 +10584,140 @@ func (m *StatFileResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *EvaluateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvaluateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvaluateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EvaluateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGateway
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EvaluateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EvaluateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGateway(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGateway
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *PingRequest) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -9065,7 +10827,7 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error {
 		if postIndex > l {
 			return io.ErrUnexpectedEOF
 		}
-		m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{})
+		m.FrontendAPICaps = append(m.FrontendAPICaps, pb2.APICap{})
 		if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 			return err
 		}
@@ -9099,7 +10861,7 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error {
 		if postIndex > l {
 			return io.ErrUnexpectedEOF
 		}
-		m.LLBCaps = append(m.LLBCaps, pb1.APICap{})
+		m.LLBCaps = append(m.LLBCaps, pb2.APICap{})
 		if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 			return err
 		}
diff --git a/frontend/gateway/pb/gateway.proto b/frontend/gateway/pb/gateway.proto
index 31aaf3b20d85..2e55f1db8688 100644
--- a/frontend/gateway/pb/gateway.proto
+++ b/frontend/gateway/pb/gateway.proto
@@ -8,6 +8,7 @@ import "github.com/moby/buildkit/solver/pb/ops.proto";
 import "github.com/moby/buildkit/api/types/worker.proto";
 import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
 import "github.com/tonistiigi/fsutil/types/stat.proto";
+import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
 
 option (gogoproto.sizer_all) = true;
 
@@ -25,6 +26,8 @@ service LLBBridge {
 	rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
 	// apicaps:CapStatFile
 	rpc StatFile(StatFileRequest) returns (StatFileResponse);
+	// apicaps:CapGatewayEvaluate
+	rpc Evaluate(EvaluateRequest) returns (EvaluateResponse);
 	rpc Ping(PingRequest) returns (PongResponse);
 	rpc Return(ReturnRequest) returns (ReturnResponse);
 	// apicaps:CapFrontendInputs
@@ -48,6 +51,8 @@ message Result {
 		RefMap refs = 4;
 	}
 	map<string, bytes> metadata = 10;
+	// 11 was used during development and is reserved for old attestation format
+	map<string, Attestations> attestations = 12;
 }
 
 message RefMapDeprecated {
@@ -63,6 +68,39 @@ message RefMap {
 	map<string, Ref> refs = 1;
 }
 
+message Attestations {
+	repeated Attestation attestation = 1;
+}
+
+message Attestation {
+	AttestationKind kind = 1;
+	map<string, bytes> metadata = 2;
+
+	Ref ref = 3;
+	string path = 4;
+	string inTotoPredicateType = 5;
+	repeated InTotoSubject inTotoSubjects = 6;
+}
+
+enum AttestationKind {
+	option (gogoproto.goproto_enum_prefix) = false;
+	InToto = 0 [(gogoproto.enumvalue_customname) = "AttestationKindInToto"];
+	Bundle = 1 [(gogoproto.enumvalue_customname) = "AttestationKindBundle"];
+}
+
+message InTotoSubject {
+	InTotoSubjectKind kind = 1;
+
+	repeated string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	string name = 3;
+}
+
+enum InTotoSubjectKind {
+	option (gogoproto.goproto_enum_prefix) = false;
+	Self = 0 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindSelf"];
+	Raw = 1 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindRaw"];
+}
+
 message ReturnRequest {
 	Result result = 1;
 	google.rpc.Status error = 2;
@@ -83,6 +121,9 @@ message ResolveImageConfigRequest {
 	pb.Platform Platform = 2;
 	string ResolveMode = 3;
 	string LogName = 4;
+	int32 ResolverType = 5;
+	string SessionID = 6;
+	string StoreID = 7;
 }
 
 message ResolveImageConfigResponse {
@@ -94,11 +135,7 @@ message SolveRequest {
 	pb.Definition Definition = 1;
 	string Frontend = 2;
 	map<string, string> FrontendOpt = 3;
-	// ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0.
-	// When ImportCacheRefsDeprecated is set, the solver appends
-	// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
-	// for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
-	repeated string ImportCacheRefsDeprecated = 4;
+	// 4 was removed in BuildKit v0.11.0.
 	bool allowResultReturn = 5;
 	bool allowResultArrayRef = 6;
 
@@ -113,6 +150,8 @@ message SolveRequest {
 	map<string, pb.Definition> FrontendInputs = 13;
 	bool Evaluate = 14;
+
+	repeated moby.buildkit.v1.sourcepolicy.Policy SourcePolicies = 15;
 }
 
 // CacheOptionsEntry corresponds to the control.CacheOptionsEntry
@@ -165,6 +204,13 @@ message StatFileResponse {
 	fsutil.types.Stat stat = 1;
 }
 
+message EvaluateRequest {
+	string Ref = 1;
+}
+
+message EvaluateResponse {
+}
+
 message PingRequest{
 }
 message PongResponse{
diff --git a/frontend/result.go b/frontend/result.go
deleted file mode 100644
index 5afc10c9f89f..000000000000
--- a/frontend/result.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package frontend
-
-import (
-	"github.com/moby/buildkit/solver"
-)
-
-type Result struct {
-	Ref      solver.ResultProxy
-	Refs     map[string]solver.ResultProxy
-	Metadata map[string][]byte
-}
-
-func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) {
-	if r.Ref != nil {
-		err = fn(r.Ref)
-	}
-	for _, r := range r.Refs {
-		if r != nil {
-			if err1 := fn(r); err1 != nil && err == nil {
-				err = err1
-			}
-		}
-	}
-	return err
-}
diff --git a/frontend/subrequests/describe.go b/frontend/subrequests/describe.go
index cc8053ed24d6..832c9a839ff9 100644
--- a/frontend/subrequests/describe.go
+++ b/frontend/subrequests/describe.go
@@ -3,6 +3,10 @@ package subrequests
 import (
 	"context"
 	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"text/tabwriter"
 
 	"github.com/moby/buildkit/frontend/gateway/client"
 	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
@@ -18,9 +22,8 @@ var SubrequestsDescribeDefinition = Request{
 	Type:        TypeRPC,
 	Description: "List available subrequest types",
 	Metadata: []Named{
-		{
-			Name: "result.json",
-		},
+		{Name: "result.json"},
+		{Name: "result.txt"},
 	},
 }
 
@@ -61,3 +64,18 @@ func Describe(ctx context.Context, c client.Client) ([]Request, error) {
 	}
 	return reqs, nil
 }
+
+func PrintDescribe(dt []byte, w io.Writer) error {
+	var d []Request
+	if err := json.Unmarshal(dt, &d); err != nil {
+		return err
+	}
+
+	tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
+	fmt.Fprintf(tw, "NAME\tVERSION\tDESCRIPTION\n")
+
+	for _, r := range d {
fmt.Fprintf(tw, "%s\t%s\t%s\n", strings.TrimPrefix(r.Name, "frontend."), r.Version, r.Description) + } + return tw.Flush() +} diff --git a/frontend/subrequests/outline/outline.go b/frontend/subrequests/outline/outline.go new file mode 100644 index 000000000000..c0a376b0f94a --- /dev/null +++ b/frontend/subrequests/outline/outline.go @@ -0,0 +1,146 @@ +package outline + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/tabwriter" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/solver/pb" +) + +const RequestSubrequestsOutline = "frontend.outline" + +var SubrequestsOutlineDefinition = subrequests.Request{ + Name: RequestSubrequestsOutline, + Version: "1.0.0", + Type: subrequests.TypeRPC, + Description: "List all parameters current build target supports", + Opts: []subrequests.Named{ + { + Name: "target", + Description: "Target build stage", + }, + }, + Metadata: []subrequests.Named{ + {Name: "result.json"}, + {Name: "result.txt"}, + }, +} + +type Outline struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Args []Arg `json:"args,omitempty"` + Secrets []Secret `json:"secrets,omitempty"` + SSH []SSH `json:"ssh,omitempty"` + Cache []CacheMount `json:"cache,omitempty"` + Sources [][]byte `json:"sources,omitempty"` +} + +func (o Outline) ToResult() (*client.Result, error) { + res := client.NewResult() + dt, err := json.MarshalIndent(o, "", " ") + if err != nil { + return nil, err + } + res.AddMeta("result.json", dt) + + b := bytes.NewBuffer(nil) + if err := PrintOutline(dt, b); err != nil { + return nil, err + } + res.AddMeta("result.txt", b.Bytes()) + + res.AddMeta("version", []byte(SubrequestsOutlineDefinition.Version)) + return res, nil +} + +type Arg struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Value string `json:"value,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type Secret struct { + Name string `json:"name"` + Required bool `json:"required,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type SSH struct { + Name string `json:"name"` + Required bool `json:"required,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type CacheMount struct { + ID string `json:"ID"` + Location *pb.Location `json:"location,omitempty"` +} + +func PrintOutline(dt []byte, w io.Writer) error { + var o Outline + + if err := json.Unmarshal(dt, &o); err != nil { + return err + } + + if o.Name != "" || o.Description != "" { + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + name := o.Name + if o.Name == "" { + name = "(default)" + } + fmt.Fprintf(tw, "TARGET:\t%s\n", name) + if o.Description != "" { + fmt.Fprintf(tw, "DESCRIPTION:\t%s\n", o.Description) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.Args) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "BUILD ARG\tVALUE\tDESCRIPTION\n") + for _, a := range o.Args { + fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Name, a.Value, a.Description) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.Secrets) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "SECRET\tREQUIRED\n") + for _, s := range o.Secrets { + b := "" + if s.Required { + b = "true" + } + fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.SSH) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "SSH\tREQUIRED\n") + for _, s := range o.SSH { + b := "" + if 
s.Required { + b = "true" + } + fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) + } + tw.Flush() + fmt.Fprintln(tw) + } + + return nil +} diff --git a/frontend/subrequests/targets/targets.go b/frontend/subrequests/targets/targets.go new file mode 100644 index 000000000000..bf00a3b2bc96 --- /dev/null +++ b/frontend/subrequests/targets/targets.go @@ -0,0 +1,84 @@ +package targets + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/tabwriter" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/solver/pb" +) + +const RequestTargets = "frontend.targets" + +var SubrequestsTargetsDefinition = subrequests.Request{ + Name: RequestTargets, + Version: "1.0.0", + Type: subrequests.TypeRPC, + Description: "List all targets current build supports", + Opts: []subrequests.Named{}, + Metadata: []subrequests.Named{ + {Name: "result.json"}, + {Name: "result.txt"}, + }, +} + +type List struct { + Targets []Target `json:"targets"` + Sources [][]byte `json:"sources"` +} + +func (l List) ToResult() (*client.Result, error) { + res := client.NewResult() + dt, err := json.MarshalIndent(l, "", " ") + if err != nil { + return nil, err + } + res.AddMeta("result.json", dt) + + b := bytes.NewBuffer(nil) + if err := PrintTargets(dt, b); err != nil { + return nil, err + } + res.AddMeta("result.txt", b.Bytes()) + + res.AddMeta("version", []byte(SubrequestsTargetsDefinition.Version)) + return res, nil +} + +type Target struct { + Name string `json:"name,omitempty"` + Default bool `json:"default,omitempty"` + Description string `json:"description,omitempty"` + Base string `json:"base,omitempty"` + Platform string `json:"platform,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +func PrintTargets(dt []byte, w io.Writer) error { + var l List + + if err := json.Unmarshal(dt, &l); err != nil { + return err + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + fmt.Fprintf(tw, "TARGET\tDESCRIPTION\n") + + for _, t := range l.Targets { + name := t.Name + if name == "" && t.Default { + name = "(default)" + } else { + if t.Default { + name = fmt.Sprintf("%s (default)", name) + } + } + fmt.Fprintf(tw, "%s\t%s\n", name, t.Description) + } + + return tw.Flush() +} diff --git a/go.mod b/go.mod index b3129c69662f..d2c1cecb6829 100644 --- a/go.mod +++ b/go.mod @@ -1,56 +1,68 @@ module github.com/moby/buildkit -go 1.17 +go 1.18 require ( - github.com/Microsoft/go-winio v0.5.1 - github.com/Microsoft/hcsshim v0.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 + github.com/Microsoft/go-winio v0.5.2 + github.com/Microsoft/hcsshim v0.9.6 github.com/agext/levenshtein v1.2.3 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 + github.com/aws/aws-sdk-go-v2/config v1.15.5 + github.com/aws/aws-sdk-go-v2/credentials v1.12.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 + github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 + github.com/aws/smithy-go v1.11.2 github.com/containerd/console v1.0.3 - github.com/containerd/containerd v1.6.1 - github.com/containerd/continuity v0.2.2 + github.com/containerd/containerd v1.6.18 + github.com/containerd/continuity v0.3.0 github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 - github.com/containerd/go-cni v1.1.3 + github.com/containerd/go-cni v1.1.6 github.com/containerd/go-runc v1.0.0 - github.com/containerd/stargz-snapshotter v0.11.2 - github.com/containerd/stargz-snapshotter/estargz v0.11.2 + 
github.com/containerd/nydus-snapshotter v0.3.1 + github.com/containerd/stargz-snapshotter v0.13.0 + github.com/containerd/stargz-snapshotter/estargz v0.13.0 github.com/containerd/typeurl v1.0.2 - github.com/coreos/go-systemd/v22 v22.3.2 - github.com/docker/cli v20.10.12+incompatible - github.com/docker/distribution v2.8.0+incompatible - github.com/docker/docker v20.10.7+incompatible // master (v21.xx-dev), see replace() + github.com/coreos/go-systemd/v22 v22.4.0 + github.com/docker/cli v23.0.0-rc.1+incompatible + github.com/docker/distribution v2.8.1+incompatible + github.com/docker/docker v23.0.0-rc.1+incompatible github.com/docker/go-connections v0.4.0 - github.com/docker/go-units v0.4.0 - github.com/gofrs/flock v0.7.3 + github.com/docker/go-units v0.5.0 + github.com/gofrs/flock v0.8.1 github.com/gogo/googleapis v1.4.1 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.7 + github.com/google/go-cmp v0.5.9 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/golang-lru v0.5.3 - github.com/klauspost/compress v1.15.0 + github.com/hashicorp/golang-lru v0.5.4 + github.com/in-toto/in-toto-golang v0.5.0 + github.com/klauspost/compress v1.15.12 github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/moby/locker v1.0.1 - github.com/moby/sys/mountinfo v0.6.0 - github.com/moby/sys/signal v0.6.0 + github.com/moby/patternmatcher v0.5.0 + github.com/moby/sys/mountinfo v0.6.2 + github.com/moby/sys/signal v0.7.0 github.com/morikuni/aec v1.0.0 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 - github.com/opencontainers/runc v1.1.0 + github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 + github.com/opencontainers/runc v1.1.3 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 - github.com/opencontainers/selinux v1.10.0 - github.com/pelletier/go-toml v1.9.4 + github.com/opencontainers/selinux v1.10.2 + github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 + github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.5.0 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 - github.com/sirupsen/logrus v1.8.1 - github.com/stretchr/testify v1.7.0 - github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 - github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff + github.com/sirupsen/logrus v1.9.0 + github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f + github.com/stretchr/testify v1.8.0 + github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa + github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 github.com/tonistiigi/go-archvariant v1.0.0 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f @@ -67,61 +79,76 @@ require ( go.opentelemetry.io/otel/sdk v1.4.1 go.opentelemetry.io/otel/trace v1.4.1 go.opentelemetry.io/proto/otlp v0.12.0 - golang.org/x/crypto v0.0.0-20211202192323-5770296d904e - golang.org/x/net v0.0.0-20211216030914-fe4d6282115f - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa - 
google.golang.org/grpc v1.44.0 + golang.org/x/crypto v0.2.0 + golang.org/x/net v0.4.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.3.0 + golang.org/x/time v0.1.0 + google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 + google.golang.org/grpc v1.50.1 + google.golang.org/protobuf v1.28.1 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.16.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/containerd/cgroups v1.0.3 // indirect + github.com/containerd/cgroups v1.0.4 // indirect github.com/containerd/fifo v1.0.0 // indirect github.com/containerd/ttrpc v1.1.0 // indirect - github.com/containernetworking/cni v1.0.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/containernetworking/cni v1.1.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/docker-credential-helpers v0.6.4 // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect - github.com/go-logr/logr v1.2.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.1.0 // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.0 // indirect - github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/moby/sys/mount v0.3.0 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/kylelemons/godebug 
v1.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/moby/sys/mount v0.3.3 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/vbatts/tar-split v0.11.2 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect go.opentelemetry.io/otel/metric v0.27.0 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/protobuf v1.27.1 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - gotest.tools/v3 v3.0.3 // indirect + golang.org/x/text v0.5.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) - -replace github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible diff --git a/go.sum b/go.sum index 80787289e36b..9cb25f9511fe 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,15 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -67,6 +74,14 @@ github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA=
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -84,6 +99,7 @@ github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdA
github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@@ -92,6 +108,8 @@ github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMl
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -102,6 +120,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
@@ -111,6 +130,8 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 h1:XMEdVDFxgulDDl0lQmAZS6j8gRQ/0pJ+ZpXH2FHVtDc=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
@@ -130,8 +151,9 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -143,8 +165,10 @@ github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+V
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
+github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
@@ -184,6 +208,7 @@ github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hC
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
@@ -195,6 +220,42 @@ github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
+github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
+github.com/aws/aws-sdk-go-v2/config v1.15.5 h1:P+xwhr6kabhxDTXTVH9YoHkqjLJ0wVVpIUHtFNr2hjU=
+github.com/aws/aws-sdk-go-v2/config v1.15.5/go.mod h1:ZijHHh0xd/A+ZY53az0qzC5tT46kt4JVCePf2NX9Lk4=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.0 h1:4R/NqlcRFSkR0wxOhgHi+agGpbEr5qMCjn7VqUIJY+E=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.0/go.mod h1:9YWk7VW+eyKsoIL6/CljkTrNVWBSK9pkqOPUuijid4A=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 h1:FP8gquGeGHHdfY6G5llaMQDF+HAf20VKc8opRwmjf04=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4/go.mod h1:u/s5/Z+ohUQOPXl00m2yJVyioWDECsbpXTQlaqSlufc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 h1:JL7cY85hyjlgfA29MMyAlItX+JYIH9XsxgMBS7jtlqA=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10/go.mod h1:p+ul5bLZSDRRXCZ/vePvfmZBH9akozXBJA5oMshWa5U=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 h1:uFWgo6mGJI1n17nbcvSc6fxVuR3xLNqvXt12JCnEcT8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 h1:cnsvEKSoHN4oAN7spMMr0zhEW2MHnhAVpmqQg8E6UcM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 h1:6cZRymlLEIlDTEB0+5+An6Zj1CKt6rSE69tOmFeu1nk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11/go.mod h1:0MR+sS1b/yxsfAPvAESrw8NfwUoxMinDyw6EYR9BS2U=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 h1:C21IDZCm9Yu5xqjb3fKmxDoYvJXtw1DNlOmLZEIlY1M=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1/go.mod h1:l/BbcfqDCT3hePawhy4ZRtewjtdkl6GWtd9/U+1penQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 h1:9LSZqt4v1JiehyZTrQnRFf2mY/awmyYNNY/b7zqtduU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5/go.mod h1:S8TVP66AAkMMdYYCNZGvrdEq9YRm+qLXjio4FqRnrEE=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 h1:b16QW0XWl0jWjLABFc1A+uh145Oqv+xDcObNk0iQgUk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 h1:RE/DlZLYrz1OOmq8F28IXHLksuuvlpzUbvJ+SESCZBI=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4/go.mod h1:oudbsSdDtazNj47z1ut1n37re9hDsKpk2ZI3v7KSxq0=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 h1:LCQKnopq2t4oQS3VKivlYTzAHCTJZZoQICM9fny7KHY=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9/go.mod h1:iMYipLPXlWpBJ0KFX7QJHZ84rBydHBY8as2aQICTPWk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 h1:Uw5wBybFQ1UeA9ts0Y07gbv0ncZnIAyw858tDW0NP2o=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.4/go.mod h1:cPDwJwsP4Kff9mldCXAmddjJL6JGQqtA3Mzer2zyr88=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 h1:+xtV90n3abQmgzk1pS++FdxZTrPEDgQng6e4/56WR2A=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.4/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE=
+github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
+github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -232,7 +293,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -257,12 +317,14 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -277,8 +339,9 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -305,8 +368,10 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
+github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns=
+github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -314,8 +379,9 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
@@ -328,8 +394,9 @@ github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3A
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
-github.com/containerd/go-cni v1.1.3 h1:t0MQwrtM96SH71Md8tH0uKrVE9v+jxkDTbvFSm3B9VE=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -341,15 +408,18 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk=
+github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM=
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
-github.com/containerd/stargz-snapshotter v0.11.2 h1:K1tzEZz1PKT+JWa5GkAGZ2bKMGW8fgqeWEwCsYOovgc=
-github.com/containerd/stargz-snapshotter v0.11.2/go.mod h1:HfhsbZ98KIoqA2GLmibTpRwMF/lq3utZ0ElV9ARqU7M=
+github.com/containerd/stargz-snapshotter v0.13.0 h1:3zr1/IkW1aEo6cMYTQeZ4L2jSuCN+F4kgGfjnuowe4U=
+github.com/containerd/stargz-snapshotter v0.13.0/go.mod h1:01uOvoNzN1T4kV+8HeVt9p29esO5/61x8+VP/KU4fvQ=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
+github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -370,15 +440,18 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo=
github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -392,23 +465,22 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
+github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
@@ -430,22 +502,32 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v20.10.12+incompatible h1:lZlz0uzG+GH+c0plStMUdF/qk3ppmgnswpR5EbqzVGA=
-github.com/docker/cli v20.10.12+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.0-rc.1+incompatible h1:Vl3pcUK4/LFAD56Ys3BrqgAtuwpWd/IO3amuSL0ZbP0=
+github.com/docker/cli v23.0.0-rc.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
-github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible h1:Ptj2To+ezU/mCBUKdYXBQ2r3/2EJojAlOZrsgprF+is=
-github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.0-rc.1+incompatible h1:Dmn88McWuHc7BSNN1s6RtfhMmt6ZPQAYUEf7FhqpiQI=
+github.com/docker/docker v23.0.0-rc.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@@ -455,8 +537,9 @@ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
@@ -470,6 +553,7 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -478,6 +562,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -495,7 +580,6 @@ github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
@@ -513,17 +597,20 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
@@ -570,8 +657,9 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/flock v0.7.3 h1:I0EKY9l8HZCXTMYC4F80vwT6KNypV9uYKP3Alm/hjmQ=
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
@@ -585,8 +673,11 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -625,6 +716,7 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@@ -649,6 +741,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -661,8 +754,10 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
@@ -680,6 +775,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -691,6 +787,10 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -708,6 +808,7 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -724,7 +825,6 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -748,7 +848,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
-github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY=
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d h1:ibbzF2InxMOS+lLCphY9PHNKPURDUBNKaG6ErSq8gJQ=
@@ -773,8 +872,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
-github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@@ -785,8 +884,9 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -797,16 +897,17 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
+github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
-github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg=
@@ -819,6 +920,10 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
@@ -848,8 +953,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -867,8 +972,9 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -907,12 +1013,14 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -920,7 +1028,6 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
@@ -932,27 +1039,31 @@ github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQ
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
-github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
-github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk=
+github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
+github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
-github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
+github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -973,9 +1084,9 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@@ -993,6 +1104,10 @@ github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1ls
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1003,8 +1118,11 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1014,8 +1132,11 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
+github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1024,8 +1145,10 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -1038,25 +1161,32 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
+github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o=
+github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1078,15 +1208,18 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1097,8 +1230,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1112,8 +1246,9 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k=
@@ -1125,12 +1260,13 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1143,17 +1279,21 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= +github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1162,8 +1302,9 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= @@ -1175,6 +1316,9 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= +github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ= +github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1200,8 +1344,9 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1209,8 +1354,10 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1233,10 +1380,10 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1 github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo= -github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 h1:wbyZxD6IPFp0sl5uscMOJRsz5UKGFiNiD16e+MVfKZY= -github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA= -github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff h1:n8i1G5sBFmY8aDteg5Kf2rdU15KnFcS807QrYRM9/yQ= -github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg= +github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8= +github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8= +github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc= +github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= @@ -1282,6 +1429,7 @@ github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfD github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1290,6 +1438,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= @@ -1320,7 +1470,6 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= @@ -1338,7 +1487,6 @@ go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= go.opentelemetry.io/otel/exporters/jaeger v1.4.1 h1:VHCK+2yTZDqDaVXj7JH2Z/khptuydo6C0ttBh2bxAbc= go.opentelemetry.io/otel/exporters/jaeger v1.4.1/go.mod h1:ZW7vkOu9nC1CxsD8bHNHCia5JUbwP39vxgd1q4Z5rCI= -go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8= @@ -1410,8 +1558,11 @@ golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8= -golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= +golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1451,6 +1602,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1510,13 +1663,21 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.1-0.20221027164007-c63010009c80/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1534,8 +1695,12 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1547,8 +1712,11 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1639,7 +1807,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1649,23 
+1816,41 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1675,8 +1860,10 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1684,8 +1871,10 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1772,11 +1961,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1812,6 +2004,13 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1875,10 +2074,25 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto 
v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 h1:7YDGQC/0sigNGzsEWyb9s72jTxlFdwVEYNJHbfQ+Dtg= +google.golang.org/genproto v0.0.0-20220706185917-7780775163c4/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -1906,12 +2120,20 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1924,8 +2146,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1948,7 +2172,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1964,8 +2187,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1989,7 +2213,7 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= 
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= +k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= @@ -1998,7 +2222,7 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= @@ -2011,7 +2235,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= +k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= @@ -2025,7 +2249,8 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/cri-api v0.24.0-alpha.3/go.mod h1:c/NLI5Zdyup5+oEYqFO2IE32ptofNiZpS1nL2y51gAg= +k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= +k8s.io/cri-api v0.26.0-alpha.3/go.mod h1:E49tenyB7esgfIguEd7+g9qYhHOr9peyyBcSaeH6Gxw= k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2041,13 +2266,14 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi 
v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= @@ -2057,7 +2283,7 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -2074,15 +2300,16 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= 
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
 sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4=
diff --git a/hack/azblob_test/Dockerfile b/hack/azblob_test/Dockerfile
new file mode 100644
index 000000000000..37d5d2d8eff1
--- /dev/null
+++ b/hack/azblob_test/Dockerfile
@@ -0,0 +1,18 @@
+FROM moby/buildkit AS buildkit
+
+FROM debian:bullseye-slim
+# ca-certificates and curl must be installed before the NodeSource setup script can run
+RUN apt-get update \
+  && apt-get install -y --no-install-recommends ca-certificates curl \
+  && curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \
+  && apt-get install -y --no-install-recommends containerd nodejs npm procps \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/* \
+  && npm install -g azurite@3.18.0 \
+  && mkdir /test \
+  && mkdir /tmp/azurite \
+  && curl -sL https://aka.ms/InstallAzureCLIDeb | bash
+
+COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/buildctl /bin/
+
+COPY --link . /test
diff --git a/hack/azblob_test/docker-bake.hcl b/hack/azblob_test/docker-bake.hcl
new file mode 100644
index 000000000000..fc57c4a3e4ad
--- /dev/null
+++ b/hack/azblob_test/docker-bake.hcl
@@ -0,0 +1,11 @@
+target "buildkit" {
+  context = "../../"
+  cache-from = ["type=gha,scope=binaries"]
+}
+
+target "default" {
+  contexts = {
+    buildkit = "target:buildkit"
+  }
+  tags = ["moby/buildkit:azblobtest"]
+}
diff --git a/hack/azblob_test/run_test.sh b/hack/azblob_test/run_test.sh
new file mode 100755
index 000000000000..cdf0dc291c0b
--- /dev/null
+++ b/hack/azblob_test/run_test.sh
@@ -0,0 +1,24 @@
+#!/bin/bash -ex
+
+function cleanup() {
+  docker rmi moby/buildkit:azblobtest
+}
+
+trap cleanup EXIT
+cd "$(dirname "$0")"
+
+docker buildx bake --load
+
+AZURE_ACCOUNT_NAME=azblobcacheaccount
+AZURE_ACCOUNT_URL=azblobcacheaccount.blob.localhost.com
+AZURE_ACCOUNT_KEY=$(echo "azblobcacheaccountkey" | base64)
+
+docker run \
+  --rm \
+  --privileged \
+  --add-host ${AZURE_ACCOUNT_URL}:127.0.0.1 \
+  -e AZURE_ACCOUNT_NAME=${AZURE_ACCOUNT_NAME} \
+  -e AZURE_ACCOUNT_KEY=${AZURE_ACCOUNT_KEY} \
+  -e AZURE_ACCOUNT_URL=${AZURE_ACCOUNT_URL} \
+  moby/buildkit:azblobtest \
+  /test/test.sh
diff --git a/hack/azblob_test/test.sh b/hack/azblob_test/test.sh
new file mode 100755
index 000000000000..adffaf0f03c8
--- /dev/null
+++ b/hack/azblob_test/test.sh
@@ -0,0 +1,141 @@
+#!/bin/bash -ex
+
+# Azurite documentation: https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azurite
+rm -rf /tmp/azurite
+
+export AZURITE_ACCOUNTS="${AZURE_ACCOUNT_NAME}:${AZURE_ACCOUNT_KEY}"
+BLOB_PORT=10000
+
+azurite --silent --location /tmp/azurite --debug /tmp/azurite/azurite.debug --blobPort ${BLOB_PORT} &
+timeout 15 bash -c "until echo > /dev/tcp/localhost/${BLOB_PORT}; do sleep 0.5; done"
+
+buildkitd -debugaddr 0.0.0.0:8060 &
+while true; do
+  curl -s -f http://127.0.0.1:8060/debug/pprof/ >/dev/null && break
+  sleep 1
+done
+
+export default_options="type=azblob,container=cachecontainer,account_url=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT},secret_access_key=${AZURE_ACCOUNT_KEY}"
+
+rm -rf /tmp/destdir1 /tmp/destdir2
+
+# First build of test1: no cache
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test1 \
+  --local dockerfile=/test/test1 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo" \
+  --output type=local,dest=/tmp/destdir1
+
+# Check that there are 4 blob files and 2 manifest files in the Azure blob container
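+# These assertions drive the az CLI against the local Azurite emulator through an explicit
+# connection string built from the account name, key, and blob endpoint set in run_test.sh;
+# cache manifests land under the manifests/ prefix and layer blobs under the blobs/ prefix.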
+blobCount=$(az storage blob list --output tsv --prefix blobs --container-name cachecontainer --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};" | wc -l)
+if (("$blobCount" != 4)); then
+  echo "unexpected number of blobs found: $blobCount"
+  exit 1
+fi
+
+manifestCount=$(az storage blob list --output tsv --prefix manifests --container-name cachecontainer --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};" | wc -l)
+if (("$manifestCount" != 2)); then
+  echo "unexpected number of manifests found: $manifestCount"
+  exit 1
+fi
+
+mkdir /tmp/content1
+az storage blob download-batch -d /tmp/content1 --pattern blobs/* -s cachecontainer --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};"
+
+# Second build of test1: Test that cache was used
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test1 \
+  --local dockerfile=/test/test1 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo" \
+  2>&1 | tee /tmp/log1
+
+# Check that the existing steps were read from the cache
+cat /tmp/log1 | grep 'cat /dev/urandom | head -c 100 | sha256sum > unique_first' -A1 | grep CACHED
+cat /tmp/log1 | grep 'cat /dev/urandom | head -c 100 | sha256sum > unique_second' -A1 | grep CACHED
+
+# No change expected in the blobs
+mkdir /tmp/content2
+az storage blob download-batch -d /tmp/content2 --pattern blobs/* -s cachecontainer --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};"
+diff -r /tmp/content1 /tmp/content2
+
+# First build of test2: Test that we can reuse the cache for a different docker image
+buildctl prune
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test2 \
+  --local dockerfile=/test/test2 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo" \
+  --output type=local,dest=/tmp/destdir2 \
+  2>&1 | tee /tmp/log2
+
+mkdir /tmp/content3
+az storage blob download-batch -d /tmp/content3 --pattern blobs/* -s cachecontainer --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};"
+
+# There should ONLY be 1 difference between the contents of /tmp/content1 and /tmp/content3
+# This difference is that in /tmp/content3 there should be 1 extra blob corresponding to the layer: RUN cat /dev/urandom | head -c 100 | sha256sum > unique_third
+contentDiff=$(diff -r /tmp/content1 /tmp/content3 || :)
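+# diff exits non-zero when the trees differ, so "|| :" keeps "set -e" from aborting here;
+# the regex below then accepts exactly one extra sha256 blob on the /tmp/content3 side.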
"$contentDiff" =~ ^"Only in /tmp/content3/blobs: sha256:"[a-z0-9]{64}$ ]]; then + echo "unexpected diff found $contentDiff" + exit 1 +fi + +# Check the existing steps were not executed, but read from cache +cat /tmp/log2 | grep 'cat /dev/urandom | head -c 100 | sha256sum > unique_first' -A1 | grep CACHED + +# Ensure cache is reused +rm /tmp/destdir2/unique_third +diff -r /tmp/destdir1 /tmp/destdir2 + +# Second build of test2: Test the behavior when a blob is missing +az storage blob delete-batch -s cachecontainer --pattern blobs/* --connection-string "DefaultEndpointsProtocol=http;AccountName=${AZURE_ACCOUNT_NAME};AccountKey=${AZURE_ACCOUNT_KEY};BlobEndpoint=http://${AZURE_ACCOUNT_URL}:${BLOB_PORT};" + +buildctl prune +buildctl build \ + --progress plain \ + --frontend dockerfile.v0 \ + --local context=/test/test2 \ + --local dockerfile=/test/test2 \ + --import-cache "$default_options,name=foo" \ + 2>&1 | tee /tmp/log3 + +cat /tmp/log3 | grep -E 'blob.+not found' >/dev/null + +pids="" + +for i in $(seq 0 9); do + buildctl build \ + --progress plain \ + --frontend dockerfile.v0 \ + --local context=/test/test1 \ + --local dockerfile=/test/test1 \ + --import-cache "$default_options,name=foo" \ + --export-cache "$default_options,mode=max,name=bar;foo" \ + &>/tmp/concurrencytestlog$i & + pids="$pids $!" +done + +wait $pids + +for i in $(seq 0 9); do + cat /tmp/concurrencytestlog$i | grep -q -v 'failed to upload blob ' +done + +echo Azure blob checks ok diff --git a/hack/azblob_test/test1/Dockerfile b/hack/azblob_test/test1/Dockerfile new file mode 100644 index 000000000000..d56dd9d1fd92 --- /dev/null +++ b/hack/azblob_test/test1/Dockerfile @@ -0,0 +1,7 @@ +FROM busybox:1.35 AS build +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_first +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_second + +FROM scratch +COPY --link --from=build /unique_first / +COPY --link --from=build /unique_second / diff --git a/hack/azblob_test/test2/Dockerfile b/hack/azblob_test/test2/Dockerfile new file mode 100644 index 000000000000..c0efe23b40c6 --- /dev/null +++ b/hack/azblob_test/test2/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox:1.35 AS build +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_first +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_second +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_third + +FROM scratch +COPY --link --from=build /unique_first / +COPY --link --from=build /unique_second / +COPY --link --from=build /unique_third / diff --git a/hack/dockerfiles/generated-files.Dockerfile b/hack/dockerfiles/generated-files.Dockerfile index a9f6f0bb9212..a0d57a692287 100644 --- a/hack/dockerfiles/generated-files.Dockerfile +++ b/hack/dockerfiles/generated-files.Dockerfile @@ -1,48 +1,55 @@ # syntax=docker/dockerfile-upstream:master -# protoc is dynamically linked to glibc to can't use golang:1.10-alpine -FROM golang:1.17-buster AS gobuild-base - -RUN apt-get update && apt-get --no-install-recommends install -y \ - unzip \ - && true - -# https://github.com/golang/protobuf/blob/v1.3.5/.travis.yml#L15 -ARG PROTOC_VERSION=3.11.4 -ARG TARGETOS TARGETARCH -RUN set -e; \ - arch=$(echo $TARGETARCH | sed -e s/amd64/x86_64/ -e s/arm64/aarch_64/); \ - wget -q https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-${TARGETOS}-${arch}.zip && unzip protoc-${PROTOC_VERSION}-${TARGETOS}-${arch}.zip -d /usr/local - -ARG GOGO_VERSION=v1.3.2 -RUN --mount=target=/root/.cache,type=cache GO111MODULE=on go install \ - 
github.com/gogo/protobuf/protoc-gen-gogo@${GOGO_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogofaster@${GOGO_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogoslick@${GOGO_VERSION} - -ARG PROTOBUF_VERSION=v1.3.5 -RUN --mount=target=/root/.cache,type=cache GO111MODULE=on go install \ - github.com/golang/protobuf/protoc-gen-go@${PROTOBUF_VERSION} - +ARG GO_VERSION="1.19" +ARG PROTOC_VERSION="3.11.4" + +# protoc is dynamically linked to glibc so can't use alpine base +FROM golang:${GO_VERSION}-buster AS base +RUN apt-get update && apt-get --no-install-recommends install -y git unzip +ARG PROTOC_VERSION +ARG TARGETOS +ARG TARGETARCH +RUN <&2 'ERROR: The result of "go generate" differs. Please update with "make generated-files"' + echo "$diff" + exit 1 + fi +EOT diff --git a/hack/dockerfiles/lint.Dockerfile b/hack/dockerfiles/lint.Dockerfile index 62c3c290b51e..257a9e878196 100644 --- a/hack/dockerfiles/lint.Dockerfile +++ b/hack/dockerfiles/lint.Dockerfile @@ -1,8 +1,9 @@ # syntax=docker/dockerfile-upstream:master -FROM golang:1.17-alpine +FROM golang:1.19-alpine +ENV GOFLAGS="-buildvcs=false" RUN apk add --no-cache gcc musl-dev yamllint -RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.43.0 +RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.48.0 WORKDIR /go/src/github.com/moby/buildkit RUN --mount=target=/go/src/github.com/moby/buildkit --mount=target=/root/.cache,type=cache \ GOARCH=amd64 golangci-lint run && \ diff --git a/hack/dockerfiles/vendor.Dockerfile b/hack/dockerfiles/vendor.Dockerfile index a0d0607e4243..4108aaaecf1b 100644 --- a/hack/dockerfiles/vendor.Dockerfile +++ b/hack/dockerfiles/vendor.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile-upstream:master -FROM golang:1.17-alpine AS vendored +FROM golang:1.19-alpine AS vendored RUN apk add --no-cache git WORKDIR /src RUN --mount=target=/src,rw \ diff --git a/hack/fixtures/dns-cni.conflist b/hack/fixtures/dns-cni.conflist new file mode 100644 index 000000000000..4faf98419647 --- /dev/null +++ b/hack/fixtures/dns-cni.conflist @@ -0,0 +1,28 @@ +{ + "cniVersion": "0.4.0", + "name": "buildkitdns", + "plugins": [ + { + "type": "bridge", + "bridge": "buildkitdns0", + "isDefaultGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "ranges": [ + [ + { "subnet": "10.11.0.0/16" } + ] + ] + } + }, + { + "type": "firewall" + }, + { + "type": "dnsname", + "domainName": "dns.buildkit" + } + ] +} diff --git a/hack/images b/hack/images index 1b172059eef9..d1315e6a17ef 100755 --- a/hack/images +++ b/hack/images @@ -7,6 +7,7 @@ PUSH=$3 . 
$(dirname $0)/util
 set -eu -o pipefail
 
+: ${RELEASE=false}
 : ${PLATFORMS=}
 : ${TARGET=}
 
@@ -39,12 +40,15 @@ if [[ "$TAG" == "local" ]]; then
   fi
 fi
 
+attestFlags="$(buildAttestFlags)"
+
 outputFlag="--output=type=image,push=false"
 if [ "$PUSH" = "push" ]; then
-  outputFlag="--output=type=image,buildinfo-attrs=true,push=true"
+  outputFlag="--output=type=image,push=true"
 fi
 if [ -n "$localmode" ]; then
-  outputFlag="--output=type=docker,buildinfo-attrs=true"
+  outputFlag="--output=type=docker"
+  attestFlags=""
 fi
 
 targetFlag=""
@@ -88,5 +92,10 @@ for tagName in $tagNames; do
   tagFlags="$tagFlags--tag=$tagName "
 done
 
-buildxCmd build $platformFlag $targetFlag $importCacheFlags $exportCacheFlags $tagFlags $outputFlag \
+nocacheFilterFlag=""
+if [[ "$RELEASE" = "true" ]] && [[ "$GITHUB_ACTIONS" = "true" ]]; then
+  nocacheFilterFlag="--no-cache-filter=git,buildkit-export,gobuild-base"
+fi
+
+buildxCmd build $platformFlag $targetFlag $importCacheFlags $exportCacheFlags $tagFlags $outputFlag $nocacheFilterFlag $attestFlags \
   $currentcontext
diff --git a/hack/install-buildx b/hack/install-buildx
index 22062b978571..d486dca33d3a 100755
--- a/hack/install-buildx
+++ b/hack/install-buildx
@@ -5,7 +5,7 @@ if [ -z "${BINDIR:-}" ] && [ -z "${PREFIX:-}" ]; then
   PREFIX="$(realpath "$(dirname "$0")/..")"
 fi
 
-: "${VERSION:=v0.7.1}"
+: "${VERSION:=v0.8.2}"
 : "${BINDIR:="${PREFIX}/bin"}"
 : "${DEST:="${BINDIR}/buildx"}"
diff --git a/hack/release-tar b/hack/release-tar
index bfad2d5336e7..308243f72096 100755
--- a/hack/release-tar
+++ b/hack/release-tar
@@ -6,6 +6,7 @@ OUT=$2
 . $(dirname $0)/util
 set -eu -o pipefail
 
+: ${RELEASE=false}
 : ${PLATFORMS=}
 
 usage() {
@@ -22,7 +23,30 @@ if [ -n "$PLATFORMS" ]; then
   platformFlag="--platform=$PLATFORMS"
 fi
 
-buildxCmd build $platformFlag $cacheFromFlags \
+nocacheFilterFlag=""
+if [[ "$RELEASE" = "true" ]] && [[ "$GITHUB_ACTIONS" = "true" ]]; then
+  nocacheFilterFlag="--no-cache-filter=git,gobuild-base"
+fi
+
+output=$(mktemp -d -t buildkit-output.XXXXXXXXXX)
+
+buildxCmd build $platformFlag $cacheFromFlags $nocacheFilterFlag $(buildAttestFlags) \
+  --build-arg "BUILDKIT_MULTI_PLATFORM=true" \
   --target release \
-  --output "type=local,dest=$OUT" \
+  --output "type=local,dest=$output" \
   $currentcontext
+
+for pdir in "${output}"/*/; do
+  (
+    cd "$pdir"
+    releasetar=$(find . -name '*.tar.gz')
+    filename=$(basename "${releasetar%.tar.gz}")
+    mv "provenance.json" "${filename}.provenance.json"
+    mv "sbom-binaries.spdx.json" "${filename}.sbom.json"
+    find . -name 'sbom*.json' -exec rm {} \;
+  )
+done
+
+mkdir -p "$OUT"
+mv "$output"/**/* "$OUT/"
+rm -rf $output
diff --git a/hack/s3_test/Dockerfile b/hack/s3_test/Dockerfile
new file mode 100644
index 000000000000..c20c8ffa1405
--- /dev/null
+++ b/hack/s3_test/Dockerfile
@@ -0,0 +1,21 @@
+ARG MINIO_VERSION=RELEASE.2022-05-03T20-36-08Z
+ARG MINIO_MC_VERSION=RELEASE.2022-05-04T06-07-55Z
+
+FROM minio/minio:${MINIO_VERSION} AS minio
+FROM minio/mc:${MINIO_MC_VERSION} AS minio-mc
+FROM moby/buildkit AS buildkit
+
+FROM debian:bullseye-slim
+
+RUN apt-get update \
+  && apt-get install -y --no-install-recommends wget ca-certificates containerd curl \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir /test
+
+COPY --from=buildkit /usr/bin/buildkitd /usr/bin/buildctl /bin/
+COPY --from=minio /opt/bin/minio /bin
+COPY --from=minio-mc /usr/bin/mc /bin
+
+COPY . /test
diff --git a/hack/s3_test/docker-bake.hcl b/hack/s3_test/docker-bake.hcl
new file mode 100644
index 000000000000..351e84b8ee71
--- /dev/null
+++ b/hack/s3_test/docker-bake.hcl
@@ -0,0 +1,11 @@
+target "buildkit" {
+  context = "../../"
+  cache-from = ["type=gha,scope=binaries"]
+}
+
+target "default" {
+  contexts = {
+    buildkit = "target:buildkit"
+  }
+  tags = ["moby/buildkit:s3test"]
+}
diff --git a/hack/s3_test/run_test.sh b/hack/s3_test/run_test.sh
new file mode 100755
index 000000000000..a2b2d4d681d0
--- /dev/null
+++ b/hack/s3_test/run_test.sh
@@ -0,0 +1,7 @@
+#!/bin/sh -ex
+
+cd "$(dirname "$0")"
+
+docker buildx bake --load
+docker run --rm --privileged -p 9001:9001 -p 8060:8060 moby/buildkit:s3test /test/test.sh
+docker rmi moby/buildkit:s3test
diff --git a/hack/s3_test/test.sh b/hack/s3_test/test.sh
new file mode 100755
index 000000000000..d9918bd5678e
--- /dev/null
+++ b/hack/s3_test/test.sh
@@ -0,0 +1,98 @@
+#!/bin/sh -ex
+
+/bin/minio server /tmp/data --address=0.0.0.0:9000 --console-address=0.0.0.0:9001 &
+
+while true; do
+  curl -s -f http://127.0.0.1:9001 >/dev/null && break
+  sleep 1
+done
+
+sleep 2
+mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
+mc mb myminio/my-bucket
+mc admin trace myminio &
+
+buildkitd -debugaddr 0.0.0.0:8060 &
+while true; do
+  curl -s -f http://127.0.0.1:8060/debug/pprof/ >/dev/null && break
+  sleep 1
+done
+
+export default_options="type=s3,bucket=my-bucket,region=us-east-1,endpoint_url=http://127.0.0.1:9000,access_key_id=minioadmin,secret_access_key=minioadmin,use_path_style=true"
+
+rm -rf /tmp/destdir1 /tmp/destdir2
+
+# First build: no cache on s3
+# 5 files should be exported (3 blobs + 2 manifests)
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test1 \
+  --local dockerfile=/test/test1 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo" \
+  --output type=local,dest=/tmp/destdir1
+
+# Check the 5 files are on s3 (3 blobs and 2 manifests)
+mc ls --recursive myminio/my-bucket | wc -l | grep 5
+
+# Test the refresh workflow
+mc ls --recursive myminio/my-bucket/blobs >/tmp/content
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test1 \
+  --local dockerfile=/test/test1 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo"
+mc ls --recursive myminio/my-bucket/blobs >/tmp/content2
+# No change expected
+diff /tmp/content /tmp/content2
+
+sleep 2
+
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test1 \
+  --local dockerfile=/test/test1 \
+  --import-cache "$default_options,name=foo" \
+  --export-cache "$default_options,mode=max,name=bar;foo,touch_refresh=1s"
+mc ls --recursive myminio/my-bucket/blobs >/tmp/content2
+# touch_refresh=1s should have caused the blob timestamps to change
+if diff /tmp/content /tmp/content2; then
+  exit 1
+fi
+
+# Check we can reuse the cache
+buildctl prune
+buildctl build \
+  --progress plain \
+  --frontend dockerfile.v0 \
+  --local context=/test/test2 \
+  --local dockerfile=/test/test2 \
+  --import-cache "$default_options,name=foo" \
+  --output type=local,dest=/tmp/destdir2 \
+  2>&1 | tee /tmp/log
+
+# Check the first step was not executed, but read from S3 cache
+cat /tmp/log | grep 'cat /dev/urandom | head -c 100 | sha256sum > unique_first' -A1 | grep CACHED
+
+# Ensure cache is reused
+rm /tmp/destdir2/unique_third
+diff -r /tmp/destdir1 /tmp/destdir2
+
+# Test the
behavior when a blob is missing +mc rm --force --recursive myminio/my-bucket/blobs + +buildctl prune +buildctl build \ + --progress plain \ + --frontend dockerfile.v0 \ + --local context=/test/test2 \ + --local dockerfile=/test/test2 \ + --import-cache "$default_options,name=foo" \ + >/tmp/log 2>&1 || true +cat /tmp/log | grep 'NoSuchKey' >/dev/null + +echo S3 Checks ok diff --git a/hack/s3_test/test1/Dockerfile b/hack/s3_test/test1/Dockerfile new file mode 100644 index 000000000000..8338f8ec216f --- /dev/null +++ b/hack/s3_test/test1/Dockerfile @@ -0,0 +1,7 @@ +FROM debian:bullseye-slim AS build +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_first +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_second + +FROM scratch +COPY --link --from=build /unique_first / +COPY --link --from=build /unique_second / diff --git a/hack/s3_test/test2/Dockerfile b/hack/s3_test/test2/Dockerfile new file mode 100644 index 000000000000..cb894a90d44d --- /dev/null +++ b/hack/s3_test/test2/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bullseye-slim AS build +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_first +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_second +RUN cat /dev/urandom | head -c 100 | sha256sum > unique_third + +FROM scratch +COPY --link --from=build /unique_first / +COPY --link --from=build /unique_second / +COPY --link --from=build /unique_third / diff --git a/hack/test b/hack/test index d5b253055f94..929733db1db5 100755 --- a/hack/test +++ b/hack/test @@ -12,10 +12,20 @@ set -eu -o pipefail : ${TEST_KEEP_CACHE=} : ${DOCKERFILE_RELEASES=} : ${BUILDKIT_WORKER_RANDOM=} +: ${BUILDKITD_TAGS=} -if [ "$TEST_DOCKERD" == "1" ] && ! file $TEST_DOCKERD_BINARY | grep "statically linked" >/dev/null; then - echo "dockerd binary needs to be statically linked" - exit 1 +if [ "$TEST_DOCKERD" == "1" ]; then + if [ ! -f "$TEST_DOCKERD_BINARY" ]; then + echo "dockerd binary not found" + exit 1 + fi + if [ ! -x "$TEST_DOCKERD_BINARY" ]; then + chmod +x "$TEST_DOCKERD_BINARY" + fi + if ! file "$TEST_DOCKERD_BINARY" | grep "statically linked" >/dev/null; then + echo "dockerd binary needs to be statically linked" + exit 1 + fi fi if [ "$#" == 0 ]; then TEST_INTEGRATION=1; fi @@ -51,6 +61,7 @@ if [ "$TEST_COVERAGE" = "1" ]; then fi buildxCmd build $cacheFromFlags \ + --build-arg "BUILDKITD_TAGS=$BUILDKITD_TAGS" \ --target "integration-tests" \ --output "type=docker,name=$iid" \ $currentcontext @@ -63,7 +74,7 @@ fi if [ "$TEST_INTEGRATION" == 1 ]; then cid=$(docker create --rm -v /tmp $coverageVol --volumes-from=$cacheVolume -e TEST_DOCKERD -e SKIP_INTEGRATION_TESTS ${BUILDKIT_INTEGRATION_SNAPSHOTTER:+"-eBUILDKIT_INTEGRATION_SNAPSHOTTER"} -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry --privileged $iid go test $coverageFlags ${TESTFLAGS:--v} ${TESTPKGS:-./...}) if [ "$TEST_DOCKERD" = "1" ]; then - docker cp "$TEST_DOCKERD_BINARY" $cid:/usr/bin/ + docker cp "$TEST_DOCKERD_BINARY" $cid:/usr/bin/dockerd fi docker start -a $cid fi @@ -71,7 +82,7 @@ fi if [ "$TEST_GATEWAY" == 1 ]; then # Build-test "github.com/moby/buildkit/frontend/gateway/client", which isn't otherwise built by CI # It really only needs buildkit-base. We have integration-tests in $iid, which is a direct child of buildkit-base. 
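+  # --entrypoint="" clears the image's entrypoint so that the "go build" command is executed directly.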
- cid=$(docker create --rm --volumes-from=$cacheVolume $iid go build -v ./frontend/gateway/client) + cid=$(docker create --rm --volumes-from=$cacheVolume --entrypoint="" $iid go build -v ./frontend/gateway/client) docker start -a $cid fi @@ -104,7 +115,7 @@ if [ "$TEST_DOCKERFILE" == 1 ]; then cid=$(docker create -v /tmp $coverageVol --rm --privileged --volumes-from=$cacheVolume -e TEST_DOCKERD -e BUILDKIT_REGISTRY_MIRROR_DIR=/root/.cache/registry -e BUILDKIT_WORKER_RANDOM -e FRONTEND_GATEWAY_ONLY=local:/$release.tar -e EXTERNAL_DF_FRONTEND=/dockerfile-frontend $iid go test $coverageFlags --count=1 -tags "$buildtags" ${TESTFLAGS:--v} ./frontend/dockerfile) docker cp $tarout $cid:/$release.tar if [ "$TEST_DOCKERD" = "1" ]; then - docker cp "$TEST_DOCKERD_BINARY" $cid:/usr/bin/ + docker cp "$TEST_DOCKERD_BINARY" $cid:/usr/bin/dockerd fi docker start -a $cid fi diff --git a/hack/update-generated-files b/hack/update-generated-files index 5536fcac4f7b..d038d7d6b632 100755 --- a/hack/update-generated-files +++ b/hack/update-generated-files @@ -3,18 +3,13 @@ . $(dirname $0)/util set -eu -gogo_version=$(awk '$1 == "github.com/gogo/protobuf" { print $2 }' go.mod) -# protobuf_version=$(awk '$3 == "github.com/golang/protobuf" { print $4 }' go.mod) output=$(mktemp -d -t buildctl-output.XXXXXXXXXX) buildxCmd build \ --target "update" \ - --build-arg "GOGO_VERSION=$gogo_version" \ --output "type=local,dest=$output" \ --file "./hack/dockerfiles/generated-files.Dockerfile" \ . -# --build-arg "PROTOBUF_VERSION=$protobuf_version" \ - -cp -R "$output/generated-files/." . +cp -R "$output/." . rm -rf $output diff --git a/hack/util b/hack/util index 44fb5c9fa728..ed25cee3d315 100755 --- a/hack/util +++ b/hack/util @@ -1,43 +1,51 @@ #!/usr/bin/env sh -export BUILDX_NO_DEFAULT_LOAD=true -: ${PREFER_BUILDCTL=} -: ${PREFER_LEGACY=} -: ${CI=} -: ${GITHUB_ACTIONS=} -: ${CACHE_FROM=} -: ${CACHE_TO=} +: "${CI=}" +: "${GITHUB_ACTIONS=}" +: "${GITHUB_REPOSITORY=}" +: "${GITHUB_RUN_ID=}" +: "${BUILDX_BUILDER=}" -if [ "$PREFER_BUILDCTL" = "1" ]; then - echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring." -fi - -if [ "$PREFER_LEGACY" = "1" ]; then - echo >&2 "WARNING: PREFER_LEGACY is no longer supported. Ignoring." -fi +: "${CONTEXT=}" +: "${CACHE_FROM=}" +: "${CACHE_TO=}" progressFlag="" if [ "$CI" = "true" ]; then progressFlag="--progress=plain" fi -buildxCmd() { - if docker buildx version >/dev/null 2>&1; then +buildxBin="" +builderName="" +if docker buildx version >/dev/null 2>&1; then + buildxBin="docker buildx" +elif buildx version >/dev/null 2>&1; then + buildxBin="buildx" +else + topdir="$(realpath $(dirname "$0")/..)" + if [ ! -x "${topdir}/bin/buildx" ]; then set -x - docker buildx "$@" $progressFlag - elif buildx version >/dev/null 2>&1; then + "${topdir}/hack/install-buildx" + fi + buildxBin="${topdir}/bin/buildx" + builderName="moby-buildkit" + "${topdir}/hack/bootstrap-buildx" "${builderName}" +fi + +buildxCmd() { + ( set -x - buildx "$@" $progressFlag - else - topdir="$(realpath $(dirname "$0")/..)" - if [ ! 
-x "${topdir}/bin/buildx" ]; then - set -x - "${topdir}/hack/install-buildx" + BUILDX_NO_DEFAULT_LOAD=true BUILDX_BUILDER="${builderName:-${BUILDX_BUILDER}}" $buildxBin "$@" $progressFlag + ) +} + +buildAttestFlags() { + if $buildxBin build --help 2>&1 | grep -- '--attest' >/dev/null; then + prvattrs="mode=max" + if [ "$GITHUB_ACTIONS" = "true" ]; then + prvattrs="$prvattrs,builder-id=https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" fi - set -x - bootstrapName="moby-buildkit" - "${topdir}/hack/bootstrap-buildx" "${bootstrapName}" - BUILDX_BUILDER="${bootstrapName}" "${topdir}/bin/buildx" "$@" $progressFlag + echo "--attest=type=sbom --attest=type=provenance,$prvattrs" fi } @@ -61,3 +69,6 @@ fi if [ -n "$currentref" ]; then currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref" fi +if [ -n "$CONTEXT" ]; then + currentcontext=$CONTEXT +fi diff --git a/hack/validate-generated-files b/hack/validate-generated-files index e3c9e5fe0e19..705ce5405ada 100755 --- a/hack/validate-generated-files +++ b/hack/validate-generated-files @@ -1,30 +1,10 @@ #!/usr/bin/env bash + +. $(dirname $0)/util set -eu -case ${1:-} in - '') - . $(dirname $0)/util - gogo_version=$(awk '$1 == "github.com/gogo/protobuf" { print $2 }' go.mod) - buildxCmd build \ - --target validate \ - --build-arg "GOGO_VERSION=$gogo_version" \ - --file ./hack/dockerfiles/generated-files.Dockerfile \ - . - ;; - check) - diffs="$(git status --porcelain -- **/*.pb.go 2>/dev/null)" - set +x - if [ "$diffs" ]; then - { - echo 'The result of "go generate" differs' - echo - echo "$diffs" - echo - echo 'Please update with "make generated-files"' - echo - } >&2 - exit 1 - fi - echo 'Congratulations! All auto generated files are correct.' - ;; -esac +buildxCmd build \ + --target "validate" \ + --output "type=cacheonly" \ + --file "./hack/dockerfiles/generated-files.Dockerfile" \ + . 
diff --git a/identity/randomid.go b/identity/randomid.go
index 0eb13527aac5..2b8796f095b2 100644
--- a/identity/randomid.go
+++ b/identity/randomid.go
@@ -2,9 +2,10 @@ package identity
 
 import (
 	cryptorand "crypto/rand"
-	"fmt"
 	"io"
 	"math/big"
+
+	"github.com/pkg/errors"
 )
 
 var (
@@ -45,7 +46,7 @@ func NewID() string {
 	var p [randomIDEntropyBytes]byte
 
 	if _, err := io.ReadFull(idReader, p[:]); err != nil {
-		panic(fmt.Errorf("failed to read random bytes: %v", err))
+		panic(errors.Wrap(err, "failed to read random bytes"))
 	}
 
 	p[0] |= 0x80 // set high bit to avoid the need for padding
diff --git a/session/auth/auth.go b/session/auth/auth.go
index 85e6f68053ad..232022ad23b2 100644
--- a/session/auth/auth.go
+++ b/session/auth/auth.go
@@ -2,8 +2,8 @@ package auth
 
 import (
 	"context"
+	"crypto/rand"
 	"crypto/subtle"
-	"math/rand"
 	"sync"
 
 	"github.com/moby/buildkit/session"
diff --git a/session/auth/authprovider/authprovider.go b/session/auth/authprovider/authprovider.go
index 35396b044c93..d77aaa96bc81 100644
--- a/session/auth/authprovider/authprovider.go
+++ b/session/auth/authprovider/authprovider.go
@@ -6,7 +6,6 @@ import (
 	"crypto/hmac"
 	"crypto/sha256"
 	"fmt"
-	"io"
 	"net/http"
 	"os"
 	"strconv"
@@ -18,6 +17,7 @@ import (
 	remoteserrors "github.com/containerd/containerd/remotes/errors"
 	"github.com/docker/cli/cli/config"
 	"github.com/docker/cli/cli/config/configfile"
+	"github.com/docker/cli/cli/config/types"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/auth"
 	"github.com/moby/buildkit/util/progress/progresswriter"
@@ -30,19 +30,21 @@ import (
 
 const defaultExpiration = 60
 
-func NewDockerAuthProvider(stderr io.Writer) session.Attachable {
+func NewDockerAuthProvider(cfg *configfile.ConfigFile) session.Attachable {
 	return &authProvider{
-		config:      config.LoadDefaultConfigFile(stderr),
-		seeds:       &tokenSeeds{dir: config.Dir()},
-		loggerCache: map[string]struct{}{},
+		authConfigCache: map[string]*types.AuthConfig{},
+		config:          cfg,
+		seeds:           &tokenSeeds{dir: config.Dir()},
+		loggerCache:     map[string]struct{}{},
 	}
 }
 
 type authProvider struct {
-	config      *configfile.ConfigFile
-	seeds       *tokenSeeds
-	logger      progresswriter.Logger
-	loggerCache map[string]struct{}
+	authConfigCache map[string]*types.AuthConfig
+	config          *configfile.ConfigFile
+	seeds           *tokenSeeds
+	logger          progresswriter.Logger
+	loggerCache     map[string]struct{}
 
 	// The need for this mutex is not well understood.
// Without it, the docker cli on OS X hangs when @@ -62,6 +64,16 @@ func (ap *authProvider) Register(server *grpc.Server) { } func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequest) (rr *auth.FetchTokenResponse, err error) { + ac, err := ap.getAuthConfig(req.Host) + if err != nil { + return nil, err + } + + // check for statically configured bearer token + if ac.RegistryToken != "" { + return toTokenResponse(ac.RegistryToken, time.Time{}, 0), nil + } + creds, err := ap.credentials(req.Host) if err != nil { return nil, err @@ -117,12 +129,7 @@ func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequ } func (ap *authProvider) credentials(host string) (*auth.CredentialsResponse, error) { - ap.mu.Lock() - defer ap.mu.Unlock() - if host == "registry-1.docker.io" { - host = "https://index.docker.io/v1/" - } - ac, err := ap.config.GetAuthConfig(host) + ac, err := ap.getAuthConfig(host) if err != nil { return nil, err } @@ -173,6 +180,23 @@ func (ap *authProvider) VerifyTokenAuthority(ctx context.Context, req *auth.Veri return &auth.VerifyTokenAuthorityResponse{Signed: sign.Sign(nil, req.Payload, priv)}, nil } +func (ap *authProvider) getAuthConfig(host string) (*types.AuthConfig, error) { + ap.mu.Lock() + defer ap.mu.Unlock() + if _, exists := ap.authConfigCache[host]; !exists { + if host == "registry-1.docker.io" { + host = "https://index.docker.io/v1/" + } + ac, err := ap.config.GetAuthConfig(host) + if err != nil { + return nil, err + } + ap.authConfigCache[host] = &ac + } + + return ap.authConfigCache[host], nil +} + func (ap *authProvider) getAuthorityKey(host string, salt []byte) (ed25519.PrivateKey, error) { if v, err := strconv.ParseBool(os.Getenv("BUILDKIT_NO_CLIENT_TOKEN")); err == nil && v { return nil, status.Errorf(codes.Unavailable, "client side tokens disabled") diff --git a/session/auth/authprovider/tokenseed.go b/session/auth/authprovider/tokenseed.go index d626e5989c30..0fdcea4c3968 100644 --- a/session/auth/authprovider/tokenseed.go +++ b/session/auth/authprovider/tokenseed.go @@ -3,7 +3,6 @@ package authprovider import ( "crypto/rand" "encoding/json" - "io/ioutil" "os" "path/filepath" "sync" @@ -47,7 +46,7 @@ func (ts *tokenSeeds) getSeed(host string) ([]byte, error) { fp := filepath.Join(ts.dir, ".token_seed") // we include client side randomness to avoid chosen plaintext attack from the daemon side - dt, err := ioutil.ReadFile(fp) + dt, err := os.ReadFile(fp) if err != nil { if !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.ENOTDIR) && !errors.Is(err, os.ErrPermission) { return nil, err @@ -68,7 +67,7 @@ func (ts *tokenSeeds) getSeed(host string) ([]byte, error) { return nil, err } - if err := ioutil.WriteFile(fp, dt, 0600); err != nil { + if err := os.WriteFile(fp, dt, 0600); err != nil { if !errors.Is(err, syscall.EROFS) && !errors.Is(err, os.ErrPermission) { return nil, err } diff --git a/session/content/content_test.go b/session/content/content_test.go index 65960ac6360b..89e3e6c1df56 100644 --- a/session/content/content_test.go +++ b/session/content/content_test.go @@ -2,8 +2,6 @@ package content import ( "context" - "io/ioutil" - "os" "testing" "github.com/containerd/containerd/content" @@ -24,10 +22,7 @@ func TestContentAttachable(t *testing.T) { attachableStores := make(map[string]content.Store) testBlobs := make(map[string]map[digest.Digest][]byte) for _, id := range ids { - tmpDir, err := ioutil.TempDir("", "contenttest") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - store, err := 
local.NewStore(tmpDir) + store, err := local.NewStore(t.TempDir()) require.NoError(t, err) blob := []byte("test-content-attachable-" + id) w, err := store.Writer(ctx, content.WithRef(string(blob))) diff --git a/session/filesync/filesync.go b/session/filesync/filesync.go index ae3f29f86c3d..e31354262930 100644 --- a/session/filesync/filesync.go +++ b/session/filesync/filesync.go @@ -27,27 +27,35 @@ const ( ) type fsSyncProvider struct { - dirs map[string]SyncedDir + dirs DirSource p progressCb doneCh chan error } type SyncedDir struct { - Name string Dir string Excludes []string - Map func(string, *fstypes.Stat) bool + Map func(string, *fstypes.Stat) fsutil.MapResult +} + +type DirSource interface { + LookupDir(string) (SyncedDir, bool) +} + +type StaticDirSource map[string]SyncedDir + +var _ DirSource = StaticDirSource{} + +func (dirs StaticDirSource) LookupDir(name string) (SyncedDir, bool) { + dir, found := dirs[name] + return dir, found } // NewFSSyncProvider creates a new provider for sending files from client -func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { - p := &fsSyncProvider{ - dirs: map[string]SyncedDir{}, +func NewFSSyncProvider(dirs DirSource) session.Attachable { + return &fsSyncProvider{ + dirs: dirs, } - for _, d := range dirs { - p.dirs[d.Name] = d - } - return p } func (sp *fsSyncProvider) Register(server *grpc.Server) { @@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr dirName = name[0] } - dir, ok := sp.dirs[dirName] + dir, ok := sp.dirs.LookupDir(dirName) if !ok { return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)} } diff --git a/session/filesync/filesync_test.go b/session/filesync/filesync_test.go index b569d173166c..424ffe31f25b 100644 --- a/session/filesync/filesync_test.go +++ b/session/filesync/filesync_test.go @@ -2,7 +2,7 @@ package filesync import ( "context" - "io/ioutil" + "os" "path/filepath" "testing" @@ -16,16 +16,14 @@ import ( func TestFileSyncIncludePatterns(t *testing.T) { ctx := context.TODO() t.Parallel() - tmpDir, err := ioutil.TempDir("", "fsynctest") - require.NoError(t, err) - destDir, err := ioutil.TempDir("", "fsynctest") - require.NoError(t, err) + tmpDir := t.TempDir() + destDir := t.TempDir() - err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600) + err := os.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600) require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600) + err = os.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600) require.NoError(t, err) s, err := session.NewSession(ctx, "foo", "bar") @@ -34,7 +32,7 @@ func TestFileSyncIncludePatterns(t *testing.T) { m, err := session.NewManager() require.NoError(t, err) - fs := NewFSSyncProvider([]SyncedDir{{Name: "test0", Dir: tmpDir}}) + fs := NewFSSyncProvider(StaticDirSource{"test0": {Dir: tmpDir}}) s.Allow(fs) dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn))) @@ -58,10 +56,10 @@ func TestFileSyncIncludePatterns(t *testing.T) { return err } - _, err = ioutil.ReadFile(filepath.Join(destDir, "foo")) + _, err = os.ReadFile(filepath.Join(destDir, "foo")) assert.Error(t, err) - dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + dt, err := os.ReadFile(filepath.Join(destDir, "bar")) if err != nil { return err } diff --git a/session/grpc.go b/session/grpc.go index a7237ac35046..dd67c69b6466 100644 --- a/session/grpc.go +++ b/session/grpc.go @@ -2,6 +2,7 
@@ package session
 
 import (
 	"context"
+	"math"
 	"net"
 	"sync/atomic"
 	"time"
@@ -10,6 +11,7 @@ import (
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/grpcerrors"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/net/http2"
@@ -79,21 +81,55 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
 	defer cancelConn()
 	defer cc.Close()
 
-	ticker := time.NewTicker(1 * time.Second)
+	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()
 	healthClient := grpc_health_v1.NewHealthClient(cc)
 
+	failedBefore := false
+	consecutiveSuccessful := 0
+	defaultHealthcheckDuration := 30 * time.Second
+	lastHealthcheckDuration := time.Duration(0)
+
 	for {
 		select {
 		case <-ctx.Done():
 			return
 		case <-ticker.C:
-			ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+			// This healthcheck can erroneously fail under load, e.g. when a lot of data is received over a low-bandwidth connection or many builds run concurrently.
+			// The timeout is therefore deliberately long, and a limited number of failures is tolerated.
+
+			healthcheckStart := time.Now()
+
+			timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
+			ctx, cancel := context.WithTimeout(ctx, timeout)
 			_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
 			cancel()
+
+			lastHealthcheckDuration = time.Since(healthcheckStart)
+			logFields := logrus.Fields{
+				"timeout":        timeout,
+				"actualDuration": lastHealthcheckDuration,
+			}
+
 			if err != nil {
-				return
+				if failedBefore {
+					bklog.G(ctx).Error("healthcheck failed fatally")
+					return
+				}
+
+				failedBefore = true
+				consecutiveSuccessful = 0
+				bklog.G(ctx).WithFields(logFields).Warn("healthcheck failed")
+			} else {
+				consecutiveSuccessful++
+
+				if consecutiveSuccessful >= 5 && failedBefore {
+					failedBefore = false
+					bklog.G(ctx).WithFields(logFields).Debug("reset healthcheck failure")
+				}
 			}
+
+			bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed")
 		}
 	}
 }
diff --git a/session/manager.go b/session/manager.go
index edac93063c38..2678e6738dab 100644
--- a/session/manager.go
+++ b/session/manager.go
@@ -160,12 +160,10 @@ func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, err
 	defer cancel()
 
 	go func() {
-		select {
-		case <-ctx.Done():
-			sm.mu.Lock()
-			sm.updateCondition.Broadcast()
-			sm.mu.Unlock()
-		}
+		<-ctx.Done()
+		sm.mu.Lock()
+		sm.updateCondition.Broadcast()
+		sm.mu.Unlock()
 	}()
 
 	var c *client
diff --git a/session/secrets/secretsprovider/store.go b/session/secrets/secretsprovider/store.go
index 3a846f84e2b9..922036aed67b 100644
--- a/session/secrets/secretsprovider/store.go
+++ b/session/secrets/secretsprovider/store.go
@@ -2,7 +2,6 @@ package secretsprovider
 
 import (
 	"context"
-	"io/ioutil"
 	"os"
 
 	"github.com/moby/buildkit/session/secrets"
@@ -57,7 +56,7 @@ func (fs *fileStore) GetSecret(ctx context.Context, id string) ([]byte, error) {
 	if v.Env != "" {
 		return []byte(os.Getenv(v.Env)), nil
 	}
-	dt, err := ioutil.ReadFile(v.FilePath)
+	dt, err := os.ReadFile(v.FilePath)
 	if err != nil {
 		return nil, err
 	}
diff --git a/session/sshforward/copy.go b/session/sshforward/copy.go
index 6db414894923..a4a065b46e36 100644
--- a/session/sshforward/copy.go
+++ b/session/sshforward/copy.go
@@ -1,10 +1,10 @@
 package sshforward
 
 import (
-	io "io"
+	"context"
+	"io"
 
 	"github.com/pkg/errors"
-	context "golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -14,16
+14,24 @@ type Stream interface { } func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { + defer conn.Close() g, ctx := errgroup.WithContext(ctx) g.Go(func() (retErr error) { p := &BytesMessage{} for { if err := stream.RecvMsg(p); err != nil { - conn.Close() if err == io.EOF { + // indicates client performed CloseSend, but they may still be + // reading data + if conn, ok := conn.(interface { + CloseWrite() error + }); ok { + conn.CloseWrite() + } return nil } + conn.Close() return errors.WithStack(err) } select { diff --git a/session/sshforward/ssh.go b/session/sshforward/ssh.go index a7a4c2e228a3..a808fcb1f077 100644 --- a/session/sshforward/ssh.go +++ b/session/sshforward/ssh.go @@ -1,14 +1,13 @@ package sshforward import ( - "io/ioutil" + "context" "net" "os" "path/filepath" "github.com/moby/buildkit/session" "github.com/pkg/errors" - context "golang.org/x/net/context" "golang.org/x/sync/errgroup" "google.golang.org/grpc/metadata" ) @@ -64,7 +63,7 @@ type SocketOpt struct { } func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) { - dir, err := ioutil.TempDir("", ".buildkit-ssh-sock") + dir, err := os.MkdirTemp("", ".buildkit-ssh-sock") if err != nil { return "", nil, errors.WithStack(err) } diff --git a/session/sshforward/sshprovider/agentprovider.go b/session/sshforward/sshprovider/agentprovider.go index 981eb96f5628..f6501113d710 100644 --- a/session/sshforward/sshprovider/agentprovider.go +++ b/session/sshforward/sshprovider/agentprovider.go @@ -3,7 +3,6 @@ package sshprovider import ( "context" "io" - "io/ioutil" "net" "os" "runtime" @@ -166,7 +165,7 @@ func toAgentSource(paths []string) (source, error) { if err != nil { return source{}, errors.Wrapf(err, "failed to open %s", p) } - dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024}) + dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024}) if err != nil { return source{}, errors.Wrapf(err, "failed to read %s", p) } diff --git a/snapshot/diffapply_unix.go b/snapshot/diffapply_unix.go index 501051936539..136d7a6282e8 100644 --- a/snapshot/diffapply_unix.go +++ b/snapshot/diffapply_unix.go @@ -14,9 +14,9 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/overlay/overlayutils" "github.com/containerd/continuity/fs" "github.com/containerd/continuity/sysx" - "github.com/containerd/stargz-snapshotter/snapshot/overlayutils" "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/bklog" @@ -379,6 +379,18 @@ func (a *applier) applyCopy(ctx context.Context, ca *changeApply) error { return errors.Errorf("unhandled file type %d during merge at path %q", ca.srcStat.Mode&unix.S_IFMT, ca.srcPath) } + // NOTE: it's important that chown happens before setting xattrs due to the fact that chown will + // reset the security.capabilities xattr which results in file capabilities being lost. 
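+	// (os.Lchown is used because dstPath may itself be a symlink; it sets ownership without following links.)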
+ if err := os.Lchown(ca.dstPath, int(ca.srcStat.Uid), int(ca.srcStat.Gid)); err != nil { + return errors.Wrap(err, "failed to chown during apply") + } + + if ca.srcStat.Mode&unix.S_IFMT != unix.S_IFLNK { + if err := unix.Chmod(ca.dstPath, ca.srcStat.Mode); err != nil { + return errors.Wrapf(err, "failed to chmod path %q during apply", ca.dstPath) + } + } + if ca.srcPath != "" { xattrs, err := sysx.LListxattr(ca.srcPath) if err != nil { @@ -410,16 +422,6 @@ func (a *applier) applyCopy(ctx context.Context, ca *changeApply) error { } } - if err := os.Lchown(ca.dstPath, int(ca.srcStat.Uid), int(ca.srcStat.Gid)); err != nil { - return errors.Wrap(err, "failed to chown during apply") - } - - if ca.srcStat.Mode&unix.S_IFMT != unix.S_IFLNK { - if err := unix.Chmod(ca.dstPath, ca.srcStat.Mode); err != nil { - return errors.Wrapf(err, "failed to chmod path %q during apply", ca.dstPath) - } - } - atimeSpec := unix.Timespec{Sec: ca.srcStat.Atim.Sec, Nsec: ca.srcStat.Atim.Nsec} mtimeSpec := unix.Timespec{Sec: ca.srcStat.Mtim.Sec, Nsec: ca.srcStat.Mtim.Nsec} if ca.srcStat.Mode&unix.S_IFMT != unix.S_IFDIR { diff --git a/snapshot/localmounter_unix.go b/snapshot/localmounter_unix.go index ef73e263fc91..27cff3ebdf8c 100644 --- a/snapshot/localmounter_unix.go +++ b/snapshot/localmounter_unix.go @@ -4,7 +4,6 @@ package snapshot import ( - "io/ioutil" "os" "syscall" @@ -38,7 +37,7 @@ func (lm *localMounter) Mount() (string, error) { } } - dir, err := ioutil.TempDir("", "buildkit-mount") + dir, err := os.MkdirTemp("", "buildkit-mount") if err != nil { return "", errors.Wrap(err, "failed to create temp dir") } diff --git a/snapshot/merge.go b/snapshot/merge.go index 35cc0b71e347..b565a844844c 100644 --- a/snapshot/merge.go +++ b/snapshot/merge.go @@ -130,7 +130,7 @@ func (sn *mergeSnapshotter) Merge(ctx context.Context, key string, diffs []Diff, diffs = diffs[baseIndex:] } - tempLeaseCtx, done, err := leaseutil.WithLease(ctx, sn.lm, leaseutil.MakeTemporary) + ctx, done, err := leaseutil.WithLease(ctx, sn.lm, leaseutil.MakeTemporary) if err != nil { return errors.Wrap(err, "failed to create temporary lease for view mounts during merge") } @@ -138,7 +138,7 @@ func (sn *mergeSnapshotter) Merge(ctx context.Context, key string, diffs []Diff, // Make the snapshot that will be merged into prepareKey := identity.NewID() - if err := sn.Prepare(tempLeaseCtx, prepareKey, baseKey); err != nil { + if err := sn.Prepare(ctx, prepareKey, baseKey); err != nil { return errors.Wrapf(err, "failed to prepare %q", key) } applyMounts, err := sn.Mounts(ctx, prepareKey) diff --git a/snapshot/snapshotter_test.go b/snapshot/snapshotter_test.go index c2eb0577fabf..86723a7e5f4c 100644 --- a/snapshot/snapshotter_test.go +++ b/snapshot/snapshotter_test.go @@ -5,8 +5,6 @@ package snapshot import ( "context" - "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -23,39 +21,20 @@ import ( "github.com/containerd/containerd/snapshots/native" "github.com/containerd/containerd/snapshots/overlay" "github.com/containerd/continuity/fs/fstest" - "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/leaseutil" + "github.com/pkg/errors" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" ) -func newSnapshotter(ctx context.Context, snapshotterName string) (_ context.Context, _ *mergeSnapshotter, _ func() error, rerr error) { +func newSnapshotter(ctx context.Context, t *testing.T, snapshotterName string) (_ context.Context, _ *mergeSnapshotter, rerr error) { ns := "buildkit-test" ctx = 
namespaces.WithNamespace(ctx, ns) - defers := make([]func() error, 0) - cleanup := func() error { - var err error - for i := range defers { - err = multierror.Append(err, defers[len(defers)-1-i]()).ErrorOrNil() - } - return err - } - defer func() { - if rerr != nil && cleanup != nil { - cleanup() - } - }() - - tmpdir, err := ioutil.TempDir("", "buildkit-test") - if err != nil { - return nil, nil, nil, err - } - defers = append(defers, func() error { - return os.RemoveAll(tmpdir) - }) + tmpdir := t.TempDir() + var err error var ctdSnapshotter snapshots.Snapshotter var noHardlink bool switch snapshotterName { @@ -65,35 +44,38 @@ func newSnapshotter(ctx context.Context, snapshotterName string) (_ context.Cont case "native": ctdSnapshotter, err = native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) if err != nil { - return nil, nil, nil, err + return nil, nil, err } case "overlayfs": ctdSnapshotter, err = overlay.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) if err != nil { - return nil, nil, nil, err + return nil, nil, err } default: - return nil, nil, nil, fmt.Errorf("unhandled snapshotter: %s", snapshotterName) + return nil, nil, errors.Errorf("unhandled snapshotter: %s", snapshotterName) } + t.Cleanup(func() { + require.NoError(t, ctdSnapshotter.Close()) + }) store, err := local.NewStore(tmpdir) if err != nil { - return nil, nil, nil, err + return nil, nil, err } db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - defers = append(defers, func() error { - return db.Close() + t.Cleanup(func() { + require.NoError(t, db.Close()) }) mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{ snapshotterName: ctdSnapshotter, }) if err := mdb.Init(context.TODO()); err != nil { - return nil, nil, nil, err + return nil, nil, err } lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), ns) @@ -101,6 +83,9 @@ func newSnapshotter(ctx context.Context, snapshotterName string) (_ context.Cont if noHardlink { snapshotter.tryCrossSnapshotLink = false } + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) leaseID := identity.NewID() _, err = lm.Create(ctx, func(l *leases.Lease) error { @@ -111,11 +96,11 @@ func newSnapshotter(ctx context.Context, snapshotterName string) (_ context.Cont return nil }, leaseutil.MakeTemporary) if err != nil { - return nil, nil, nil, err + return nil, nil, err } ctx = leases.WithLease(ctx, leaseID) - return ctx, snapshotter, cleanup, nil + return ctx, snapshotter, nil } func TestMerge(t *testing.T) { @@ -127,9 +112,8 @@ func TestMerge(t *testing.T) { requireRoot(t) } - ctx, sn, cleanup, err := newSnapshotter(context.Background(), snName) + ctx, sn, err := newSnapshotter(context.Background(), t, snName) require.NoError(t, err) - defer cleanup() ts := time.Unix(0, 0) snapA := committedKey(ctx, t, sn, identity.NewID(), "", @@ -332,9 +316,8 @@ func TestHardlinks(t *testing.T) { requireRoot(t) } - ctx, sn, cleanup, err := newSnapshotter(context.Background(), snName) + ctx, sn, err := newSnapshotter(context.Background(), t, snName) require.NoError(t, err) - defer cleanup() base1Snap := committedKey(ctx, t, sn, identity.NewID(), "", fstest.CreateFile("1", []byte("1"), 0600), @@ -398,9 +381,8 @@ func TestUsage(t *testing.T) { requireRoot(t) } - ctx, sn, cleanup, err := newSnapshotter(context.Background(), snName) + ctx, sn, err := newSnapshotter(context.Background(), t, snName) require.NoError(t, err) - defer cleanup() const direntByteSize = 4096 
diff --git a/solver/bboltcachestorage/storage_test.go b/solver/bboltcachestorage/storage_test.go index 5e72c889b47d..ed7997948fd4 100644 --- a/solver/bboltcachestorage/storage_test.go +++ b/solver/bboltcachestorage/storage_test.go @@ -1,8 +1,6 @@ package bboltcachestorage import ( - "io/ioutil" - "os" "path/filepath" "testing" @@ -12,20 +10,15 @@ import ( ) func TestBoltCacheStorage(t *testing.T) { - testutil.RunCacheStorageTests(t, func() (solver.CacheKeyStorage, func()) { - tmpDir, err := ioutil.TempDir("", "storage") - require.NoError(t, err) - - cleanup := func() { - os.RemoveAll(tmpDir) - } + testutil.RunCacheStorageTests(t, func() solver.CacheKeyStorage { + tmpDir := t.TempDir() st, err := NewStore(filepath.Join(tmpDir, "cache.db")) - if err != nil { - cleanup() - } require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, st.db.Close()) + }) - return st, cleanup + return st }) } diff --git a/solver/cache_test.go b/solver/cache_test.go index 5830d62cef21..8e20145f9e42 100644 --- a/solver/cache_test.go +++ b/solver/cache_test.go @@ -19,7 +19,7 @@ func depKeys(cks ...ExportableCacheKey) []CacheKeyWithSelector { } func testCacheKey(dgst digest.Digest, output Index, deps ...ExportableCacheKey) *CacheKey { - k := NewCacheKey(dgst, output) + k := NewCacheKey(dgst, "", output) k.deps = make([][]CacheKeyWithSelector, len(deps)) for i, dep := range deps { k.deps[i] = depKeys(dep) @@ -28,7 +28,7 @@ func testCacheKey(dgst digest.Digest, output Index, deps ...ExportableCacheKey) } func testCacheKeyWithDeps(dgst digest.Digest, output Index, deps [][]CacheKeyWithSelector) *CacheKey { - k := NewCacheKey(dgst, output) + k := NewCacheKey(dgst, "", output) k.deps = deps return k } @@ -42,7 +42,7 @@ func TestInMemoryCache(t *testing.T) { m := NewInMemoryCacheManager() - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0"), time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), testResult("result0"), time.Now()) require.NoError(t, err) keys, err := m.Query(nil, 0, dgst("foo"), 0) @@ -58,7 +58,7 @@ func TestInMemoryCache(t *testing.T) { require.Equal(t, "result0", unwrap(res)) // another record - cacheBar, err := m.Save(NewCacheKey(dgst("bar"), 0), testResult("result1"), time.Now()) + cacheBar, err := m.Save(NewCacheKey(dgst("bar"), "", 0), testResult("result1"), time.Now()) require.NoError(t, err) keys, err = m.Query(nil, 0, dgst("bar"), 0) @@ -155,7 +155,7 @@ func TestInMemoryCacheSelector(t *testing.T) { m := NewInMemoryCacheManager() - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0"), time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), testResult("result0"), time.Now()) require.NoError(t, err) _, err = m.Save(testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ @@ -189,11 +189,11 @@ func TestInMemoryCacheSelectorNested(t *testing.T) { m := NewInMemoryCacheManager() - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("result0"), time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), testResult("result0"), time.Now()) require.NoError(t, err) _, err = m.Save(testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("second"), 0))}}, + {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("second"), "", 0))}}, }), testResult("result1"), time.Now()) require.NoError(t, err) @@ -219,7 +219,7 @@ func TestInMemoryCacheSelectorNested(t *testing.T) { 
require.NoError(t, err) require.Equal(t, len(keys), 0) - keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), 0))), 0, dgst("bar"), 0) + keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), "", 0))), 0, dgst("bar"), 0) require.NoError(t, err) require.Equal(t, len(keys), 1) @@ -231,7 +231,7 @@ func TestInMemoryCacheSelectorNested(t *testing.T) { require.NoError(t, err) require.Equal(t, "result1", unwrap(res)) - keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), 0))), 0, dgst("bar"), 0) + keys, err = m.Query(depKeys(expKey(NewCacheKey(dgst("second"), "", 0))), 0, dgst("bar"), 0) require.NoError(t, err) require.Equal(t, len(keys), 1) } @@ -242,7 +242,7 @@ func TestInMemoryCacheReleaseParent(t *testing.T) { m := NewCacheManager(context.TODO(), identity.NewID(), storage, results) res0 := testResult("result0") - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), res0, time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), res0, time.Now()) require.NoError(t, err) res1 := testResult("result1") @@ -294,7 +294,7 @@ func TestInMemoryCacheRestoreOfflineDeletion(t *testing.T) { m := NewCacheManager(context.TODO(), identity.NewID(), storage, results) res0 := testResult("result0") - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), res0, time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), res0, time.Now()) require.NoError(t, err) res1 := testResult("result1") @@ -329,20 +329,20 @@ func TestCarryOverFromSublink(t *testing.T) { results := NewInMemoryResultStorage() m := NewCacheManager(context.TODO(), identity.NewID(), storage, results) - cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), 0), testResult("resultFoo"), time.Now()) + cacheFoo, err := m.Save(NewCacheKey(dgst("foo"), "", 0), testResult("resultFoo"), time.Now()) require.NoError(t, err) _, err = m.Save(testCacheKeyWithDeps(dgst("res"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("content0"), 0))}}, + {{CacheKey: *cacheFoo, Selector: dgst("sel0")}, {CacheKey: expKey(NewCacheKey(dgst("content0"), "", 0))}}, }), testResult("result0"), time.Now()) require.NoError(t, err) - cacheBar, err := m.Save(NewCacheKey(dgst("bar"), 0), testResult("resultBar"), time.Now()) + cacheBar, err := m.Save(NewCacheKey(dgst("bar"), "", 0), testResult("resultBar"), time.Now()) require.NoError(t, err) keys, err := m.Query([]CacheKeyWithSelector{ {CacheKey: *cacheBar, Selector: dgst("sel0")}, - {CacheKey: expKey(NewCacheKey(dgst("content0"), 0))}, + {CacheKey: expKey(NewCacheKey(dgst("content0"), "", 0))}, }, 0, dgst("res"), 0) require.NoError(t, err) require.Equal(t, len(keys), 1) diff --git a/solver/cachekey.go b/solver/cachekey.go index 3749af0ab3ac..398368716ad2 100644 --- a/solver/cachekey.go +++ b/solver/cachekey.go @@ -7,10 +7,11 @@ import ( ) // NewCacheKey creates a new cache key for a specific output index -func NewCacheKey(dgst digest.Digest, output Index) *CacheKey { +func NewCacheKey(dgst, vtx digest.Digest, output Index) *CacheKey { return &CacheKey{ ID: rootKey(dgst, output).String(), digest: dgst, + vtx: vtx, output: output, ids: map[*cacheManager]string{}, } @@ -29,6 +30,7 @@ type CacheKey struct { ID string deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey digest digest.Digest + vtx digest.Digest output Index ids map[*cacheManager]string @@ -56,6 +58,7 @@ func (ck *CacheKey) clone() *CacheKey { nk := &CacheKey{ ID: ck.ID, digest: ck.digest, + vtx: ck.vtx, output: ck.output, ids: map[*cacheManager]string{}, } 
diff --git a/solver/cacheopts.go b/solver/cacheopts.go index d5821b4e9134..4b661471ed82 100644 --- a/solver/cacheopts.go +++ b/solver/cacheopts.go @@ -4,12 +4,15 @@ import ( "context" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" ) type CacheOpts map[interface{}]interface{} +type progressKey struct{} + type cacheOptGetterKey struct{} func CacheOptGetterOf(ctx context.Context) func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} { @@ -91,3 +94,15 @@ func walkAncestors(ctx context.Context, start *state, f func(*state) bool) { } } } + +func ProgressControllerFromContext(ctx context.Context) progress.Controller { + var pg progress.Controller + if optGetter := CacheOptGetterOf(ctx); optGetter != nil { + if kv := optGetter(false, progressKey{}); kv != nil { + if v, ok := kv[progressKey{}].(progress.Controller); ok { + pg = v + } + } + } + return pg +} diff --git a/solver/edge.go b/solver/edge.go index 8504d9f657d6..5e3068010f80 100644 --- a/solver/edge.go +++ b/solver/edge.go @@ -136,11 +136,11 @@ func (e *edge) release() { // commitOptions returns parameters for the op execution func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) { - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k := NewCacheKey(e.cacheMap.Digest, e.edge.Vertex.Digest(), e.edge.Index) if len(e.deps) == 0 { keys := make([]*CacheKey, 0, len(e.cacheMapDigests)) for _, dgst := range e.cacheMapDigests { - keys = append(keys, NewCacheKey(dgst, e.edge.Index)) + keys = append(keys, NewCacheKey(dgst, e.edge.Vertex.Digest(), e.edge.Index)) } return keys, nil } @@ -201,6 +201,7 @@ func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool { } found := false for _, k := range keys { + k.vtx = e.edge.Vertex.Digest() if _, ok := d.keyMap[k.ID]; !ok { d.keyMap[k.ID] = k found = true @@ -275,7 +276,7 @@ func (e *edge) currentIndexKey() *CacheKey { } } - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k := NewCacheKey(e.cacheMap.Digest, e.edge.Vertex.Digest(), e.edge.Index) k.deps = keys return k @@ -317,10 +318,10 @@ func (e *edge) skipPhase2FastCache(dep *dep) bool { // previous calls. // To avoid deadlocks and resource leaks this function needs to follow // following rules: -// 1) this function needs to return unclosed outgoing requests if some incoming -// requests were not completed -// 2) this function may not return outgoing requests if it has completed all -// incoming requests +// 1. this function needs to return unclosed outgoing requests if some incoming +// requests were not completed +// 2. 
this function may not return outgoing requests if it has completed all +// incoming requests func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, f *pipeFactory) { // process all incoming changes depChanged := false @@ -403,6 +404,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { bklog.G(context.TODO()).Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error } else { for _, k := range keys { + k.vtx = e.edge.Vertex.Digest() records, err := e.op.Cache().Records(k) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) @@ -508,7 +510,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { } else if !dep.slowCacheComplete { dgst := upt.Status().Value.(digest.Digest) if e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc != nil && dgst != "" { - k := NewCacheKey(dgst, -1) + k := NewCacheKey(dgst, "", -1) dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) diff --git a/solver/errdefs/context.go b/solver/errdefs/context.go index ea6bdfbf0985..9e0c5bb990c6 100644 --- a/solver/errdefs/context.go +++ b/solver/errdefs/context.go @@ -3,11 +3,25 @@ package errdefs import ( "context" "errors" + "strings" "github.com/moby/buildkit/util/grpcerrors" "google.golang.org/grpc/codes" ) -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) || grpcerrors.Code(err) == codes.Canceled +func IsCanceled(ctx context.Context, err error) bool { + if errors.Is(err, context.Canceled) || grpcerrors.Code(err) == codes.Canceled { + return true + } + // grpc does not set cancel correctly when stream gets cancelled and then Recv is called + if err != nil && ctx.Err() == context.Canceled { + // when this error comes from containerd it is not typed at all, just concatenated string + if strings.Contains(err.Error(), "EOF") { + return true + } + if strings.Contains(err.Error(), context.Canceled.Error()) { + return true + } + } + return false } diff --git a/solver/errdefs/errdefs.pb.go b/solver/errdefs/errdefs.pb.go index 5da34b6e591b..e02cfb9696d8 100644 --- a/solver/errdefs/errdefs.pb.go +++ b/solver/errdefs/errdefs.pb.go @@ -186,6 +186,7 @@ type Solve struct { MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"` Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"` // Types that are valid to be assigned to Subject: + // // *Solve_File // *Solve_Cache Subject isSolve_Subject `protobuf_oneof:"subject"` diff --git a/solver/exporter.go b/solver/exporter.go index 67ede422239b..78ce77c2d2f5 100644 --- a/solver/exporter.go +++ b/solver/exporter.go @@ -96,12 +96,17 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach addRecord = *e.override } - if e.record == nil && len(e.k.Deps()) > 0 { + exportRecord := opt.ExportRoots + if len(e.k.Deps()) > 0 { + exportRecord = true + } + + if e.record == nil && exportRecord { e.record = getBestResult(e.records) } var remote *Remote - if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { + if v := e.record; v != nil && exportRecord && addRecord { var variants []CacheExporterRecord cm := v.cacheManager @@ -121,7 +126,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if opt.CompressionOpt != nil { for _, r := range remotes { // record all remaining remotes as 
well rec := t.Add(recKey) - rec.AddResult(v.CreatedAt, r) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, r) variants = append(variants, rec) } } @@ -142,7 +147,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if opt.CompressionOpt != nil { for _, r := range remotes { // record all remaining remotes as well rec := t.Add(recKey) - rec.AddResult(v.CreatedAt, r) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, r) variants = append(variants, rec) } } @@ -150,7 +155,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if remote != nil { for _, rec := range allRec { - rec.AddResult(v.CreatedAt, remote) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, remote) } } allRec = append(allRec, variants...) diff --git a/solver/index_test.go b/solver/index_test.go index c77ff3cbce94..da7dd818951d 100644 --- a/solver/index_test.go +++ b/solver/index_test.go @@ -18,21 +18,21 @@ func TestIndexSimple(t *testing.T) { e2 := &edge{} e3 := &edge{} - k1 := NewCacheKey(dgst("foo"), 0) + k1 := NewCacheKey(dgst("foo"), "", 0) v := idx.LoadOrStore(k1, e1) require.Nil(t, v) - k2 := NewCacheKey(dgst("bar"), 0) + k2 := NewCacheKey(dgst("bar"), "", 0) v = idx.LoadOrStore(k2, e2) require.Nil(t, v) - v = idx.LoadOrStore(NewCacheKey(dgst("bar"), 0), e3) + v = idx.LoadOrStore(NewCacheKey(dgst("bar"), "", 0), e3) require.Equal(t, v, e2) - v = idx.LoadOrStore(NewCacheKey(dgst("bar"), 0), e3) + v = idx.LoadOrStore(NewCacheKey(dgst("bar"), "", 0), e3) require.Equal(t, v, e2) - v = idx.LoadOrStore(NewCacheKey(dgst("foo"), 0), e3) + v = idx.LoadOrStore(NewCacheKey(dgst("foo"), "", 0), e3) require.Equal(t, v, e1) idx.Release(e1) @@ -48,16 +48,16 @@ func TestIndexMultiLevelSimple(t *testing.T) { e3 := &edge{} k1 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}}, }) v := idx.LoadOrStore(k1, e1) require.Nil(t, v) k2 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}}, }) v = idx.LoadOrStore(k2, e2) @@ -72,18 +72,18 @@ func TestIndexMultiLevelSimple(t *testing.T) { // update selector k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0))}}, }) v = idx.LoadOrStore(k2, e2) require.Nil(t, v) // add one dep to e1 k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, { - {CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}, - {CacheKey: expKey(NewCacheKey("s1", 1))}, + {CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}, + {CacheKey: expKey(NewCacheKey("s1", "", 1))}, }, }) v = idx.LoadOrStore(k2, e2) @@ -91,9 +91,9 @@ func TestIndexMultiLevelSimple(t *testing.T) { // recheck with only the new dep key k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: 
expKey(NewCacheKey("f0", "", 0))}}, { - {CacheKey: expKey(NewCacheKey("s1", 1))}, + {CacheKey: expKey(NewCacheKey("s1", "", 1))}, }, }) v = idx.LoadOrStore(k2, e2) @@ -101,10 +101,10 @@ func TestIndexMultiLevelSimple(t *testing.T) { // combine e1 and e2 k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, { - {CacheKey: expKey(NewCacheKey("s0", 0))}, - {CacheKey: expKey(NewCacheKey("s1", 1))}, + {CacheKey: expKey(NewCacheKey("s0", "", 0))}, + {CacheKey: expKey(NewCacheKey("s1", "", 1))}, }, }) v = idx.LoadOrStore(k2, e2) @@ -112,8 +112,8 @@ func TestIndexMultiLevelSimple(t *testing.T) { // initial e2 now points to e1 k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0))}}, }) v = idx.LoadOrStore(k2, e2) require.Equal(t, v, e1) @@ -122,8 +122,8 @@ func TestIndexMultiLevelSimple(t *testing.T) { // e2 still remains after e1 is gone k2 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0))}}, }) v = idx.LoadOrStore(k2, e3) require.Equal(t, v, e2) @@ -140,8 +140,8 @@ func TestIndexThreeLevels(t *testing.T) { e3 := &edge{} k1 := testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}}, }) v := idx.LoadOrStore(k1, e1) @@ -151,26 +151,26 @@ func TestIndexThreeLevels(t *testing.T) { require.Equal(t, v, e1) k2 := testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, {{CacheKey: expKey(k1)}}, }) v = idx.LoadOrStore(k2, e2) require.Nil(t, v) k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, { {CacheKey: expKey(k1)}, - {CacheKey: expKey(NewCacheKey("alt", 0))}, + {CacheKey: expKey(NewCacheKey("alt", "", 0))}, }, }) v = idx.LoadOrStore(k2, e2) require.Nil(t, v) k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, { - {CacheKey: expKey(NewCacheKey("alt", 0))}, + {CacheKey: expKey(NewCacheKey("alt", "", 0))}, }, }) v = idx.LoadOrStore(k2, e3) @@ -179,13 +179,13 @@ func TestIndexThreeLevels(t *testing.T) { // change dep in a low key k1 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ { - {CacheKey: expKey(NewCacheKey("f0", 0))}, - {CacheKey: expKey(NewCacheKey("f0_", 0))}, + {CacheKey: expKey(NewCacheKey("f0", "", 0))}, + {CacheKey: expKey(NewCacheKey("f0_", "", 0))}, }, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}}, }) k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, {{CacheKey: expKey(k1)}}, }) v = idx.LoadOrStore(k2, e3) 
@@ -194,12 +194,12 @@ func TestIndexThreeLevels(t *testing.T) { // reload with only f0_ still matches k1 = testCacheKeyWithDeps(dgst("foo"), 1, [][]CacheKeyWithSelector{ { - {CacheKey: expKey(NewCacheKey("f0_", 0))}, + {CacheKey: expKey(NewCacheKey("f0_", "", 0))}, }, - {{CacheKey: expKey(NewCacheKey("s0", 0)), Selector: dgst("s0")}}, + {{CacheKey: expKey(NewCacheKey("s0", "", 0)), Selector: dgst("s0")}}, }) k2 = testCacheKeyWithDeps(dgst("bar"), 0, [][]CacheKeyWithSelector{ - {{CacheKey: expKey(NewCacheKey("f0", 0))}}, + {{CacheKey: expKey(NewCacheKey("f0", "", 0))}}, {{CacheKey: expKey(k1)}}, }) v = idx.LoadOrStore(k2, e3) diff --git a/solver/jobs.go b/solver/jobs.go index 0d59fb368ece..27e15348615f 100644 --- a/solver/jobs.go +++ b/solver/jobs.go @@ -3,7 +3,6 @@ package solver import ( "context" "fmt" - "strings" "sync" "time" @@ -13,9 +12,11 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/util/tracing" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -23,7 +24,7 @@ import ( type ResolveOpFunc func(Vertex, Builder) (Op, error) type Builder interface { - Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) + Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) InContext(ctx context.Context, f func(ctx context.Context, g session.Group) error) error EachValue(ctx context.Context, key string, fn func(interface{}) error) error } @@ -198,16 +199,15 @@ type subBuilder struct { exporters []ExportableCacheKey } -func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { - // TODO(@crazy-max): Handle BuildInfo from subbuild +func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { res, err := sb.solver.subBuild(ctx, e, sb.vtx) if err != nil { - return nil, nil, err + return nil, err } sb.mu.Lock() sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain sb.mu.Unlock() - return res, nil, nil + return &withProvenance{CachedResult: res}, nil } func (sb *subBuilder) InContext(ctx context.Context, f func(context.Context, session.Group) error) error { @@ -230,15 +230,18 @@ func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interfa } type Job struct { - list *Solver - pr *progress.MultiReader - pw progress.Writer - span trace.Span - values sync.Map - id string + list *Solver + pr *progress.MultiReader + pw progress.Writer + span trace.Span + values sync.Map + id string + startedTime time.Time + completedTime time.Time progressCloser func() SessionID string + uniqueID string // unique ID is used for provenance. 
We use a different field that client can't control } type SolverOpt struct { @@ -448,6 +451,8 @@ func (jl *Solver) NewJob(id string) (*Job, error) { progressCloser: progressCloser, span: span, id: id, + startedTime: time.Now(), + uniqueID: identity.NewID(), } jl.jobs[id] = j @@ -497,48 +502,70 @@ func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { } } -func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { +func (j *Job) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { j.span = span } v, err := j.list.load(e.Vertex, nil, j) if err != nil { - return nil, nil, err + return nil, err } e.Vertex = v res, err := j.list.s.build(ctx, e) if err != nil { - return nil, nil, err + return nil, err } j.list.mu.Lock() defer j.list.mu.Unlock() - return res, j.walkBuildSources(ctx, e, make(BuildSources)), nil + return &withProvenance{CachedResult: res, j: j, e: e}, nil } -func (j *Job) walkBuildSources(ctx context.Context, e Edge, bsrc BuildSources) BuildSources { - for _, inp := range e.Vertex.Inputs() { - if st, ok := j.list.actives[inp.Vertex.Digest()]; ok { - st.mu.Lock() - for _, cacheRes := range st.op.cacheRes { - for key, val := range cacheRes.BuildSources { - if _, ok := bsrc[key]; !ok { - bsrc[key] = val - } - } +type withProvenance struct { + CachedResult + j *Job + e Edge +} + +func (wp *withProvenance) WalkProvenance(ctx context.Context, f func(ProvenanceProvider) error) error { + if wp.j == nil { + return nil + } + m := map[digest.Digest]struct{}{} + return wp.j.walkProvenance(ctx, wp.e, f, m) +} + +func (j *Job) walkProvenance(ctx context.Context, e Edge, f func(ProvenanceProvider) error, visited map[digest.Digest]struct{}) error { + if _, ok := visited[e.Vertex.Digest()]; ok { + return nil + } + visited[e.Vertex.Digest()] = struct{}{} + if st, ok := j.list.actives[e.Vertex.Digest()]; ok { + st.mu.Lock() + if wp, ok := st.op.op.(ProvenanceProvider); ok { + if err := f(wp); err != nil { + st.mu.Unlock() + return err } - st.mu.Unlock() - bsrc = j.walkBuildSources(ctx, inp, bsrc) } + st.mu.Unlock() } - return bsrc + for _, inp := range e.Vertex.Inputs() { + if err := j.walkProvenance(ctx, inp, f, visited); err != nil { + return err + } + } + return nil } -func (j *Job) Discard() error { - defer j.progressCloser() +func (j *Job) CloseProgress() { + j.progressCloser() + j.pw.Close() +} +func (j *Job) Discard() error { j.list.mu.Lock() defer j.list.mu.Unlock() @@ -550,9 +577,7 @@ func (j *Job) Discard() error { delete(st.jobs, j) j.list.deleteIfUnreferenced(k, st) } - if _, ok := st.allPw[j.pw]; ok { - delete(st.allPw, j.pw) - } + delete(st.allPw, j.pw) st.mu.Unlock() } @@ -566,6 +591,21 @@ func (j *Job) Discard() error { return nil } +func (j *Job) StartedTime() time.Time { + return j.startedTime +} + +func (j *Job) RegisterCompleteTime() time.Time { + if j.completedTime.IsZero() { + j.completedTime = time.Now() + } + return j.completedTime +} + +func (j *Job) UniqueID() string { + return j.uniqueID +} + func (j *Job) InContext(ctx context.Context, f func(context.Context, session.Group) error) error { return f(progress.WithProgress(ctx, j.pw), session.NewGroup(j.SessionID)) } @@ -621,8 +661,9 @@ type sharedOp struct { subBuilder *subBuilder err error - execRes *execRes - execErr error + execRes *execRes + execDone bool + execErr error cacheRes []*CacheMap cacheDone bool @@ -647,7 +688,7 @@ func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) 
(Result, err ctx = trace.ContextWithSpan(ctx, s.st.mspan) } // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, true) res, err := s.Cache().Load(withAncestorCacheOpts(ctx, s.st), rec) tracing.FinishWithError(span, err) @@ -705,7 +746,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF if err != nil { select { case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { + if errdefs.IsCanceled(ctx, err) { complete = false releaseError(err) err = errors.Wrap(ctx.Err(), err.Error()) @@ -759,7 +800,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, ctx = withAncestorCacheOpts(ctx, s.st) if len(s.st.vtx.Inputs()) == 0 { // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false) defer func() { tracing.FinishWithError(span, retErr) @@ -771,7 +812,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, if err != nil { select { case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { + if errdefs.IsCanceled(ctx, err) { complete = false releaseError(err) err = errors.Wrap(ctx.Err(), err.Error()) @@ -781,6 +822,15 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, } if complete { if err == nil { + if res.Opts == nil { + res.Opts = CacheOpts(make(map[interface{}]interface{})) + } + res.Opts[progressKey{}] = &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + Digest: s.st.vtx.Digest(), + Name: s.st.vtx.Name(), + ProgressGroup: s.st.vtx.Options().ProgressGroup, + } s.cacheRes = append(s.cacheRes, res) s.cacheDone = done } @@ -810,8 +860,11 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } flightControlKey := "exec" res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { - if s.execRes != nil || s.execErr != nil { - return s.execRes, s.execErr + if s.execDone { + if s.execErr != nil { + return nil, s.execErr + } + return s.execRes, nil } release, err := op.Acquire(ctx) if err != nil { @@ -826,7 +879,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, ctx = withAncestorCacheOpts(ctx, s.st) // no cache hit. 
start evaluating the node - span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false) defer func() { tracing.FinishWithError(span, retErr) @@ -838,7 +891,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, if err != nil { select { case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { + if errdefs.IsCanceled(ctx, err) { complete = false releaseError(err) err = errors.Wrap(ctx.Err(), err.Error()) @@ -847,6 +900,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } } if complete { + s.execDone = true if res != nil { var subExporters []ExportableCacheKey s.subBuilder.mu.Lock() @@ -859,9 +913,12 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } s.execErr = err } - return s.execRes, err + if s.execRes == nil || err != nil { + return nil, err + } + return s.execRes, nil }) - if err != nil { + if res == nil || err != nil { return nil, nil, err } r := res.(*execRes) diff --git a/solver/jobs_test.go b/solver/jobs_test.go index 091bd2f8e927..8e3ca77d835a 100644 --- a/solver/jobs_test.go +++ b/solver/jobs_test.go @@ -1,8 +1,6 @@ package solver import ( - "io/ioutil" - "os" "testing" "time" @@ -62,11 +60,8 @@ func testParallelism(t *testing.T, sb integration.Sandbox) { timeStart := time.Now() eg, egCtx := errgroup.WithContext(ctx) - tmpDir, err := ioutil.TempDir("", "solver-jobs-test-") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) solveOpt := client.SolveOpt{ - LocalDirs: map[string]string{"cache": tmpDir}, + LocalDirs: map[string]string{"cache": t.TempDir()}, } eg.Go(func() error { _, err := c.Solve(egCtx, d1, solveOpt, nil) diff --git a/solver/llbsolver/bridge.go b/solver/llbsolver/bridge.go index bd31bbfdc68b..185fe81f0649 100644 --- a/solver/llbsolver/bridge.go +++ b/solver/llbsolver/bridge.go @@ -3,7 +3,6 @@ package llbsolver import ( "context" "fmt" - "strings" "sync" "time" @@ -12,7 +11,6 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" gw "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/identity" @@ -20,9 +18,11 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/sourcepolicy" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" @@ -64,20 +64,35 @@ func (b *llbBridge) Warn(ctx context.Context, dgst digest.Digest, msg string, op }) } -func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, solver.BuildSources, error) { +func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry, pol []*spb.Policy) (solver.CachedResultWithProvenance, error) { w, err := b.resolveWorker() if err != nil { - return nil, nil, err + return nil, err } ent, 
err := loadEntitlements(b.builder) if err != nil { - return nil, nil, err + return nil, err + } + srcPol, err := loadSourcePolicy(b.builder) + if err != nil { + return nil, err + } + var polEngine SourcePolicyEvaluator + if srcPol != nil || len(pol) > 0 { + if srcPol != nil { + pol = append([]*spb.Policy{srcPol}, pol...) + } + + polEngine = sourcepolicy.NewEngine(pol) + if err != nil { + return nil, err + } } var cms []solver.CacheManager for _, im := range cacheImports { cmID, err := cmKey(im) if err != nil { - return nil, nil, err + return nil, err } b.cmsMu.Lock() var cm solver.CacheManager @@ -92,7 +107,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp } ci, desc, err := resolveCI(ctx, g, im.Attrs) if err != nil { - return err + return errors.Wrapf(err, "failed to configure %v cache importer", im.Type) } cmNew, err = ci.Resolve(ctx, desc, cmID, w) return err @@ -112,9 +127,9 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp } dpc := &detectPrunedCacheID{} - edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps()) + edge, err := Load(ctx, def, polEngine, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps()) if err != nil { - return nil, nil, errors.Wrap(err, "failed to load LLB") + return nil, errors.Wrap(err, "failed to load LLB") } if len(dpc.ids) > 0 { @@ -125,107 +140,44 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp if err := b.eachWorker(func(w worker.Worker) error { return w.PruneCacheMounts(ctx, ids) }); err != nil { - return nil, nil, err - } - } - - res, bi, err := b.builder.Build(ctx, edge) - if err != nil { - return nil, nil, err - } - return res, bi, nil -} - -func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { - if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { - return nil, errors.New("cannot solve with both Definition and Frontend specified") - } - - if req.Definition != nil && req.Definition.Def != nil { - res = &frontend.Result{Ref: newResultProxy(b, req)} - if req.Evaluate { - _, err = res.Ref.Result(ctx) - } - } else if req.Frontend != "" { - f, ok := b.frontends[req.Frontend] - if !ok { - return nil, errors.Errorf("invalid frontend: %s", req.Frontend) - } - res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm) - if err != nil { return nil, err } - } else { - return &frontend.Result{}, nil } - if len(res.Refs) > 0 { - for p := range res.Refs { - dtbi, err := buildinfo.GetMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p), req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p)] = dtbi - } - } - } else { - dtbi, err := buildinfo.GetMetadata(res.Metadata, exptypes.ExporterBuildInfo, req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[exptypes.ExporterBuildInfo] = dtbi - } + res, err := b.builder.Build(ctx, edge) + if err != nil { + return nil, err } - - return + return res, nil } type resultProxy struct { - cb func(context.Context) (solver.CachedResult, 
solver.BuildSources, error) - def *pb.Definition + id string + b *provenanceBridge + req frontend.SolveRequest g flightcontrol.Group mu sync.Mutex released bool v solver.CachedResult - bsrc solver.BuildSources err error errResults []solver.Result + provenance *provenance.Capture } -func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy { - rp := &resultProxy{ - def: req.Definition, - } - rp.cb = func(ctx context.Context) (solver.CachedResult, solver.BuildSources, error) { - res, bsrc, err := b.loadResult(ctx, req.Definition, req.CacheImports) - var ee *llberrdefs.ExecError - if errors.As(err, &ee) { - ee.EachRef(func(res solver.Result) error { - rp.errResults = append(rp.errResults, res) - return nil - }) - // acquire ownership so ExecError finalizer doesn't attempt to release as well - ee.OwnerBorrowed = true - } - return res, bsrc, err - } - return rp +func newResultProxy(b *provenanceBridge, req frontend.SolveRequest) *resultProxy { + return &resultProxy{req: req, b: b, id: identity.NewID()} +} + +func (rp *resultProxy) ID() string { + return rp.id } func (rp *resultProxy) Definition() *pb.Definition { - return rp.def + return rp.req.Definition } -func (rp *resultProxy) BuildSources() solver.BuildSources { - return rp.bsrc +func (rp *resultProxy) Provenance() interface{} { + return rp.provenance } func (rp *resultProxy) Release(ctx context.Context) (err error) { @@ -256,12 +208,12 @@ func (rp *resultProxy) wrapError(err error) error { } var ve *errdefs.VertexError if errors.As(err, &ve) { - if rp.def.Source != nil { - locs, ok := rp.def.Source.Locations[string(ve.Digest)] + if rp.req.Definition.Source != nil { + locs, ok := rp.req.Definition.Source.Locations[string(ve.Digest)] if ok { for _, loc := range locs.Locations { err = errdefs.WithSource(err, errdefs.Source{ - Info: rp.def.Source.Infos[loc.SourceIndex], + Info: rp.req.Definition.Source.Infos[loc.SourceIndex], Ranges: loc.Ranges, }) } @@ -271,6 +223,20 @@ func (rp *resultProxy) wrapError(err error) error { return err } +func (rp *resultProxy) loadResult(ctx context.Context) (solver.CachedResultWithProvenance, error) { + res, err := rp.b.loadResult(ctx, rp.req.Definition, rp.req.CacheImports, rp.req.SourcePolicies) + var ee *llberrdefs.ExecError + if errors.As(err, &ee) { + ee.EachRef(func(res solver.Result) error { + rp.errResults = append(rp.errResults, res) + return nil + }) + // acquire ownership so ExecError finalizer doesn't attempt to release as well + ee.OwnerBorrowed = true + } + return res, err +} + func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err error) { defer func() { err = rp.wrapError(err) @@ -286,11 +252,11 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return rp.v, rp.err } rp.mu.Unlock() - v, bsrc, err := rp.cb(ctx) + v, err := rp.loadResult(ctx) if err != nil { select { case <-ctx.Done(): - if strings.Contains(err.Error(), context.Canceled.Error()) { + if errdefs.IsCanceled(ctx, err) { return v, err } default: @@ -305,8 +271,16 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return nil, errors.Errorf("evaluating released result") } rp.v = v - rp.bsrc = bsrc rp.err = err + if err == nil { + capture, err := captureProvenance(ctx, v) + if err != nil && rp.err != nil { + rp.err = errors.Wrapf(rp.err, "failed to capture provenance: %v", err) + v.Release(context.TODO()) + rp.v = nil + } + rp.provenance = capture + } rp.mu.Unlock() return v, err }) diff --git a/solver/llbsolver/file/backend.go 
b/solver/llbsolver/file/backend.go index 732e67474144..974c2e04e877 100644 --- a/solver/llbsolver/file/backend.go +++ b/solver/llbsolver/file/backend.go @@ -2,7 +2,6 @@ package file import ( "context" - "io/ioutil" "log" "os" "path/filepath" @@ -110,7 +109,7 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *cop return err } - if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil { + if err := os.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil { return err } diff --git a/solver/llbsolver/file/refmanager.go b/solver/llbsolver/file/refmanager.go index e1c58c1e5453..b9f3b2ea3ca3 100644 --- a/solver/llbsolver/file/refmanager.go +++ b/solver/llbsolver/file/refmanager.go @@ -11,12 +11,13 @@ import ( "github.com/pkg/errors" ) -func NewRefManager(cm cache.Manager) *RefManager { - return &RefManager{cm: cm} +func NewRefManager(cm cache.Manager, name string) *RefManager { + return &RefManager{cm: cm, desc: name} } type RefManager struct { - cm cache.Manager + cm cache.Manager + desc string } func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool, g session.Group) (_ fileoptypes.Mount, rerr error) { @@ -33,7 +34,13 @@ func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly return &Mount{m: m, readonly: readonly}, nil } - mr, err := rm.cm.New(ctx, ir, g, cache.WithDescription("fileop target"), cache.CachePolicyRetain) + desc := "fileop target" + + if d := rm.desc; d != "" { + desc = d + } + + mr, err := rm.cm.New(ctx, ir, g, cache.WithDescription(desc), cache.CachePolicyRetain) if err != nil { return nil, err } diff --git a/solver/llbsolver/history.go b/solver/llbsolver/history.go new file mode 100644 index 000000000000..c8310cc48ebb --- /dev/null +++ b/solver/llbsolver/history.go @@ -0,0 +1,675 @@ +package llbsolver + +import ( + "bufio" + "context" + "encoding/binary" + "io" + "os" + "sort" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/leases" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/util/leaseutil" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + recordsBucket = "_records" +) + +type HistoryQueueOpt struct { + DB *bolt.DB + LeaseManager leases.Manager + ContentStore content.Store + CleanConfig *config.HistoryConfig +} + +type HistoryQueue struct { + mu sync.Mutex + initOnce sync.Once + HistoryQueueOpt + ps *pubsub[*controlapi.BuildHistoryEvent] + active map[string]*controlapi.BuildHistoryRecord + refs map[string]int + deleted map[string]struct{} +} + +type StatusImportResult struct { + Descriptor ocispecs.Descriptor + NumCachedSteps int + NumCompletedSteps int + NumTotalSteps int +} + +func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { + if opt.CleanConfig == nil { + opt.CleanConfig = &config.HistoryConfig{ + MaxAge: int64((48 * time.Hour).Seconds()), + MaxEntries: 50, + } + } + h := &HistoryQueue{ + HistoryQueueOpt: opt, + ps: &pubsub[*controlapi.BuildHistoryEvent]{ + m: map[*channel[*controlapi.BuildHistoryEvent]]struct{}{}, + }, + active: map[string]*controlapi.BuildHistoryRecord{}, + refs: map[string]int{}, + deleted: map[string]struct{}{}, + } + + go func() { + for { + h.gc() + time.Sleep(120 * 
time.Second) + } + }() + + return h +} + +func (h *HistoryQueue) gc() error { + var records []*controlapi.BuildHistoryRecord + + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, dt []byte) error { + var br controlapi.BuildHistoryRecord + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", key) + } + if br.Pinned { + return nil + } + records = append(records, &br) + return nil + }) + }); err != nil { + return err + } + + // for a record to be deleted by gc, it must exceed both the MaxEntries and MaxAge criteria + + if len(records) < int(h.CleanConfig.MaxEntries) { + return nil + } + + sort.Slice(records, func(i, j int) bool { + return records[i].CompletedAt.Before(*records[j].CompletedAt) + }) + + h.mu.Lock() + defer h.mu.Unlock() + + now := time.Now() + for _, r := range records[h.CleanConfig.MaxEntries:] { + if now.Add(time.Duration(h.CleanConfig.MaxAge) * -time.Second).After(*r.CompletedAt) { + if err := h.delete(r.Ref, false); err != nil { + return err + } + } + } + + return nil +} + +func (h *HistoryQueue) delete(ref string, sync bool) error { + if _, ok := h.refs[ref]; ok { + h.deleted[ref] = struct{}{} + return nil + } + delete(h.deleted, ref) + if err := h.DB.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + err1 := b.Delete([]byte(ref)) + var opts []leases.DeleteOpt + if sync { + opts = append(opts, leases.SynchronousDelete) + } + err2 := h.LeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) + if err1 != nil { + return err1 + } + return err2 + }); err != nil { + return err + } + return nil +} + +func (h *HistoryQueue) init() error { + var err error + h.initOnce.Do(func() { + err = h.DB.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists([]byte(recordsBucket)) + return err + }) + }) + return err +} + +func (h *HistoryQueue) leaseID(id string) string { + return "ref_" + id +} + +func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor) error { + if desc == nil { + return nil + } + return h.LeaseManager.AddResource(ctx, l, leases.Resource{ + ID: string(desc.Digest), + Type: "content", + }) +} + +func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *controlapi.BuildHistoryRecord) error) error { + h.mu.Lock() + defer h.mu.Unlock() + + var br controlapi.BuildHistoryRecord + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + dt := b.Get([]byte(ref)) + if dt == nil { + return os.ErrNotExist + } + + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", ref) + } + return nil + }); err != nil { + return err + } + + if err := upt(&br); err != nil { + return err + } + br.Generation++ + + if br.Ref != ref { + return errors.Errorf("invalid ref change") + } + + if err := h.update(ctx, br); err != nil { + return err + } + h.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_COMPLETE, + Record: &br, + }) + return nil +} + +func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client.SolveStatus) error { + h.init() + var br controlapi.BuildHistoryRecord + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + dt := 
b.Get([]byte(ref)) + if dt == nil { + return os.ErrNotExist + } + + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", ref) + } + return nil + }); err != nil { + return err + } + + if br.Logs == nil { + return nil + } + + ra, err := h.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: br.Logs.Digest, + Size: br.Logs.Size_, + MediaType: br.Logs.MediaType, + }) + if err != nil { + return err + } + defer ra.Close() + + brdr := bufio.NewReader(&reader{ReaderAt: ra}) + + buf := make([]byte, 32*1024) + + for { + _, err := io.ReadAtLeast(brdr, buf[:4], 4) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + sz := binary.LittleEndian.Uint32(buf[:4]) + if sz > uint32(len(buf)) { + buf = make([]byte, sz) + } + _, err = io.ReadAtLeast(brdr, buf[:sz], int(sz)) + if err != nil { + return err + } + var sr controlapi.StatusResponse + if err := sr.Unmarshal(buf[:sz]); err != nil { + return err + } + st <- client.NewSolveStatus(&sr) + } + + return nil +} + +func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRecord) error { + return h.DB.Update(func(tx *bolt.Tx) (err error) { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + dt, err := rec.Marshal() + if err != nil { + return err + } + + l, err := h.LeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) + created := true + if err != nil { + if !errors.Is(err, errdefs.ErrAlreadyExists) { + return err + } + l = leases.Lease{ID: h.leaseID(rec.Ref)} + created = false + } + + defer func() { + if err != nil && created { + h.LeaseManager.Delete(ctx, l) + } + }() + + if err := h.addResource(ctx, l, rec.Logs); err != nil { + return err + } + if err := h.addResource(ctx, l, rec.Trace); err != nil { + return err + } + if rec.Result != nil { + if err := h.addResource(ctx, l, rec.Result.Result); err != nil { + return err + } + for _, att := range rec.Result.Attestations { + if err := h.addResource(ctx, l, att); err != nil { + return err + } + } + } + for _, r := range rec.Results { + if err := h.addResource(ctx, l, r.Result); err != nil { + return err + } + for _, att := range r.Attestations { + if err := h.addResource(ctx, l, att); err != nil { + return err + } + } + } + + return b.Put([]byte(rec.Ref), dt) + }) +} + +func (h *HistoryQueue) Update(ctx context.Context, e *controlapi.BuildHistoryEvent) error { + h.init() + h.mu.Lock() + defer h.mu.Unlock() + + if e.Type == controlapi.BuildHistoryEventType_STARTED { + h.active[e.Record.Ref] = e.Record + h.ps.Send(e) + } + + if e.Type == controlapi.BuildHistoryEventType_COMPLETE { + delete(h.active, e.Record.Ref) + if err := h.update(ctx, *e.Record); err != nil { + return err + } + h.ps.Send(e) + } + return nil +} + +func (h *HistoryQueue) Delete(ctx context.Context, ref string) error { + h.mu.Lock() + defer h.mu.Unlock() + + return h.delete(ref, true) +} + +func (h *HistoryQueue) OpenBlobWriter(ctx context.Context, mt string) (_ *Writer, err error) { + l, err := h.LeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + h.LeaseManager.Delete(ctx, l) + } + }() + + ctx = leases.WithLease(ctx, l.ID) + + w, err := content.OpenWriter(ctx, h.ContentStore, content.WithRef("history-"+h.leaseID(l.ID))) + if err != nil { + return nil, err + } + + return &Writer{ + mt: mt, + lm: h.LeaseManager, + l: l, + w: w, + dgstr: digest.Canonical.Digester(), + }, nil +} 
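Editor's note: `Status` above and `ImportStatus` below share a simple on-disk framing for progress logs. Each marshaled `controlapi.StatusResponse` is stored as a 4-byte little-endian length prefix followed by the protobuf bytes, and the reader grows its scratch buffer whenever a frame exceeds it. A minimal self-contained sketch of that framing, using plain byte payloads in place of `StatusResponse`:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame appends one record in the history-log format: a 4-byte
// little-endian length header followed by the payload bytes.
func writeFrame(w io.Writer, payload []byte) error {
	hdr := make([]byte, 4)
	binary.LittleEndian.PutUint32(hdr, uint32(len(payload)))
	if _, err := w.Write(hdr); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame reads one length-prefixed record, growing the scratch buffer
// when a frame is larger than it, mirroring the loop in Status above.
func readFrame(r io.Reader, buf []byte) ([]byte, []byte, error) {
	hdr := make([]byte, 4)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, buf, err // io.EOF here means a clean end of stream
	}
	sz := binary.LittleEndian.Uint32(hdr)
	if sz > uint32(len(buf)) {
		buf = make([]byte, sz)
	}
	if _, err := io.ReadFull(r, buf[:sz]); err != nil {
		return nil, buf, err
	}
	return buf[:sz], buf, nil
}

func main() {
	var log bytes.Buffer
	for _, p := range []string{"status-1", "status-2"} {
		if err := writeFrame(&log, []byte(p)); err != nil {
			panic(err)
		}
	}
	buf := make([]byte, 32*1024) // same initial size as the loops above
	for {
		frame, grown, err := readFrame(&log, buf)
		if err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		buf = grown
		fmt.Println(string(frame))
	}
}
```

The 32 KiB initial buffer matches the one used in both loops above, and `io.ReadFull` is equivalent to the `io.ReadAtLeast(r, buf[:n], n)` calls in `Status`.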
+ +type Writer struct { + mt string + w content.Writer + lm leases.Manager + l leases.Lease + + dgstr digest.Digester + sz int +} + +func (w *Writer) Write(p []byte) (int, error) { + if _, err := w.dgstr.Hash().Write(p); err != nil { + return 0, err + } + w.sz += len(p) + return w.w.Write(p) +} + +func (w *Writer) Discard() { + w.w.Close() + w.lm.Delete(context.TODO(), w.l) +} + +func (w *Writer) Commit(ctx context.Context) (*ocispecs.Descriptor, func(), error) { + dgst := w.dgstr.Digest() + sz := int64(w.sz) + if err := w.w.Commit(ctx, int64(w.sz), dgst); err != nil { + if !errdefs.IsAlreadyExists(err) { + w.Discard() + return nil, nil, err + } + } + return &ocispecs.Descriptor{ + MediaType: w.mt, + Digest: dgst, + Size: sz, + }, + func() { + w.lm.Delete(context.TODO(), w.l) + }, nil +} + +func (h *HistoryQueue) ImportStatus(ctx context.Context, ch chan *client.SolveStatus) (_ *StatusImportResult, _ func(), err error) { + defer func() { + if ch == nil { + return + } + for range ch { + } + }() + + w, err := h.OpenBlobWriter(ctx, "application/vnd.buildkit.status.v0") + if err != nil { + return nil, nil, err + } + + bufW := bufio.NewWriter(w) + + defer func() { + if err != nil { + w.Discard() + } + }() + + type vtxInfo struct { + cached bool + completed bool + } + vtxMap := make(map[digest.Digest]*vtxInfo) + + buf := make([]byte, 32*1024) + for st := range ch { + for _, vtx := range st.Vertexes { + if _, ok := vtxMap[vtx.Digest]; !ok { + vtxMap[vtx.Digest] = &vtxInfo{} + } + if vtx.Cached { + vtxMap[vtx.Digest].cached = true + } + if vtx.Completed != nil { + vtxMap[vtx.Digest].completed = true + } + } + + hdr := make([]byte, 4) + for _, pst := range st.Marshal() { + sz := pst.Size() + if len(buf) < sz { + buf = make([]byte, sz) + } + n, err := pst.MarshalTo(buf) + if err != nil { + return nil, nil, err + } + binary.LittleEndian.PutUint32(hdr, uint32(n)) + if _, err := bufW.Write(hdr); err != nil { + return nil, nil, err + } + if _, err := bufW.Write(buf[:n]); err != nil { + return nil, nil, err + } + } + } + if err := bufW.Flush(); err != nil { + return nil, nil, err + } + desc, release, err := w.Commit(ctx) + if err != nil { + return nil, nil, err + } + + numCached := 0 + numCompleted := 0 + for _, info := range vtxMap { + if info.cached { + numCached++ + } + if info.completed { + numCompleted++ + } + } + + return &StatusImportResult{ + Descriptor: *desc, + NumCachedSteps: numCached, + NumCompletedSteps: numCompleted, + NumTotalSteps: len(vtxMap), + }, release, nil +} + +func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryRequest, f func(*controlapi.BuildHistoryEvent) error) error { + h.init() + + h.mu.Lock() + sub := h.ps.Subscribe() + defer sub.close() + + if req.Ref != "" { + if _, ok := h.deleted[req.Ref]; ok { + h.mu.Unlock() + return errors.Wrapf(os.ErrNotExist, "ref %s is deleted", req.Ref) + } + + h.refs[req.Ref]++ + defer func() { + h.mu.Lock() + h.refs[req.Ref]-- + if _, ok := h.deleted[req.Ref]; ok { + if h.refs[req.Ref] == 0 { + delete(h.refs, req.Ref) + h.delete(req.Ref, false) + } + } + h.mu.Unlock() + }() + } + + for _, e := range h.active { + if req.Ref != "" && e.Ref != req.Ref { + continue + } + sub.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_STARTED, + Record: e, + }) + } + + h.mu.Unlock() + + if !req.ActiveOnly { + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, dt []byte) error { + if req.Ref != "" && req.Ref 
!= string(key) { + return nil + } + var br controlapi.BuildHistoryRecord + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", key) + } + if err := f(&controlapi.BuildHistoryEvent{ + Record: &br, + Type: controlapi.BuildHistoryEventType_COMPLETE, + }); err != nil { + return err + } + return nil + }) + }); err != nil { + return err + } + } + + if req.EarlyExit { + return nil + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e := <-sub.ch: + if req.Ref != "" && req.Ref != e.Record.Ref { + continue + } + if err := f(e); err != nil { + return err + } + case <-sub.done: + return nil + } + } +} + +type pubsub[T any] struct { + mu sync.Mutex + m map[*channel[T]]struct{} +} + +func (p *pubsub[T]) Subscribe() *channel[T] { + p.mu.Lock() + c := &channel[T]{ + ps: p, + ch: make(chan T, 32), + done: make(chan struct{}), + } + p.m[c] = struct{}{} + p.mu.Unlock() + return c +} + +func (p *pubsub[T]) Send(v T) { + p.mu.Lock() + for c := range p.m { + go c.send(v) + } + p.mu.Unlock() +} + +type channel[T any] struct { + ps *pubsub[T] + ch chan T + done chan struct{} + closeOnce sync.Once +} + +func (p *channel[T]) send(v T) { + select { + case p.ch <- v: + case <-p.done: + } +} + +func (p *channel[T]) close() { + p.closeOnce.Do(func() { + p.ps.mu.Lock() + delete(p.ps.m, p) + p.ps.mu.Unlock() + close(p.done) + }) +} + +type reader struct { + io.ReaderAt + pos int64 +} + +func (r *reader) Read(p []byte) (int, error) { + n, err := r.ReaderAt.ReadAt(p, r.pos) + r.pos += int64(len(p)) + return n, err +} diff --git a/solver/llbsolver/mounts/mount.go b/solver/llbsolver/mounts/mount.go index ffa4df5da367..2cfeaae7a213 100644 --- a/solver/llbsolver/mounts/mount.go +++ b/solver/llbsolver/mounts/mount.go @@ -3,7 +3,6 @@ package mounts import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -252,14 +251,14 @@ func (mm *MountManager) getSecretMountable(ctx context.Context, m *pb.Mount, g s err = mm.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error { dt, err = secrets.GetSecret(ctx, caller, id) if err != nil { - if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional { - return nil - } return err } return nil }) - if err != nil || dt == nil { + if err != nil { + if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional { + return nil, nil + } return nil, err } return &secretMount{mount: m, data: dt, idmap: mm.cm.IdentityMapping()}, nil @@ -282,7 +281,7 @@ type secretMountInstance struct { } func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { - dir, err := ioutil.TempDir("", "buildkit-secrets") + dir, err := os.MkdirTemp("", "buildkit-secrets") if err != nil { return nil, nil, errors.Wrap(err, "failed to create temp dir") } @@ -320,7 +319,7 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { randID := identity.NewID() fp := filepath.Join(dir, randID) - if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil { + if err := os.WriteFile(fp, sm.sm.data, 0600); err != nil { cleanup() return nil, nil, err } diff --git a/solver/llbsolver/mounts/mount_test.go b/solver/llbsolver/mounts/mount_test.go index 7469aef55861..cbcd7ee46bdb 100644 --- a/solver/llbsolver/mounts/mount_test.go +++ b/solver/llbsolver/mounts/mount_test.go @@ -2,7 +2,6 @@ package mounts import ( "context" - "io/ioutil" "os" "path/filepath" "sync" @@ -43,41 +42,19 @@ type cmOut struct { cs content.Store } -func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, 
cleanup func() error, err error) { +func newCacheManager(ctx context.Context, t *testing.T, opt cmOpt) (co *cmOut, err error) { ns, ok := namespaces.Namespace(ctx) if !ok { - return nil, nil, errors.Errorf("namespace required for test") + return nil, errors.Errorf("namespace required for test") } if opt.snapshotterName == "" { opt.snapshotterName = "native" } - tmpdir, err := ioutil.TempDir("", "cachemanager") - if err != nil { - return nil, nil, err - } + tmpdir := t.TempDir() - defers := make([]func() error, 0) - cleanup = func() error { - var err error - for i := range defers { - if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil { - err = err1 - } - } - return err - } - defer func() { - if err != nil { - cleanup() - } - }() - if opt.tmpdir == "" { - defers = append(defers, func() error { - return os.RemoveAll(tmpdir) - }) - } else { + if opt.tmpdir != "" { os.RemoveAll(tmpdir) tmpdir = opt.tmpdir } @@ -85,29 +62,29 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() if opt.snapshotter == nil { snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) if err != nil { - return nil, nil, err + return nil, err } opt.snapshotter = snapshotter } store, err := local.NewStore(tmpdir) if err != nil { - return nil, nil, err + return nil, err } db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil) if err != nil { - return nil, nil, err + return nil, err } - defers = append(defers, func() error { - return db.Close() + t.Cleanup(func() { + require.NoError(t, db.Close()) }) mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{ opt.snapshotterName: opt.snapshotter, }) if err := mdb.Init(context.TODO()); err != nil { - return nil, nil, err + return nil, err } lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), ns) @@ -117,8 +94,11 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db")) if err != nil { - return nil, nil, err + return nil, err } + t.Cleanup(func() { + require.NoError(t, md.Close()) + }) cm, err := cache.NewManager(cache.ManagerOpt{ Snapshotter: snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil), @@ -131,13 +111,17 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() MountPoolRoot: filepath.Join(tmpdir, "cachemounts"), }) if err != nil { - return nil, nil, err + return nil, err } + t.Cleanup(func() { + require.NoError(t, cm.Close()) + }) + return &cmOut{ manager: cm, lm: lm, cs: mdb.ContentStore(), - }, cleanup, nil + }, nil } func newRefGetter(m cache.Manager, shared *cacheRefs) *cacheRefGetter { @@ -153,21 +137,20 @@ func TestCacheMountPrivateRefs(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() - g1 := newRefGetter(co.manager, sharedCacheRefs) g2 := newRefGetter(co.manager, sharedCacheRefs) g3 := newRefGetter(co.manager, 
sharedCacheRefs) @@ -220,21 +203,20 @@ func TestCacheMountSharedRefs(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() - g1 := newRefGetter(co.manager, sharedCacheRefs) g2 := newRefGetter(co.manager, sharedCacheRefs) g3 := newRefGetter(co.manager, sharedCacheRefs) @@ -270,21 +252,20 @@ func TestCacheMountLockedRefs(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() - g1 := newRefGetter(co.manager, sharedCacheRefs) g2 := newRefGetter(co.manager, sharedCacheRefs) @@ -323,7 +304,7 @@ func TestCacheMountLockedRefs(t *testing.T) { select { case <-gotRef4: - case <-time.After(500 * time.Millisecond): + case <-time.After(2 * time.Second): require.FailNow(t, "mount did not unlock") } } @@ -333,21 +314,20 @@ func TestCacheMountSharedRefsDeadlock(t *testing.T) { // not parallel ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") - tmpdir, err := ioutil.TempDir("", "cachemanager") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) - co, cleanup, err := newCacheManager(ctx, cmOpt{ + co, err := newCacheManager(ctx, t, cmOpt{ snapshotter: snapshotter, snapshotterName: "native", }) require.NoError(t, err) - defer cleanup() - var sharedCacheRefs = &cacheRefs{} g1 := newRefGetter(co.manager, sharedCacheRefs) diff --git a/solver/llbsolver/ops/build.go b/solver/llbsolver/ops/build.go index 39d2a7707571..fd47df3ae311 100644 --- a/solver/llbsolver/ops/build.go +++ b/solver/llbsolver/ops/build.go @@ -11,7 +11,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -20,24 +20,26 @@ import ( const buildCacheType = "buildkit.build.v0" -type buildOp struct { +type BuildOp struct { op *pb.BuildOp b frontend.FrontendLLBBridge v solver.Vertex } -func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &BuildOp{} + +func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) 
(*BuildOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &buildOp{ + return &BuildOp{ op: op.Build, b: b, v: v, }, nil } -func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (b *BuildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { dt, err := json.Marshal(struct { Type string Exec *pb.BuildOp @@ -59,7 +61,7 @@ func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*so }, true, nil } -func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { +func (b *BuildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { if b.op.Builder != pb.LLBBuilder { return nil, errors.Errorf("only LLB builder is currently allowed") } @@ -130,9 +132,12 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, err } - for _, r := range newRes.Refs { - r.Release(context.TODO()) - } + newRes.EachRef(func(ref solver.ResultProxy) error { + if ref == newRes.Ref { + return nil + } + return ref.Release(context.TODO()) + }) r, err := newRes.Ref.Result(ctx) if err != nil { @@ -142,7 +147,9 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return []solver.Result{r}, err } -func (b *buildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (b *BuildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { // buildOp itself does not count towards parallelism budget. return func() {}, nil } + +func (b *BuildOp) IsProvenanceProvider() {} diff --git a/solver/llbsolver/ops/diff.go b/solver/llbsolver/ops/diff.go index 1a05f7a6c7ca..338a8748e8c6 100644 --- a/solver/llbsolver/ops/diff.go +++ b/solver/llbsolver/ops/diff.go @@ -4,15 +4,13 @@ import ( "context" "encoding/json" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" "github.com/pkg/errors" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -23,11 +21,10 @@ type diffOp struct { op *pb.DiffOp worker worker.Worker vtx solver.Vertex - pg progress.Controller } func NewDiffOp(v solver.Vertex, op *pb.Op_Diff, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &diffOp{ @@ -64,17 +61,8 @@ func (d *diffOp) CacheMap(ctx context.Context, group session.Group, index int) ( ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, depCount), - Opts: solver.CacheOpts(make(map[interface{}]interface{})), } - d.pg = &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: d.vtx.Digest(), - Name: d.vtx.Name(), - ProgressGroup: d.vtx.Options().ProgressGroup, - } - cm.Opts[cache.ProgressKey{}] = d.pg - return cm, true, nil } @@ -121,7 +109,7 @@ func (d *diffOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return []solver.Result{worker.NewWorkerRefResult(nil, d.worker)}, nil } - diffRef, err := d.worker.CacheManager().Diff(ctx, lowerRef, upperRef, d.pg, + diffRef, err := 
d.worker.CacheManager().Diff(ctx, lowerRef, upperRef, solver.ProgressControllerFromContext(ctx), cache.WithDescription(d.vtx.Name())) if err != nil { return nil, err diff --git a/solver/llbsolver/ops/exec.go b/solver/llbsolver/ops/exec.go index 6cca733c0bf2..2bee1283b436 100644 --- a/solver/llbsolver/ops/exec.go +++ b/solver/llbsolver/ops/exec.go @@ -17,12 +17,10 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/util/progress/logs" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/worker" @@ -35,7 +33,7 @@ import ( const execCacheType = "buildkit.exec.v0" -type execOp struct { +type ExecOp struct { op *pb.ExecOp cm cache.Manager mm *mounts.MountManager @@ -45,15 +43,16 @@ type execOp struct { platform *pb.Platform numInputs int parallelism *semaphore.Weighted - vtx solver.Vertex } -func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &ExecOp{} + +func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (*ExecOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " ")) - return &execOp{ + return &ExecOp{ op: op.Exec, mm: mounts.NewMountManager(name, cm, sm), cm: cm, @@ -63,10 +62,13 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. 
w: w, platform: platform, parallelism: parallelism, - vtx: v, }, nil } +func (e *ExecOp) Proto() *pb.ExecOp { + return e.op +} + func cloneExecOp(old *pb.ExecOp) pb.ExecOp { n := *old meta := *n.Meta @@ -84,7 +86,7 @@ func cloneExecOp(old *pb.ExecOp) pb.ExecOp { return n } -func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (e *ExecOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { op := cloneExecOp(e.op) for i := range op.Meta.ExtraHosts { h := op.Meta.ExtraHosts[i] @@ -145,14 +147,6 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, e.numInputs), - Opts: solver.CacheOpts(map[interface{}]interface{}{ - cache.ProgressKey{}: &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: e.vtx.Digest(), - Name: e.vtx.Name(), - ProgressGroup: e.vtx.Options().ProgressGroup, - }, - }), } deps, err := e.getMountDeps() @@ -169,9 +163,9 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) } if !dep.NoContentBasedHash { - cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) + cm.Deps[i].ComputeDigestFunc = opsutils.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) } - cm.Deps[i].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[i].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -201,10 +195,10 @@ func dedupePaths(inp []string) []string { return paths } -func toSelectors(p []string) []llbsolver.Selector { - sel := make([]llbsolver.Selector, 0, len(p)) +func toSelectors(p []string) []opsutils.Selector { + sel := make([]opsutils.Selector, 0, len(p)) for _, p := range p { - sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true}) + sel = append(sel, opsutils.Selector{Path: p, FollowLinks: true}) } return sel } @@ -214,7 +208,7 @@ type dep struct { NoContentBasedHash bool } -func (e *execOp) getMountDeps() ([]dep, error) { +func (e *ExecOp) getMountDeps() ([]dep, error) { deps := make([]dep, e.numInputs) for _, m := range e.op.Mounts { if m.Input == pb.Empty { @@ -246,7 +240,7 @@ func addDefaultEnvvar(env []string, k, v string) []string { return append(env, k+"="+v) } -func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { +func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { trace.SpanFromContext(ctx).AddEvent("ExecOp started") refs := make([]*worker.WorkerRef, len(inputs)) @@ -325,17 +319,18 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } meta := executor.Meta{ - Args: e.op.Meta.Args, - Env: e.op.Meta.Env, - Cwd: e.op.Meta.Cwd, - User: e.op.Meta.User, - Hostname: e.op.Meta.Hostname, - ReadonlyRootFS: p.ReadonlyRootFS, - ExtraHosts: extraHosts, - Ulimit: e.op.Meta.Ulimit, - CgroupParent: e.op.Meta.CgroupParent, - NetMode: e.op.Network, - SecurityMode: e.op.Security, + Args: e.op.Meta.Args, + Env: e.op.Meta.Env, + Cwd: e.op.Meta.Cwd, + User: e.op.Meta.User, + Hostname: e.op.Meta.Hostname, + ReadonlyRootFS: p.ReadonlyRootFS, + ExtraHosts: extraHosts, + Ulimit: e.op.Meta.Ulimit, + CgroupParent: e.op.Meta.CgroupParent, + NetMode: e.op.Network, + SecurityMode: e.op.Security, + RemoveMountStubsRecursive: 
e.op.Meta.RemoveMountStubsRecursive, } if e.op.Meta.ProxyEnv != nil { @@ -405,7 +400,7 @@ func proxyEnvList(p *pb.ProxyEnv) []string { return out } -func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (e *ExecOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if e.parallelism == nil { return func() {}, nil } @@ -418,7 +413,7 @@ func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { +func (e *ExecOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { secretenv := e.op.Secretenv if len(secretenv) == 0 { return nil, nil @@ -448,3 +443,6 @@ func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, } return out, nil } + +func (e *ExecOp) IsProvenanceProvider() { +} diff --git a/solver/llbsolver/ops/exec_binfmt.go b/solver/llbsolver/ops/exec_binfmt.go index 56433d49fdec..c2c5504cc36b 100644 --- a/solver/llbsolver/ops/exec_binfmt.go +++ b/solver/llbsolver/ops/exec_binfmt.go @@ -2,7 +2,6 @@ package ops import ( "context" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -28,6 +27,7 @@ var qemuArchMap = map[string]string{ "riscv64": "riscv64", "arm": "arm", "s390x": "s390x", + "ppc64": "ppc64", "ppc64le": "ppc64le", "386": "i386", } @@ -47,7 +47,7 @@ type staticEmulatorMount struct { } func (m *staticEmulatorMount) Mount() ([]mount.Mount, func() error, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-qemu-emulator") + tmpdir, err := os.MkdirTemp("", "buildkit-qemu-emulator") if err != nil { return nil, nil, err } diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 012ef4cc12a5..7bbb3276797c 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -13,14 +13,12 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/file" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -37,11 +35,10 @@ type fileOp struct { solver *FileOpSolver numInputs int parallelism *semaphore.Weighted - vtx solver.Vertex } func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &fileOp{ @@ -49,14 +46,13 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *s md: cm, numInputs: len(v.Inputs()), w: w, - solver: NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm)), + solver: NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm, v.Name())), parallelism: parallelism, - vtx: v, }, nil } func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { - selectors := map[int][]llbsolver.Selector{} + selectors := map[int][]opsutils.Selector{} invalidSelectors := map[int]struct{}{} actions := make([][]byte, 0, len(f.op.Actions)) @@ 
-138,14 +134,6 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, f.numInputs), - Opts: solver.CacheOpts(map[interface{}]interface{}{ - cache.ProgressKey{}: &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: f.vtx.Digest(), - Name: f.vtx.Name(), - ProgressGroup: f.vtx.Options().ProgressGroup, - }, - }), } for idx, m := range selectors { @@ -161,10 +149,10 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol }) cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) - cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m)) + cm.Deps[idx].ComputeDigestFunc = opsutils.NewContentHashFunc(dedupeSelectors(m)) } for idx := range cm.Deps { - cm.Deps[idx].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[idx].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -206,8 +194,8 @@ func (f *fileOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func addSelector(m map[int][]llbsolver.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { - s := llbsolver.Selector{ +func addSelector(m map[int][]opsutils.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { + s := opsutils.Selector{ Path: sel, FollowLinks: followLinks, Wildcard: wildcard && containsWildcards(sel), @@ -231,7 +219,7 @@ func containsWildcards(name string) bool { return false } -func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { +func dedupeSelectors(m []opsutils.Selector) []opsutils.Selector { paths := make([]string, 0, len(m)) pathsFollow := make([]string, 0, len(m)) for _, sel := range m { @@ -245,13 +233,13 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { } paths = dedupePaths(paths) pathsFollow = dedupePaths(pathsFollow) - selectors := make([]llbsolver.Selector, 0, len(m)) + selectors := make([]opsutils.Selector, 0, len(m)) for _, p := range paths { - selectors = append(selectors, llbsolver.Selector{Path: p}) + selectors = append(selectors, opsutils.Selector{Path: p}) } for _, p := range pathsFollow { - selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true}) + selectors = append(selectors, opsutils.Selector{Path: p, FollowLinks: true}) } for _, sel := range m { @@ -267,7 +255,7 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { return selectors } -func processOwner(chopt *pb.ChownOpt, selectors map[int][]llbsolver.Selector) error { +func processOwner(chopt *pb.ChownOpt, selectors map[int][]opsutils.Selector) error { if chopt == nil { return nil } @@ -677,3 +665,14 @@ func isDefaultIndexes(idxs [][]int) bool { } return true } + +func unlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return errors.Errorf("invalid reference: %T", res) + } + if ref.ImmutableRef == nil { + return nil + } + return ref.ImmutableRef.Extract(ctx, g) +} diff --git a/solver/llbsolver/ops/merge.go b/solver/llbsolver/ops/merge.go index 13bb60ba88a7..db1b025bff40 100644 --- a/solver/llbsolver/ops/merge.go +++ b/solver/llbsolver/ops/merge.go @@ -4,15 +4,13 @@ import ( "context" "encoding/json" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" "github.com/pkg/errors" 
"github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -23,11 +21,10 @@ type mergeOp struct { op *pb.MergeOp worker worker.Worker vtx solver.Vertex - pg progress.Controller } func NewMergeOp(v solver.Vertex, op *pb.Op_Merge, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &mergeOp{ @@ -56,17 +53,8 @@ func (m *mergeOp) CacheMap(ctx context.Context, group session.Group, index int) ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, len(m.op.Inputs)), - Opts: solver.CacheOpts(make(map[interface{}]interface{})), } - m.pg = &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: m.vtx.Digest(), - Name: m.vtx.Name(), - ProgressGroup: m.vtx.Options().ProgressGroup, - } - cm.Opts[cache.ProgressKey{}] = m.pg - return cm, true, nil } @@ -93,7 +81,7 @@ func (m *mergeOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, nil } - mergedRef, err := m.worker.CacheManager().Merge(ctx, refs, m.pg, + mergedRef, err := m.worker.CacheManager().Merge(ctx, refs, solver.ProgressControllerFromContext(ctx), cache.WithDescription(m.vtx.Name())) if err != nil { return nil, err diff --git a/solver/llbsolver/ops/opsutils/contenthash.go b/solver/llbsolver/ops/opsutils/contenthash.go new file mode 100644 index 000000000000..8bdd8f939e15 --- /dev/null +++ b/solver/llbsolver/ops/opsutils/contenthash.go @@ -0,0 +1,71 @@ +package opsutils + +import ( + "bytes" + "context" + "path" + + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type Selector struct { + Path string + Wildcard bool + FollowLinks bool + IncludePatterns []string + ExcludePatterns []string +} + +func (sel Selector) HasWildcardOrFilters() bool { + return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 +} + +func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { + return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return "", errors.Errorf("invalid reference: %T", res) + } + + if len(selectors) == 0 { + selectors = []Selector{{}} + } + + dgsts := make([][]byte, len(selectors)) + + eg, ctx := errgroup.WithContext(ctx) + + for i, sel := range selectors { + i, sel := i, sel + eg.Go(func() error { + dgst, err := contenthash.Checksum( + ctx, ref.ImmutableRef, path.Join("/", sel.Path), + contenthash.ChecksumOpts{ + Wildcard: sel.Wildcard, + FollowLinks: sel.FollowLinks, + IncludePatterns: sel.IncludePatterns, + ExcludePatterns: sel.ExcludePatterns, + }, + s, + ) + if err != nil { + return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) + } + dgsts[i] = []byte(dgst) + return nil + }) + } + + if err := eg.Wait(); err != nil { + return "", err + } + + return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil + } +} diff --git a/solver/llbsolver/ops/opsutils/validate.go b/solver/llbsolver/ops/opsutils/validate.go new file mode 100644 index 
000000000000..8e0d30d9ecf3 --- /dev/null +++ b/solver/llbsolver/ops/opsutils/validate.go @@ -0,0 +1,63 @@ +package opsutils + +import ( + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" +) + +func Validate(op *pb.Op) error { + if op == nil { + return errors.Errorf("invalid nil op") + } + + switch op := op.Op.(type) { + case *pb.Op_Source: + if op.Source == nil { + return errors.Errorf("invalid nil source op") + } + case *pb.Op_Exec: + if op.Exec == nil { + return errors.Errorf("invalid nil exec op") + } + if op.Exec.Meta == nil { + return errors.Errorf("invalid exec op with no meta") + } + if len(op.Exec.Meta.Args) == 0 { + return errors.Errorf("invalid exec op with no args") + } + if len(op.Exec.Mounts) == 0 { + return errors.Errorf("invalid exec op with no mounts") + } + + isRoot := false + for _, m := range op.Exec.Mounts { + if m.Dest == pb.RootMount { + isRoot = true + break + } + } + if !isRoot { + return errors.Errorf("invalid exec op with no rootfs") + } + case *pb.Op_File: + if op.File == nil { + return errors.Errorf("invalid nil file op") + } + if len(op.File.Actions) == 0 { + return errors.Errorf("invalid file op with no actions") + } + case *pb.Op_Build: + if op.Build == nil { + return errors.Errorf("invalid nil build op") + } + case *pb.Op_Merge: + if op.Merge == nil { + return errors.Errorf("invalid nil merge op") + } + case *pb.Op_Diff: + if op.Diff == nil { + return errors.Errorf("invalid nil diff op") + } + } + return nil +} diff --git a/solver/llbsolver/ops/source.go b/solver/llbsolver/ops/source.go index d24a902da570..fabd300d4b5c 100644 --- a/solver/llbsolver/ops/source.go +++ b/solver/llbsolver/ops/source.go @@ -7,7 +7,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/worker" @@ -17,7 +17,7 @@ import ( const sourceCacheType = "buildkit.source.v0" -type sourceOp struct { +type SourceOp struct { mu sync.Mutex op *pb.Op_Source platform *pb.Platform @@ -27,13 +27,17 @@ type sourceOp struct { w worker.Worker vtx solver.Vertex parallelism *semaphore.Weighted + pin string + id source.Identifier } -func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &SourceOp{} + +func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (*SourceOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &sourceOp{ + return &SourceOp{ op: op, sm: sm, w: w, @@ -44,7 +48,13 @@ func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm }, nil } -func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { +func (s *SourceOp) IsProvenanceProvider() {} + +func (s *SourceOp) Pin() (source.Identifier, string) { + return s.id, s.pin +} + +func (s *SourceOp) instance(ctx context.Context) (source.SourceInstance, error) { s.mu.Lock() defer s.mu.Unlock() if s.src != nil { @@ -59,10 +69,11 @@ func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) return nil, err } s.src = src + s.id = id return s.src, nil } -func (s *sourceOp) 
CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (s *SourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { src, err := s.instance(ctx) if err != nil { return nil, false, err @@ -73,25 +84,23 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s return nil, false, err } + if s.pin == "" { + s.pin = pin + } + dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k)) if strings.HasPrefix(k, "session:") { dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":")) } - var buildSources map[string]string - if !strings.HasPrefix(s.op.Source.GetIdentifier(), "local://") { - buildSources = map[string]string{s.op.Source.GetIdentifier(): pin} - } - return &solver.CacheMap{ // TODO: add os/arch - Digest: dgst, - Opts: cacheOpts, - BuildSources: buildSources, + Digest: dgst, + Opts: cacheOpts, }, done, nil } -func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { +func (s *SourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { src, err := s.instance(ctx) if err != nil { return nil, err @@ -103,7 +112,7 @@ func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil } -func (s *sourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (s *SourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if s.parallelism == nil { return func() {}, nil } diff --git a/solver/llbsolver/proc/provenance.go b/solver/llbsolver/proc/provenance.go new file mode 100644 index 000000000000..1af3af196028 --- /dev/null +++ b/solver/llbsolver/proc/provenance.go @@ -0,0 +1,77 @@ +package proc + +import ( + "context" + "encoding/json" + "strconv" + + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + ps, err := exptypes.ParsePlatforms(res.Metadata) + if err != nil { + return nil, err + } + + var inlineOnly bool + if v, err := strconv.ParseBool(attrs["inline-only"]); v && err == nil { + inlineOnly = true + } + + for _, p := range ps.Platforms { + cp, ok := res.Provenance.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("no build info found for provenance %s", p.ID) + } + + if cp == nil { + continue + } + + ref, ok := res.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("could not find ref %s", p.ID) + } + + pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j) + if err != nil { + return nil, err + } + + filename := "provenance.json" + if v, ok := attrs["filename"]; ok { + filename = v + } + + res.AddAttestation(p.ID, llbsolver.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Metadata: map[string][]byte{ + result.AttestationReasonKey: []byte(result.AttestationReasonProvenance), + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(inlineOnly)), + }, + InToto: result.InTotoAttestation{ + PredicateType: 
slsa02.PredicateSLSAProvenance, + }, + Path: filename, + ContentFunc: func() ([]byte, error) { + pr, err := pc.Predicate() + if err != nil { + return nil, err + } + + return json.MarshalIndent(pr, "", " ") + }, + }) + } + + return res, nil + } +} diff --git a/solver/llbsolver/proc/sbom.go b/solver/llbsolver/proc/sbom.go new file mode 100644 index 000000000000..2d7e969ba547 --- /dev/null +++ b/solver/llbsolver/proc/sbom.go @@ -0,0 +1,76 @@ +package proc + +import ( + "context" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations/sbom" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + // skip sbom generation if we already have an sbom + if sbom.HasSBOM(res.Result) { + return res, nil + } + + ps, err := exptypes.ParsePlatforms(res.Metadata) + if err != nil { + return nil, err + } + + scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef) + if err != nil { + return nil, err + } + if scanner == nil { + return res, nil + } + + for _, p := range ps.Platforms { + ref, ok := res.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("could not find ref %s", p.ID) + } + defop, err := llb.NewDefinitionOp(ref.Definition()) + if err != nil { + return nil, err + } + st := llb.NewState(defop) + + var opts []llb.ConstraintsOpt + if !useCache { + opts = append(opts, llb.IgnoreCache) + } + att, err := scanner(ctx, p.ID, st, nil, opts...) 
+ if err != nil { + return nil, err + } + attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (solver.ResultProxy, error) { + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + + r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ // TODO: buildinfo + Definition: def.ToPB(), + }, j.SessionID) + if err != nil { + return nil, err + } + return r.Ref, nil + }) + if err != nil { + return nil, err + } + res.AddAttestation(p.ID, *attSolve) + } + return res, nil + } +} diff --git a/solver/llbsolver/provenance.go b/solver/llbsolver/provenance.go new file mode 100644 index 000000000000..b30581c852d9 --- /dev/null +++ b/solver/llbsolver/provenance.go @@ -0,0 +1,571 @@ +package llbsolver + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type resultWithBridge struct { + res *frontend.Result + bridge *provenanceBridge +} + +// provenanceBridge provides scoped access to LLBBridge and captures the request it makes for provenance +type provenanceBridge struct { + *llbBridge + mu sync.Mutex + req *frontend.SolveRequest + + images []provenance.ImageSource + builds []resultWithBridge + subBridges []*provenanceBridge +} + +func (b *provenanceBridge) eachRef(f func(r solver.ResultProxy) error) error { + for _, b := range b.builds { + if err := b.res.EachRef(f); err != nil { + return err + } + } + for _, b := range b.subBridges { + if err := b.eachRef(f); err != nil { + return err + } + } + return nil +} + +func (b *provenanceBridge) allImages() []provenance.ImageSource { + res := make([]provenance.ImageSource, 0, len(b.images)) + res = append(res, b.images...) + for _, sb := range b.subBridges { + res = append(res, sb.allImages()...) 
+ } + return res +} + +func (b *provenanceBridge) requests(r *frontend.Result) (*resultRequests, error) { + reqs := &resultRequests{ + refs: make(map[string]*resultWithBridge), + atts: make(map[string][]*resultWithBridge), + } + + if r.Ref != nil { + ref, ok := b.findByResult(r.Ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", r.Ref.ID()) + } + reqs.ref = ref + } + + for k, ref := range r.Refs { + r, ok := b.findByResult(ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", ref.ID()) + } + reqs.refs[k] = r + } + + for k, atts := range r.Attestations { + for _, att := range atts { + if att.Ref == nil { + continue + } + r, ok := b.findByResult(att.Ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", att.Ref.ID()) + } + reqs.atts[k] = append(reqs.atts[k], r) + } + } + + ps, err := exptypes.ParsePlatforms(r.Metadata) + if err != nil { + return nil, err + } + reqs.platforms = ps.Platforms + + return reqs, nil +} + +func (b *provenanceBridge) findByResult(rp solver.ResultProxy) (*resultWithBridge, bool) { + for _, br := range b.subBridges { + if req, ok := br.findByResult(rp); ok { + return req, true + } + } + for _, bld := range b.builds { + found := false + bld.res.EachRef(func(r solver.ResultProxy) error { + if r.ID() == rp.ID() { + found = true + } + return nil + }) + if found { + return &bld, true + } + } + return nil, false +} + +func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { + dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) + if err != nil { + return "", nil, err + } + + b.images = append(b.images, provenance.ImageSource{ + Ref: ref, + Platform: opt.Platform, + Digest: dgst, + }) + return dgst, config, nil +} + +func (b *provenanceBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { + if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { + return nil, errors.New("cannot solve with both Definition and Frontend specified") + } + + if req.Definition != nil && req.Definition.Def != nil { + rp := newResultProxy(b, req) + res = &frontend.Result{Ref: rp} + b.mu.Lock() + b.builds = append(b.builds, resultWithBridge{res: res, bridge: b}) + b.mu.Unlock() + } else if req.Frontend != "" { + f, ok := b.llbBridge.frontends[req.Frontend] + if !ok { + return nil, errors.Errorf("invalid frontend: %s", req.Frontend) + } + wb := &provenanceBridge{llbBridge: b.llbBridge, req: &req} + res, err = f.Solve(ctx, wb, req.FrontendOpt, req.FrontendInputs, sid, b.llbBridge.sm) + if err != nil { + return nil, err + } + wb.builds = append(wb.builds, resultWithBridge{res: res, bridge: wb}) + b.mu.Lock() + b.subBridges = append(b.subBridges, wb) + b.mu.Unlock() + } else { + return &frontend.Result{}, nil + } + if req.Evaluate { + err = res.EachRef(func(ref solver.ResultProxy) error { + _, err := ref.Result(ctx) + return err + }) + } + return +} + +type resultRequests struct { + ref *resultWithBridge + refs map[string]*resultWithBridge + atts map[string][]*resultWithBridge + platforms []exptypes.Platform +} + +// filterImagePlatforms filters out image sources that are not for the current platform when an image exists for every platform in the result +func (reqs *resultRequests) filterImagePlatforms(k string, imgs []provenance.ImageSource) []provenance.ImageSource { + if len(reqs.platforms) == 0 { + return imgs + } + m := map[string]string{} + 
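// collect refs that have a matching image recorded for every requested platform
+ 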
for _, img := range imgs { + if _, ok := m[img.Ref]; ok { + continue + } + hasPlatform := true + for _, p := range reqs.platforms { + matcher := platforms.NewMatcher(p.Platform) + found := false + for _, img2 := range imgs { + if img.Ref == img2.Ref && img2.Platform != nil { + if matcher.Match(*img2.Platform) { + found = true + break + } + } + } + if !found { + hasPlatform = false + break + } + } + if hasPlatform { + m[img.Ref] = img.Ref + } + } + + var current ocispecs.Platform + for _, p := range reqs.platforms { + if p.ID == k { + current = p.Platform + } + } + + out := make([]provenance.ImageSource, 0, len(imgs)) + for _, img := range imgs { + if _, ok := m[img.Ref]; ok && img.Platform != nil { + if current.OS == img.Platform.OS && current.Architecture == img.Platform.Architecture { + out = append(out, img) + } + } else { + out = append(out, img) + } + } + return out +} + +func (reqs *resultRequests) allRes() map[string]struct{} { + res := make(map[string]struct{}) + if reqs.ref != nil { + res[reqs.ref.res.Ref.ID()] = struct{}{} + } + for _, r := range reqs.refs { + res[r.res.Ref.ID()] = struct{}{} + } + for _, rs := range reqs.atts { + for _, r := range rs { + res[r.res.Ref.ID()] = struct{}{} + } + } + return res +} + +func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenance) (*provenance.Capture, error) { + if res == nil { + return nil, nil + } + c := &provenance.Capture{} + + err := res.WalkProvenance(ctx, func(pp solver.ProvenanceProvider) error { + switch op := pp.(type) { + case *ops.SourceOp: + id, pin := op.Pin() + switch s := id.(type) { + case *source.ImageIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse image digest %s", pin) + } + c.AddImage(provenance.ImageSource{ + Ref: s.Reference.String(), + Platform: s.Platform, + Digest: dgst, + }) + case *source.LocalIdentifier: + c.AddLocal(provenance.LocalSource{ + Name: s.Name, + }) + case *source.GitIdentifier: + url := s.Remote + if s.Ref != "" { + url += "#" + s.Ref + } + c.AddGit(provenance.GitSource{ + URL: url, + Commit: pin, + }) + if s.AuthTokenSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthTokenSecret, + Optional: true, + }) + } + if s.AuthHeaderSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthHeaderSecret, + Optional: true, + }) + } + if s.MountSSHSock != "" { + c.AddSSH(provenance.SSH{ + ID: s.MountSSHSock, + Optional: true, + }) + } + case *source.HTTPIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse HTTP digest %s", pin) + } + c.AddHTTP(provenance.HTTPSource{ + URL: s.URL, + Digest: dgst, + }) + case *source.OCIIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse OCI digest %s", pin) + } + c.AddLocalImage(provenance.ImageSource{ + Ref: s.Reference.String(), + Platform: s.Platform, + Digest: dgst, + }) + default: + return errors.Errorf("unknown source identifier %T", id) + } + case *ops.ExecOp: + pr := op.Proto() + for _, m := range pr.Mounts { + if m.MountType == pb.MountType_SECRET { + c.AddSecret(provenance.Secret{ + ID: m.SecretOpt.GetID(), + Optional: m.SecretOpt.GetOptional(), + }) + } + if m.MountType == pb.MountType_SSH { + c.AddSSH(provenance.SSH{ + ID: m.SSHOpt.GetID(), + Optional: m.SSHOpt.GetOptional(), + }) + } + } + for _, se := range pr.Secretenv { + c.AddSecret(provenance.Secret{ + ID: se.GetID(), + Optional: se.GetOptional(), + }) + } + if pr.Network != pb.NetMode_NONE { + c.NetworkAccess 
= true + } + case *ops.BuildOp: + c.IncompleteMaterials = true // not supported yet + } + return nil + }) + if err != nil { + return nil, err + } + return c, err +} + +type ProvenanceCreator struct { + pr *provenance.ProvenancePredicate + j *solver.Job + addLayers func() error +} + +func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job) (*ProvenanceCreator, error) { + var reproducible bool + if v, ok := attrs["reproducible"]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse reproducible flag %q", v) + } + reproducible = b + } + + mode := "max" + if v, ok := attrs["mode"]; ok { + switch v { + case "full": + mode = "max" + case "max", "min": + mode = v + default: + return nil, errors.Errorf("invalid mode %q", v) + } + } + + pr, err := provenance.NewPredicate(cp) + if err != nil { + return nil, err + } + + st := j.StartedTime() + + pr.Metadata.BuildStartedOn = &st + pr.Metadata.Reproducible = reproducible + pr.Metadata.BuildInvocationID = j.UniqueID() + + pr.Builder.ID = attrs["builder-id"] + + var addLayers func() error + + switch mode { + case "min": + args := make(map[string]string) + for k, v := range pr.Invocation.Parameters.Args { + if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { + pr.Metadata.Completeness.Parameters = false + continue + } + args[k] = v + } + pr.Invocation.Parameters.Args = args + pr.Invocation.Parameters.Secrets = nil + pr.Invocation.Parameters.SSH = nil + case "max": + dgsts, err := provenance.AddBuildConfig(ctx, pr, res) + if err != nil { + return nil, err + } + + r, err := res.Result(ctx) + if err != nil { + return nil, err + } + + wref, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid worker ref %T", r.Sys()) + } + + addLayers = func() error { + e := newCacheExporter() + + if wref.ImmutableRef != nil { + ctx = withDescHandlerCacheOpts(ctx, wref.ImmutableRef) + } + + if _, err := r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + ResolveRemotes: resolveRemotes, + Mode: solver.CacheExportModeRemoteOnly, + ExportRoots: true, + }); err != nil { + return err + } + + m := map[string][][]ocispecs.Descriptor{} + + for l, descs := range e.layers { + idx, ok := dgsts[l.digest] + if !ok { + continue + } + + m[fmt.Sprintf("step%d:%d", idx, l.index)] = descs + } + + if len(m) != 0 { + if pr.Metadata == nil { + pr.Metadata = &provenance.ProvenanceMetadata{} + } + + pr.Metadata.BuildKitMetadata.Layers = m + } + + return nil + } + default: + return nil, errors.Errorf("invalid mode %q", mode) + } + + return &ProvenanceCreator{ + pr: pr, + j: j, + addLayers: addLayers, + }, nil +} + +func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) { + end := p.j.RegisterCompleteTime() + p.pr.Metadata.BuildFinishedOn = &end + + if p.addLayers != nil { + if err := p.addLayers(); err != nil { + return nil, err + } + } + + return p.pr, nil +} + +type edge struct { + digest digest.Digest + index int +} + +func newCacheExporter() *cacheExporter { + return &cacheExporter{ + m: map[interface{}]struct{}{}, + layers: map[edge][][]ocispecs.Descriptor{}, + } +} + +type cacheExporter struct { + layers map[edge][][]ocispecs.Descriptor + m map[interface{}]struct{} +} + +func (ce *cacheExporter) Add(dgst digest.Digest) solver.CacheExporterRecord { + return &cacheRecord{ + ce: ce, + } +} + +func (ce *cacheExporter) Visit(v interface{}) { + ce.m[v] = struct{}{} +} + +func (ce 
*cacheExporter) Visited(v interface{}) bool { + _, ok := ce.m[v] + return ok +} + +type cacheRecord struct { + ce *cacheExporter +} + +func (c *cacheRecord) AddResult(dgst digest.Digest, idx int, createdAt time.Time, result *solver.Remote) { + if result == nil || dgst == "" { + return + } + e := edge{ + digest: dgst, + index: idx, + } + descs := make([]ocispecs.Descriptor, len(result.Descriptors)) + for i, desc := range result.Descriptors { + d := desc + d.Annotations = containerimage.RemoveInternalLayerAnnotations(d.Annotations, true) + descs[i] = d + } + c.ce.layers[e] = append(c.ce.layers[e], descs) +} + +func (c *cacheRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { +} + +func resolveRemotes(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid result: %T", res.Sys()) + } + + remotes, err := ref.GetRemotes(ctx, false, config.RefConfig{}, true, nil) + if err != nil { + if errors.Is(err, cache.ErrNoBlobs) { + return nil, nil + } + return nil, err + } + return remotes, nil +} diff --git a/solver/llbsolver/provenance/buildconfig.go b/solver/llbsolver/provenance/buildconfig.go new file mode 100644 index 000000000000..4d9bf85ec1ba --- /dev/null +++ b/solver/llbsolver/provenance/buildconfig.go @@ -0,0 +1,187 @@ +package provenance + +import ( + "context" + "fmt" + + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type BuildConfig struct { + Definition []BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +type BuildStep struct { + ID string `json:"id,omitempty"` + Op interface{} `json:"op,omitempty"` + Inputs []string `json:"inputs,omitempty"` +} + +type Source struct { + Locations map[string]*pb.Locations `json:"locations,omitempty"` + Infos []SourceInfo `json:"infos,omitempty"` +} + +type SourceInfo struct { + Filename string `json:"filename,omitempty"` + Data []byte `json:"data,omitempty"` + Definition []BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { + m := map[digest.Digest]string{} + for k, v := range idx { + m[k] = fmt.Sprintf("step%d", v) + } + return m +} + +func AddBuildConfig(ctx context.Context, p *ProvenancePredicate, rp solver.ResultProxy) (map[digest.Digest]int, error) { + def := rp.Definition() + steps, indexes, err := toBuildSteps(def) + if err != nil { + return nil, err + } + + bc := &BuildConfig{ + Definition: steps, + DigestMapping: digestMap(indexes), + } + + p.BuildConfig = bc + + if def.Source != nil { + sis := make([]SourceInfo, len(def.Source.Infos)) + for i, si := range def.Source.Infos { + steps, indexes, err := toBuildSteps(si.Definition) + if err != nil { + return nil, err + } + s := SourceInfo{ + Filename: si.Filename, + Data: si.Data, + Definition: steps, + DigestMapping: digestMap(indexes), + } + sis[i] = s + } + + if len(def.Source.Infos) != 0 { + locs := map[string]*pb.Locations{} + for k, l := range def.Source.Locations { + idx, ok := indexes[digest.Digest(k)] + if !ok { + continue + } + locs[fmt.Sprintf("step%d", idx)] = l + } + + if p.Metadata == nil { + p.Metadata = &ProvenanceMetadata{} + } + p.Metadata.BuildKitMetadata.Source = &Source{ + Infos: sis, + Locations: locs, + } + } + } + + return indexes, nil +} + 
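+// toBuildSteps flattens an LLB definition into an ordered list of build steps, walking the graph depth-first from the terminal vertex so that inputs appear before the steps that consume them.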
+func toBuildSteps(def *pb.Definition) ([]BuildStep, map[digest.Digest]int, error) { + if def == nil || len(def.Def) == 0 { + return nil, nil, nil + } + + ops := make(map[digest.Digest]*pb.Op) + defs := make(map[digest.Digest][]byte) + + var dgst digest.Digest + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + for k := range src.Attrs { + if k == "local.session" || k == "local.unique" { + delete(src.Attrs, k) + } + } + } + dgst = digest.FromBytes(dt) + ops[dgst] = &op + defs[dgst] = dt + } + + if dgst == "" { + return nil, nil, nil + } + + // depth first backwards + dgsts := make([]digest.Digest, 0, len(def.Def)) + op := ops[dgst] + + if op.Op != nil { + return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) + } + + if len(op.Inputs) != 1 { + return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) + } + + visited := map[digest.Digest]struct{}{} + dgsts, err := walkDigests(dgsts, ops, dgst, visited) + if err != nil { + return nil, nil, err + } + indexes := map[digest.Digest]int{} + for i, dgst := range dgsts { + indexes[dgst] = i + } + + out := make([]BuildStep, 0, len(dgsts)) + for i, dgst := range dgsts { + op := *ops[dgst] + inputs := make([]string, len(op.Inputs)) + for i, inp := range op.Inputs { + inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) + } + op.Inputs = nil + out = append(out, BuildStep{ + ID: fmt.Sprintf("step%d", i), + Inputs: inputs, + Op: op, + }) + } + return out, indexes, nil +} + +func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { + if _, ok := visited[dgst]; ok { + return dgsts, nil + } + op, ok := ops[dgst] + if !ok { + return nil, errors.Errorf("failed to find input %v", dgst) + } + if op == nil { + return nil, errors.Errorf("invalid nil input %v", dgst) + } + visited[dgst] = struct{}{} + for _, inp := range op.Inputs { + var err error + dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) + if err != nil { + return nil, err + } + } + dgsts = append(dgsts, dgst) + return dgsts, nil +} diff --git a/solver/llbsolver/provenance/capture.go b/solver/llbsolver/provenance/capture.go new file mode 100644 index 000000000000..6252ebc3cf34 --- /dev/null +++ b/solver/llbsolver/provenance/capture.go @@ -0,0 +1,251 @@ +package provenance + +import ( + "sort" + + distreference "github.com/docker/distribution/reference" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/util/urlutil" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Result = result.Result[*Capture] + +type ImageSource struct { + Ref string + Platform *ocispecs.Platform + Digest digest.Digest +} + +type GitSource struct { + URL string + Commit string +} + +type HTTPSource struct { + URL string + Digest digest.Digest +} + +type LocalSource struct { + Name string `json:"name"` +} + +type Secret struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type SSH struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type Sources struct { + Images []ImageSource + LocalImages []ImageSource + Git []GitSource + HTTP []HTTPSource + Local []LocalSource +} + +type Capture struct { + Frontend string + Args map[string]string + Sources Sources + Secrets []Secret + SSH []SSH + 
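// NetworkAccess is set when at least one exec step runs with network enabled
+ 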
NetworkAccess bool + IncompleteMaterials bool +} + +func (c *Capture) Merge(c2 *Capture) error { + if c2 == nil { + return nil + } + for _, i := range c2.Sources.Images { + c.AddImage(i) + } + for _, i := range c2.Sources.LocalImages { + c.AddLocalImage(i) + } + for _, l := range c2.Sources.Local { + c.AddLocal(l) + } + for _, g := range c2.Sources.Git { + c.AddGit(g) + } + for _, h := range c2.Sources.HTTP { + c.AddHTTP(h) + } + for _, s := range c2.Secrets { + c.AddSecret(s) + } + for _, s := range c2.SSH { + c.AddSSH(s) + } + if c2.NetworkAccess { + c.NetworkAccess = true + } + if c2.IncompleteMaterials { + c.IncompleteMaterials = true + } + return nil +} + +func (c *Capture) Sort() { + sort.Slice(c.Sources.Images, func(i, j int) bool { + return c.Sources.Images[i].Ref < c.Sources.Images[j].Ref + }) + sort.Slice(c.Sources.LocalImages, func(i, j int) bool { + return c.Sources.LocalImages[i].Ref < c.Sources.LocalImages[j].Ref + }) + sort.Slice(c.Sources.Local, func(i, j int) bool { + return c.Sources.Local[i].Name < c.Sources.Local[j].Name + }) + sort.Slice(c.Sources.Git, func(i, j int) bool { + return c.Sources.Git[i].URL < c.Sources.Git[j].URL + }) + sort.Slice(c.Sources.HTTP, func(i, j int) bool { + return c.Sources.HTTP[i].URL < c.Sources.HTTP[j].URL + }) + sort.Slice(c.Secrets, func(i, j int) bool { + return c.Secrets[i].ID < c.Secrets[j].ID + }) + sort.Slice(c.SSH, func(i, j int) bool { + return c.SSH[i].ID < c.SSH[j].ID + }) +} + +// OptimizeImageSources filters out image sources referenced by digest if the same +// image is already present by a tag reference. +func (c *Capture) OptimizeImageSources() error { + m := map[string]struct{}{} + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); !ok { + m[nameTag] = struct{}{} + } + } + + images := make([]ImageSource, 0, len(c.Sources.Images)) + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); ok { + if _, ok := m[nameTag]; ok { + continue + } + } + images = append(images, i) + } + c.Sources.Images = images + return nil +} + +func (c *Capture) AddImage(i ImageSource) { + for _, v := range c.Sources.Images { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.Images = append(c.Sources.Images, i) +} + +func (c *Capture) AddLocalImage(i ImageSource) { + for _, v := range c.Sources.LocalImages { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.LocalImages = append(c.Sources.LocalImages, i) +} + +func (c *Capture) AddLocal(l LocalSource) { + for _, v := range c.Sources.Local { + if v.Name == l.Name { + return + } + } + c.Sources.Local = append(c.Sources.Local, l) +} + +func (c *Capture) AddGit(g GitSource) { + g.URL = urlutil.RedactCredentials(g.URL) + for _, v := range c.Sources.Git { + if v.URL == g.URL { + return + } + } + c.Sources.Git = append(c.Sources.Git, g) +} + +func (c *Capture) AddHTTP(h HTTPSource) { + h.URL = urlutil.RedactCredentials(h.URL) + for _, v := 
range c.Sources.HTTP { + if v.URL == h.URL { + return + } + } + c.Sources.HTTP = append(c.Sources.HTTP, h) +} + +func (c *Capture) AddSecret(s Secret) { + for i, v := range c.Secrets { + if v.ID == s.ID { + if !s.Optional { + c.Secrets[i].Optional = false + } + return + } + } + c.Secrets = append(c.Secrets, s) +} + +func (c *Capture) AddSSH(s SSH) { + if s.ID == "" { + s.ID = "default" + } + for i, v := range c.SSH { + if v.ID == s.ID { + if !s.Optional { + c.SSH[i].Optional = false + } + return + } + } + c.SSH = append(c.SSH, s) +} + +func parseRefName(s string) (distreference.Named, string, error) { + ref, err := distreference.ParseNormalizedNamed(s) + if err != nil { + return nil, "", err + } + name := ref.Name() + tag := "latest" + if r, ok := ref.(distreference.Tagged); ok { + tag = r.Tag() + } + return ref, name + ":" + tag, nil +} diff --git a/solver/llbsolver/provenance/predicate.go b/solver/llbsolver/provenance/predicate.go new file mode 100644 index 000000000000..a7b5a78cca51 --- /dev/null +++ b/solver/llbsolver/provenance/predicate.go @@ -0,0 +1,258 @@ +package provenance + +import ( + "strings" + + "github.com/containerd/containerd/platforms" + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/util/purl" + "github.com/moby/buildkit/util/urlutil" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/package-url/packageurl-go" +) + +const ( + BuildKitBuildType = "https://mobyproject.org/buildkit@v1" +) + +type ProvenancePredicate struct { + slsa02.ProvenancePredicate + Invocation ProvenanceInvocation `json:"invocation,omitempty"` + BuildConfig *BuildConfig `json:"buildConfig,omitempty"` + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` +} + +type ProvenanceInvocation struct { + ConfigSource slsa02.ConfigSource `json:"configSource,omitempty"` + Parameters Parameters `json:"parameters,omitempty"` + Environment Environment `json:"environment,omitempty"` +} + +type Parameters struct { + Frontend string `json:"frontend,omitempty"` + Args map[string]string `json:"args,omitempty"` + Secrets []*Secret `json:"secrets,omitempty"` + SSH []*SSH `json:"ssh,omitempty"` + Locals []*LocalSource `json:"locals,omitempty"` + // TODO: select export attributes + // TODO: frontend inputs +} + +type Environment struct { + Platform string `json:"platform"` +} + +type ProvenanceMetadata struct { + slsa02.ProvenanceMetadata + BuildKitMetadata BuildKitMetadata `json:"https://mobyproject.org/buildkit@v1#metadata,omitempty"` + Hermetic bool `json:"https://mobyproject.org/buildkit@v1#hermetic,omitempty"` +} + +type BuildKitMetadata struct { + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` +} + +func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { + count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) + len(srcs.LocalImages) + out := make([]slsa.ProvenanceMaterial, 0, count) + + for _, s := range srcs.Images { + uri, err := purl.RefToPURL(s.Ref, s.Platform) + if err != nil { + return nil, err + } + out = append(out, slsa.ProvenanceMaterial{ + URI: uri, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + + for _, s := range srcs.Git { + out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }) + } + + for _, s := range srcs.HTTP { + 
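// HTTP sources are recorded by URL together with the digest of the fetched content
+ 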
out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + + for _, s := range srcs.LocalImages { + q := []packageurl.Qualifier{} + if s.Platform != nil { + q = append(q, packageurl.Qualifier{ + Key: "platform", + Value: platforms.Format(*s.Platform), + }) + } + packageurl.NewPackageURL(packageurl.TypeOCI, "", s.Ref, "", q, "") + out = append(out, slsa.ProvenanceMaterial{ + URI: s.Ref, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + return out, nil +} + +func findMaterial(srcs Sources, uri string) (*slsa.ProvenanceMaterial, bool) { + for _, s := range srcs.Git { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }, true + } + } + for _, s := range srcs.HTTP { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }, true + } + } + return nil, false +} + +func NewPredicate(c *Capture) (*ProvenancePredicate, error) { + materials, err := slsaMaterials(c.Sources) + if err != nil { + return nil, err + } + inv := ProvenanceInvocation{} + + contextKey := "context" + if v, ok := c.Args["contextkey"]; ok && v != "" { + contextKey = v + } + + if v, ok := c.Args[contextKey]; ok && v != "" { + if m, ok := findMaterial(c.Sources, v); ok { + inv.ConfigSource.URI = m.URI + inv.ConfigSource.Digest = m.Digest + } else { + inv.ConfigSource.URI = v + } + inv.ConfigSource.URI = urlutil.RedactCredentials(inv.ConfigSource.URI) + delete(c.Args, contextKey) + } + + if v, ok := c.Args["filename"]; ok && v != "" { + inv.ConfigSource.EntryPoint = v + delete(c.Args, "filename") + } + + vcs := make(map[string]string) + for k, v := range c.Args { + if strings.HasPrefix(k, "vcs:") { + if k == "vcs:source" { + v = urlutil.RedactCredentials(v) + } + delete(c.Args, k) + if v != "" { + vcs[strings.TrimPrefix(k, "vcs:")] = v + } + } + } + + inv.Environment.Platform = platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + inv.Parameters.Frontend = c.Frontend + inv.Parameters.Args = c.Args + + for _, s := range c.Secrets { + inv.Parameters.Secrets = append(inv.Parameters.Secrets, &Secret{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.SSH { + inv.Parameters.SSH = append(inv.Parameters.SSH, &SSH{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.Sources.Local { + inv.Parameters.Locals = append(inv.Parameters.Locals, &LocalSource{ + Name: s.Name, + }) + } + + incompleteMaterials := c.IncompleteMaterials + if !incompleteMaterials { + if len(c.Sources.Local) > 0 { + incompleteMaterials = true + } + } + + pr := &ProvenancePredicate{ + Invocation: inv, + ProvenancePredicate: slsa02.ProvenancePredicate{ + BuildType: BuildKitBuildType, + Materials: materials, + }, + Metadata: &ProvenanceMetadata{ + ProvenanceMetadata: slsa02.ProvenanceMetadata{ + Completeness: slsa02.ProvenanceComplete{ + Parameters: c.Frontend != "", + Environment: true, + Materials: !incompleteMaterials, + }, + }, + Hermetic: !incompleteMaterials && !c.NetworkAccess, + }, + } + + if len(vcs) > 0 { + pr.Metadata.BuildKitMetadata.VCS = vcs + } + + return pr, nil +} + +func FilterArgs(m map[string]string) map[string]string { + var hostSpecificArgs = map[string]struct{}{ + "cgroup-parent": {}, + "image-resolve-mode": {}, + "platform": {}, + "cache-imports": {}, + } + const defaultContextKey = "context" + contextKey := 
defaultContextKey + if v, ok := m["contextkey"]; ok && v != "" { + contextKey = v + } + out := make(map[string]string) + for k, v := range m { + if _, ok := hostSpecificArgs[k]; ok { + continue + } + if strings.HasPrefix(k, "attest:") { + continue + } + if k == contextKey || strings.HasPrefix(k, defaultContextKey+":") { + v = urlutil.RedactCredentials(v) + } + out[k] = v + } + return out +} diff --git a/solver/llbsolver/result.go b/solver/llbsolver/result.go index 0cadda547d54..718b1b09d301 100644 --- a/solver/llbsolver/result.go +++ b/solver/llbsolver/result.go @@ -1,86 +1,23 @@ package llbsolver import ( - "bytes" "context" - "path" cacheconfig "github.com/moby/buildkit/cache/config" - "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" ) -type Selector struct { - Path string - Wildcard bool - FollowLinks bool - IncludePatterns []string - ExcludePatterns []string +type Result struct { + *frontend.Result + Provenance *provenance.Result } -func (sel Selector) HasWildcardOrFilters() bool { - return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 -} - -func UnlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference: %T", res) - } - if ref.ImmutableRef == nil { - return nil - } - return ref.ImmutableRef.Extract(ctx, g) -} - -func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { - return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return "", errors.Errorf("invalid reference: %T", res) - } - - if len(selectors) == 0 { - selectors = []Selector{{}} - } - - dgsts := make([][]byte, len(selectors)) - - eg, ctx := errgroup.WithContext(ctx) - - for i, sel := range selectors { - i, sel := i, sel - eg.Go(func() error { - dgst, err := contenthash.Checksum( - ctx, ref.ImmutableRef, path.Join("/", sel.Path), - contenthash.ChecksumOpts{ - Wildcard: sel.Wildcard, - FollowLinks: sel.FollowLinks, - IncludePatterns: sel.IncludePatterns, - ExcludePatterns: sel.ExcludePatterns, - }, - s, - ) - if err != nil { - return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) - } - dgsts[i] = []byte(dgst) - return nil - }) - } - - if err := eg.Wait(); err != nil { - return "", err - } - - return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil - } -} +type Attestation = frontend.Attestation func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { diff --git a/solver/llbsolver/solver.go b/solver/llbsolver/solver.go index ee06233da5ad..2f7ba61e5f8f 100644 --- a/solver/llbsolver/solver.go +++ b/solver/llbsolver/solver.go @@ -3,10 +3,15 @@ package llbsolver import ( "context" "encoding/base64" + "encoding/json" "fmt" + "os" "strings" + "sync" "time" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" 
"github.com/moby/buildkit/cache/remotecache" @@ -19,27 +24,60 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/solver/result" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" + "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing/detect" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -const keyEntitlements = "llb.entitlements" +const ( + keyEntitlements = "llb.entitlements" + keySourcePolicy = "llb.sourcepolicy" +) type ExporterRequest struct { - Exporter exporter.ExporterInstance - CacheExporter remotecache.Exporter - CacheExportMode solver.CacheExportMode + Type string + Attrs map[string]string + Exporter exporter.ExporterInstance + CacheExporters []RemoteCacheExporter +} + +type RemoteCacheExporter struct { + remotecache.Exporter + solver.CacheExportMode + IgnoreError bool } // ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases type ResolveWorkerFunc func() (worker.Worker, error) +// Opt defines options for new Solver. +type Opt struct { + CacheManager solver.CacheManager + CacheResolvers map[string]remotecache.ResolveCacheImporterFunc + Entitlements []string + Frontends map[string]frontend.Frontend + GatewayForwarder *controlgateway.GatewayForwarder + SessionManager *session.Manager + WorkerController *worker.Controller + HistoryQueue *HistoryQueue +} + type Solver struct { workerController *worker.Controller solver *solver.Solver @@ -50,23 +88,29 @@ type Solver struct { gatewayForwarder *controlgateway.GatewayForwarder sm *session.Manager entitlements []string + history *HistoryQueue } -func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) { +// Processor defines a processing function to be applied after solving, but +// before exporting +type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error) + +func New(opt Opt) (*Solver, error) { s := &Solver{ - workerController: wc, - resolveWorker: defaultResolver(wc), - eachWorker: allWorkers(wc), - frontends: f, - resolveCacheImporterFuncs: resolveCI, - gatewayForwarder: gatewayForwarder, - sm: sm, - entitlements: ents, + workerController: opt.WorkerController, + resolveWorker: defaultResolver(opt.WorkerController), + eachWorker: allWorkers(opt.WorkerController), + frontends: opt.Frontends, + resolveCacheImporterFuncs: opt.CacheResolvers, + gatewayForwarder: opt.GatewayForwarder, + sm: opt.SessionManager, + entitlements: opt.Entitlements, + history: opt.HistoryQueue, } s.solver = solver.NewSolver(solver.SolverOpt{ ResolveOpFunc: s.resolver(), - DefaultCache: cache, + DefaultCache: opt.CacheManager, }) return s, nil } @@ -81,8 +125,8 @@ func (s *Solver) resolver() solver.ResolveOpFunc { } } -func (s 
*Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { - return &llbBridge{ +func (s *Solver) bridge(b solver.Builder) *provenanceBridge { + return &provenanceBridge{llbBridge: &llbBridge{ builder: b, frontends: s.frontends, resolveWorker: s.resolveWorker, @@ -90,10 +134,272 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { resolveCacheImporterFuncs: s.resolveCacheImporterFuncs, cms: map[string]solver.CacheManager{}, sm: s.sm, + }} +} + +func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { + return s.bridge(b) +} + +func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job) (func(*Result, exporter.DescriptorReference, error) error, error) { + var stopTrace func() []tracetest.SpanStub + + if s := trace.SpanFromContext(ctx); s.SpanContext().IsValid() { + if exp, err := detect.Exporter(); err == nil { + if rec, ok := exp.(*detect.TraceRecorder); ok { + stopTrace = rec.Record(s.SpanContext().TraceID()) + } + } + } + + st := time.Now() + rec := &controlapi.BuildHistoryRecord{ + Ref: id, + Frontend: req.Frontend, + FrontendAttrs: req.FrontendOpt, + CreatedAt: &st, + } + + if exp.Type != "" { + rec.Exporters = []*controlapi.Exporter{{ + Type: exp.Type, + Attrs: exp.Attrs, + }} + } + + if err := s.history.Update(ctx, &controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_STARTED, + Record: rec, + }); err != nil { + return nil, err } + + return func(res *Result, descref exporter.DescriptorReference, err error) error { + en := time.Now() + rec.CompletedAt = &en + + j.CloseProgress() + + if res != nil && len(res.Metadata) > 0 { + rec.ExporterResponse = map[string]string{} + for k, v := range res.Metadata { + rec.ExporterResponse[k] = string(v) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + var mu sync.Mutex + ch := make(chan *client.SolveStatus) + eg, ctx2 := errgroup.WithContext(ctx) + var releasers []func() + + attrs := map[string]string{ + "mode": "max", + } + + makeProvenance := func(res solver.ResultProxy, cap *provenance.Capture) (*controlapi.Descriptor, func(), error) { + prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j) + if err != nil { + return nil, nil, err + } + pr, err := prc.Predicate() + if err != nil { + return nil, nil, err + } + dt, err := json.MarshalIndent(pr, "", " ") + if err != nil { + return nil, nil, err + } + w, err := s.history.OpenBlobWriter(ctx, attestation.MediaTypeDockerSchema2AttestationType) + if err != nil { + return nil, nil, err + } + defer func() { + if w != nil { + w.Discard() + } + }() + if _, err := w.Write(dt); err != nil { + return nil, nil, err + } + desc, release, err := w.Commit(ctx2) + if err != nil { + return nil, nil, err + } + w = nil + return &controlapi.Descriptor{ + Digest: desc.Digest, + Size_: desc.Size, + MediaType: desc.MediaType, + Annotations: map[string]string{ + "in-toto.io/predicate-type": slsa02.PredicateSLSAProvenance, + }, + }, release, nil + } + + if res != nil { + if res.Ref != nil { + eg.Go(func() error { + desc, release, err := makeProvenance(res.Ref, res.Provenance.Ref) + if err != nil { + return err + } + + mu.Lock() + releasers = append(releasers, release) + if rec.Result == nil { + rec.Result = &controlapi.BuildResultInfo{} + } + rec.Result.Attestations = append(rec.Result.Attestations, desc) + mu.Unlock() + return nil + }) + } + + for k, r := range res.Refs { + k, r := k, r + cp := res.Provenance.Refs[k] + eg.Go(func() error 
{ + desc, release, err := makeProvenance(r, cp) + if err != nil { + return err + } + + mu.Lock() + releasers = append(releasers, release) + if rec.Results == nil { + rec.Results = make(map[string]*controlapi.BuildResultInfo) + } + if rec.Results[k] == nil { + rec.Results[k] = &controlapi.BuildResultInfo{} + } + rec.Results[k].Attestations = append(rec.Results[k].Attestations, desc) + mu.Unlock() + return nil + }) + } + } + + eg.Go(func() error { + st, releaseStatus, err := s.history.ImportStatus(ctx2, ch) + if err != nil { + return err + } + mu.Lock() + releasers = append(releasers, releaseStatus) + rec.Logs = &controlapi.Descriptor{ + Digest: st.Descriptor.Digest, + Size_: st.Descriptor.Size, + MediaType: st.Descriptor.MediaType, + } + rec.NumCachedSteps = int32(st.NumCachedSteps) + rec.NumCompletedSteps = int32(st.NumCompletedSteps) + rec.NumTotalSteps = int32(st.NumTotalSteps) + mu.Unlock() + return nil + }) + eg.Go(func() error { + return j.Status(ctx2, ch) + }) + + if descref != nil { + eg.Go(func() error { + mu.Lock() + if rec.Result == nil { + rec.Result = &controlapi.BuildResultInfo{} + } + desc := descref.Descriptor() + rec.Result.Result = &controlapi.Descriptor{ + Digest: desc.Digest, + Size_: desc.Size, + MediaType: desc.MediaType, + Annotations: desc.Annotations, + } + mu.Unlock() + return nil + }) + } + + if err1 := eg.Wait(); err == nil { + err = err1 + } + + defer func() { + for _, f := range releasers { + f() + } + }() + + if err != nil { + st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(err)) + if !ok { + st = status.New(codes.Unknown, err.Error()) + } + rec.Error = grpcerrors.ToRPCStatus(st.Proto()) + } + if err1 := s.history.Update(ctx, &controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_COMPLETE, + Record: rec, + }); err1 != nil { + if err == nil { + err = err1 + } + } + + if stopTrace == nil { + logrus.Warn("no trace recorder found, skipping") + return err + } + go func() { + time.Sleep(3 * time.Second) + spans := stopTrace() + + if len(spans) == 0 { + return + } + + if err := func() error { + w, err := s.history.OpenBlobWriter(context.TODO(), "application/vnd.buildkit.otlp.json.v0") + if err != nil { + return err + } + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + for _, sp := range spans { + if err := enc.Encode(sp); err != nil { + return err + } + } + + desc, release, err := w.Commit(context.TODO()) + if err != nil { + return err + } + defer release() + + if err := s.history.UpdateRef(context.TODO(), id, func(rec *controlapi.BuildHistoryRecord) error { + rec.Trace = &controlapi.Descriptor{ + Digest: desc.Digest, + MediaType: desc.MediaType, + Size_: desc.Size, + } + return nil + }); err != nil { + return err + } + return nil + }(); err != nil { + logrus.Errorf("failed to save trace for %s: %+v", id, err) + } + }() + + return err + }, nil } -func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement) (*client.SolveResponse, error) { +func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement, post []Processor, internal bool, srcPol *spb.Policy) (_ *client.SolveResponse, err error) { j, err := s.solver.NewJob(id) if err != nil { return nil, err @@ -101,17 +407,48 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro defer j.Discard() + var res *frontend.Result + var resProv *Result + var descref exporter.DescriptorReference + 
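+	// releasers collects cleanup callbacks for everything the deferred
+	// build-history callback below still reads (solve refs, status blobs,
+	// the exporter descriptor). Deferred functions run LIFO, so this
+	// cleanup, and descref.Release, only happen after the history record
+	// has been written.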
+	var releasers []func()
+	defer func() {
+		for _, f := range releasers {
+			f()
+		}
+		if descref != nil {
+			descref.Release()
+		}
+	}()
+
+	if internal {
+		defer j.CloseProgress()
+	} else {
+		rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j)
+		if err1 != nil {
+			defer j.CloseProgress()
+			return nil, err1
+		}
+		defer func() {
+			err = rec(resProv, descref, err)
+		}()
+	}
+
 	set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements))
 	if err != nil {
 		return nil, err
 	}
 	j.SetValue(keyEntitlements, set)
 
+	if srcPol != nil {
+		j.SetValue(keySourcePolicy, *srcPol)
+	}
+
 	j.SessionID = sessionID
-	var res *frontend.Result
+	br := s.bridge(j)
 	if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" {
-		fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID, s.sm)
+		fwd := gateway.NewBridgeForwarder(ctx, br, s.workerController, req.FrontendInputs, sessionID, s.sm)
 		defer fwd.Discard()
 		if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil {
 			return nil, err
@@ -129,7 +466,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 			return nil, err
 		}
 	} else {
-		res, err = s.Bridge(j).Solve(ctx, req, sessionID)
+		res, err = br.Solve(ctx, req, sessionID)
 		if err != nil {
 			return nil, err
 		}
@@ -139,12 +476,12 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 		res = &frontend.Result{}
 	}
 
-	defer func() {
+	releasers = append(releasers, func() {
 		res.EachRef(func(ref solver.ResultProxy) error {
 			go ref.Release(context.TODO())
 			return nil
 		})
-	}()
+	})
 
 	eg, ctx2 := errgroup.WithContext(ctx)
 	res.EachRef(func(ref solver.ResultProxy) error {
@@ -158,149 +495,60 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
 		return nil, err
 	}
 
-	if r := res.Ref; r != nil {
-		dtbi, err := buildinfo.Encode(ctx, res.Metadata, exptypes.ExporterBuildInfo, r.BuildSources())
+	resProv, err = addProvenanceToResult(res, br)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, post := range post {
+		res2, err := post(ctx, resProv, s, j)
 		if err != nil {
 			return nil, err
 		}
-		if dtbi != nil && len(dtbi) > 0 {
-			if res.Metadata == nil {
-				res.Metadata = make(map[string][]byte)
-			}
-			res.Metadata[exptypes.ExporterBuildInfo] = dtbi
-		}
+		resProv = res2
 	}
 
-	if res.Refs != nil {
-		for k, r := range res.Refs {
-			if r == nil {
-				continue
-			}
-			dtbi, err := buildinfo.Encode(ctx, res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), r.BuildSources())
-			if err != nil {
-				return nil, err
-			}
-			if dtbi != nil && len(dtbi) > 0 {
-				if res.Metadata == nil {
-					res.Metadata = make(map[string][]byte)
-				}
-				res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi
-			}
+	res = resProv.Result
+
+	cached, err := result.ConvertResult(res, func(res solver.ResultProxy) (solver.CachedResult, error) {
+		return res.Result(ctx)
+	})
+	if err != nil {
+		return nil, err
+	}
+	inp, err := result.ConvertResult(cached, func(res solver.CachedResult) (cache.ImmutableRef, error) {
+		workerRef, ok := res.Sys().(*worker.WorkerRef)
+		if !ok {
+			return nil, errors.Errorf("invalid reference: %T", res.Sys())
 		}
+		return workerRef.ImmutableRef, nil
+	})
+	if err != nil {
+		return nil, err
 	}
 
+	cacheExporters, inlineCacheExporter := splitCacheExporters(exp.CacheExporters)
+
 	var exporterResponse map[string]string
 	if e := exp.Exporter; e != nil {
-		inp := exporter.Source{
-			Metadata: res.Metadata,
-		}
-		if inp.Metadata == nil {
-			inp.Metadata = make(map[string][]byte)
-		}
-		var cr
solver.CachedResult - var crMap = map[string]solver.CachedResult{} - if res := res.Ref; res != nil { - r, err := res.Result(ctx) - if err != nil { - return nil, err - } - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", r.Sys()) - } - inp.Ref = workerRef.ImmutableRef - cr = r - } - if res.Refs != nil { - m := make(map[string]cache.ImmutableRef, len(res.Refs)) - for k, res := range res.Refs { - if res == nil { - m[k] = nil - } else { - r, err := res.Result(ctx) - if err != nil { - return nil, err - } - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", r.Sys()) - } - m[k] = workerRef.ImmutableRef - crMap[k] = r - } - } - inp.Refs = m + meta, err := runInlineCacheExporter(ctx, e, inlineCacheExporter, j, cached) + if err != nil { + return nil, err } - if _, ok := asInlineCache(exp.CacheExporter); ok { - if err := inBuilderContext(ctx, j, "preparing layers for inline cache", "", func(ctx context.Context, _ session.Group) error { - if cr != nil { - dtic, err := inlineCache(ctx, exp.CacheExporter, cr, e.Config().Compression, session.NewGroup(sessionID)) - if err != nil { - return err - } - if dtic != nil { - inp.Metadata[exptypes.ExporterInlineCache] = dtic - } - } - for k, res := range crMap { - dtic, err := inlineCache(ctx, exp.CacheExporter, res, e.Config().Compression, session.NewGroup(sessionID)) - if err != nil { - return err - } - if dtic != nil { - inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dtic - } - } - exp.CacheExporter = nil - return nil - }); err != nil { - return nil, err - } + for k, v := range meta { + inp.AddMeta(k, v) } - if err := inBuilderContext(ctx, j, e.Name(), "", func(ctx context.Context, _ session.Group) error { - exporterResponse, err = e.Export(ctx, inp, j.SessionID) + + if err := inBuilderContext(ctx, j, e.Name(), j.SessionID+"-export", func(ctx context.Context, _ session.Group) error { + exporterResponse, descref, err = e.Export(ctx, inp, j.SessionID) return err }); err != nil { return nil, err } } - g := session.NewGroup(j.SessionID) - var cacheExporterResponse map[string]string - if e := exp.CacheExporter; e != nil { - if err := inBuilderContext(ctx, j, "exporting cache", "", func(ctx context.Context, _ session.Group) error { - prepareDone := oneOffProgress(ctx, "preparing build cache for export") - if err := res.EachRef(func(res solver.ResultProxy) error { - r, err := res.Result(ctx) - if err != nil { - return err - } - - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference: %T", r.Sys()) - } - ctx = withDescHandlerCacheOpts(ctx, workerRef.ImmutableRef) - - // Configure compression - compressionConfig := e.Config().Compression - - // all keys have same export chain so exporting others is not needed - _, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g), - Mode: exp.CacheExportMode, - Session: g, - CompressionOpt: &compressionConfig, - }) - return err - }); err != nil { - return prepareDone(err) - } - prepareDone(nil) - cacheExporterResponse, err = e.Finalize(ctx) - return err - }); err != nil { - return nil, err - } + cacheExporterResponse, err := runCacheExporters(ctx, cacheExporters, j, cached, inp) + if err != nil { + return nil, err } if exporterResponse == nil { @@ -326,6 +574,235 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID 
string, req fro }, nil } +func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], inp *result.Result[cache.ImmutableRef]) (map[string]string, error) { + eg, ctx := errgroup.WithContext(ctx) + g := session.NewGroup(j.SessionID) + var cacheExporterResponse map[string]string + resps := make([]map[string]string, len(exporters)) + for i, exp := range exporters { + func(exp RemoteCacheExporter, i int) { + eg.Go(func() (err error) { + id := fmt.Sprint(j.SessionID, "-cache-", i) + err = inBuilderContext(ctx, j, exp.Exporter.Name(), id, func(ctx context.Context, _ session.Group) error { + prepareDone := progress.OneOff(ctx, "preparing build cache for export") + if err := result.EachRef(cached, inp, func(res solver.CachedResult, ref cache.ImmutableRef) error { + ctx = withDescHandlerCacheOpts(ctx, ref) + + // Configure compression + compressionConfig := exp.Config().Compression + + // all keys have same export chain so exporting others is not needed + _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, exp, solver.CacheExportOpt{ + ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g), + Mode: exp.CacheExportMode, + Session: g, + CompressionOpt: &compressionConfig, + }) + return err + }); err != nil { + return prepareDone(err) + } + resps[i], err = exp.Finalize(ctx) + return prepareDone(err) + }) + if exp.IgnoreError { + err = nil + } + return err + }) + }(exp, i) + } + if err := eg.Wait(); err != nil { + return nil, err + } + for _, resp := range resps { + for k, v := range resp { + if cacheExporterResponse == nil { + cacheExporterResponse = make(map[string]string) + } + cacheExporterResponse[k] = v + } + } + return cacheExporterResponse, nil +} + +func runInlineCacheExporter(ctx context.Context, e exporter.ExporterInstance, inlineExporter *RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult]) (map[string][]byte, error) { + meta := map[string][]byte{} + if inlineExporter == nil { + return nil, nil + } + if err := inBuilderContext(ctx, j, "preparing layers for inline cache", j.SessionID+"-cache-inline", func(ctx context.Context, _ session.Group) error { + if res := cached.Ref; res != nil { + dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID)) + if err != nil { + return err + } + if dtic != nil { + meta[exptypes.ExporterInlineCache] = dtic + } + } + for k, res := range cached.Refs { + dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID)) + if err != nil { + return err + } + if dtic != nil { + meta[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dtic + } + } + return nil + }); err != nil { + return nil, err + } + return meta, nil +} + +func splitCacheExporters(exporters []RemoteCacheExporter) (rest []RemoteCacheExporter, inline *RemoteCacheExporter) { + rest = make([]RemoteCacheExporter, 0, len(exporters)) + for i, exp := range exporters { + if _, ok := asInlineCache(exp.Exporter); ok { + inline = &exporters[i] + continue + } + rest = append(rest, exp) + } + return rest, inline +} + +func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, error) { + if res == nil { + return nil, nil + } + reqs, err := br.requests(res) + if err != nil { + return nil, err + } + out := &Result{ + Result: res, + Provenance: &provenance.Result{}, + } + + if res.Ref != nil { + cp, err := getProvenance(res.Ref, 
reqs.ref.bridge, "", reqs) + if err != nil { + return nil, err + } + out.Provenance.Ref = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, exptypes.ExporterBuildInfo, cp); err != nil { + return nil, err + } + } + + if len(res.Refs) != 0 { + out.Provenance.Refs = make(map[string]*provenance.Capture, len(res.Refs)) + } + for k, ref := range res.Refs { + cp, err := getProvenance(ref, reqs.refs[k].bridge, k, reqs) + if err != nil { + return nil, err + } + out.Provenance.Refs[k] = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), cp); err != nil { + return nil, err + } + } + + if len(res.Attestations) != 0 { + out.Provenance.Attestations = make(map[string][]result.Attestation[*provenance.Capture], len(res.Attestations)) + } + for k, as := range res.Attestations { + for i, a := range as { + a2, err := result.ConvertAttestation(&a, func(r solver.ResultProxy) (*provenance.Capture, error) { + return getProvenance(r, reqs.atts[k][i].bridge, k, reqs) + }) + if err != nil { + return nil, err + } + out.Provenance.Attestations[k] = append(out.Provenance.Attestations[k], *a2) + } + } + + return out, nil +} + +func getRefProvenance(ref solver.ResultProxy, br *provenanceBridge) (*provenance.Capture, error) { + if ref == nil { + return nil, nil + } + p := ref.Provenance() + if p == nil { + return nil, errors.Errorf("missing provenance for %s", ref.ID()) + } + pr, ok := p.(*provenance.Capture) + if !ok { + return nil, errors.Errorf("invalid provenance type %T", p) + } + + if br.req != nil { + if pr == nil { + return nil, errors.Errorf("missing provenance for %s", ref.ID()) + } + + pr.Frontend = br.req.Frontend + pr.Args = provenance.FilterArgs(br.req.FrontendOpt) + // TODO: should also save some output options like compression + + if len(br.req.FrontendInputs) > 0 { + pr.IncompleteMaterials = true // not implemented + } + } + + return pr, nil +} + +func getProvenance(ref solver.ResultProxy, br *provenanceBridge, id string, reqs *resultRequests) (*provenance.Capture, error) { + pr, err := getRefProvenance(ref, br) + if err != nil { + return nil, err + } + if pr == nil { + return nil, nil + } + + visited := reqs.allRes() + visited[ref.ID()] = struct{}{} + // provenance for all the refs not directly in the result needs to be captured as well + if err := br.eachRef(func(r solver.ResultProxy) error { + if _, ok := visited[r.ID()]; ok { + return nil + } + visited[r.ID()] = struct{}{} + pr2, err := getRefProvenance(r, br) + if err != nil { + return err + } + return pr.Merge(pr2) + }); err != nil { + return nil, err + } + + imgs := br.allImages() + if id != "" { + imgs = reqs.filterImagePlatforms(id, imgs) + } + for _, img := range imgs { + pr.AddImage(img) + } + + if err := pr.OptimizeImageSources(); err != nil { + return nil, err + } + pr.Sort() + + return pr, nil +} + type inlineCacheExporter interface { ExportForLayers(context.Context, []digest.Digest) ([]byte, error) } @@ -384,6 +861,15 @@ func withDescHandlerCacheOpts(ctx context.Context, ref cache.ImmutableRef) conte } func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { + if err := s.history.Status(ctx, id, statusChan); err != nil { + if !errors.Is(err, os.ErrNotExist) { + close(statusChan) + return err + } + } else { + close(statusChan) + return nil + } j, err := s.solver.Get(id) if err != nil { close(statusChan) @@ 
-412,23 +898,6 @@ func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { } } -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - func inBuilderContext(ctx context.Context, b solver.Builder, name, id string, f func(ctx context.Context, g session.Group) error) error { if id == "" { id = name @@ -497,3 +966,29 @@ func loadEntitlements(b solver.Builder) (entitlements.Set, error) { } return ent, nil } + +func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) { + set := make(map[spb.Rule]struct{}, 0) + err := b.EachValue(context.TODO(), keySourcePolicy, func(v interface{}) error { + x, ok := v.(spb.Policy) + if !ok { + return errors.Errorf("invalid source policy %T", v) + } + for _, f := range x.Rules { + set[*f] = struct{}{} + } + return nil + }) + if err != nil { + return nil, err + } + var srcPol *spb.Policy + if len(set) > 0 { + srcPol = &spb.Policy{} + for k := range set { + k := k + srcPol.Rules = append(srcPol.Rules, &k) + } + } + return srcPol, nil +} diff --git a/solver/llbsolver/sourcepolicy.go b/solver/llbsolver/sourcepolicy.go new file mode 100644 index 000000000000..11a49616b301 --- /dev/null +++ b/solver/llbsolver/sourcepolicy.go @@ -0,0 +1,11 @@ +package llbsolver + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" +) + +type SourcePolicyEvaluator interface { + Evaluate(ctx context.Context, op *pb.Op) (bool, error) +} diff --git a/solver/llbsolver/vertex.go b/solver/llbsolver/vertex.go index 4f36c2eddbb3..6901332d2b65 100644 --- a/solver/llbsolver/vertex.go +++ b/solver/llbsolver/vertex.go @@ -1,11 +1,13 @@ package llbsolver import ( + "context" "fmt" "strings" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/entitlements" @@ -143,8 +145,8 @@ func (dpc *detectPrunedCacheID) Load(op *pb.Op, md *pb.OpMetadata, opt *solver.V return nil } -func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) { - return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { +func Load(ctx context.Context, def *pb.Definition, polEngine SourcePolicyEvaluator, opts ...LoadOpt) (solver.Edge, error) { + return loadLLB(ctx, def, polEngine, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { opMetadata := def.Metadata[dgst] vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...) 
if err != nil { @@ -185,36 +187,105 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d return vtx, nil } +func recomputeDigests(ctx context.Context, all map[digest.Digest]*pb.Op, visited map[digest.Digest]digest.Digest, dgst digest.Digest) (digest.Digest, error) { + if dgst, ok := visited[dgst]; ok { + return dgst, nil + } + op := all[dgst] + + var mutated bool + for _, input := range op.Inputs { + if ctx.Err() != nil { + return "", ctx.Err() + } + + iDgst, err := recomputeDigests(ctx, all, visited, input.Digest) + if err != nil { + return "", err + } + if input.Digest != iDgst { + mutated = true + input.Digest = iDgst + } + } + + if !mutated { + return dgst, nil + } + + dt, err := op.Marshal() + if err != nil { + return "", err + } + newDgst := digest.FromBytes(dt) + visited[dgst] = newDgst + all[newDgst] = op + delete(all, dgst) + return newDgst, nil +} + // loadLLB loads LLB. // fn is executed sequentially. -func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { +func loadLLB(ctx context.Context, def *pb.Definition, polEngine SourcePolicyEvaluator, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { if len(def.Def) == 0 { return solver.Edge{}, errors.New("invalid empty definition") } allOps := make(map[digest.Digest]*pb.Op) + mutatedDigests := make(map[digest.Digest]digest.Digest) // key: old, val: new - var dgst digest.Digest + var lastDgst digest.Digest for _, dt := range def.Def { var op pb.Op if err := (&op).Unmarshal(dt); err != nil { return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op") } - dgst = digest.FromBytes(dt) + dgst := digest.FromBytes(dt) + if polEngine != nil { + mutated, err := polEngine.Evaluate(ctx, &op) + if err != nil { + return solver.Edge{}, errors.Wrap(err, "error evaluating the source policy") + } + if mutated { + dtMutated, err := op.Marshal() + if err != nil { + return solver.Edge{}, err + } + dgstMutated := digest.FromBytes(dtMutated) + mutatedDigests[dgst] = dgstMutated + dgst = dgstMutated + } + } allOps[dgst] = &op + lastDgst = dgst + } + + for dgst := range allOps { + _, err := recomputeDigests(ctx, allOps, mutatedDigests, dgst) + if err != nil { + return solver.Edge{}, err + } } if len(allOps) < 2 { return solver.Edge{}, errors.Errorf("invalid LLB with %d vertexes", len(allOps)) } - lastOp := allOps[dgst] - delete(allOps, dgst) + for { + newDgst, ok := mutatedDigests[lastDgst] + if !ok { + break + } + lastDgst = newDgst + } + + lastOp := allOps[lastDgst] + delete(allOps, lastDgst) if len(lastOp.Inputs) == 0 { return solver.Edge{}, errors.Errorf("invalid LLB with no inputs on last vertex") } - dgst = lastOp.Inputs[0].Digest + dgst := lastOp.Inputs[0].Digest cache := make(map[digest.Digest]solver.Vertex) @@ -228,7 +299,7 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige return nil, errors.Errorf("invalid missing input digest %s", dgst) } - if err := ValidateOp(op); err != nil { + if err := opsutils.Validate(op); err != nil { return nil, err } @@ -301,63 +372,6 @@ func llbOpName(pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (st } } -func ValidateOp(op *pb.Op) error { - if op == nil { - return errors.Errorf("invalid nil op") - } - - switch op := op.Op.(type) { - case *pb.Op_Source: - if op.Source == nil { - return errors.Errorf("invalid nil source op") - } - case *pb.Op_Exec: - if op.Exec == 
nil { - return errors.Errorf("invalid nil exec op") - } - if op.Exec.Meta == nil { - return errors.Errorf("invalid exec op with no meta") - } - if len(op.Exec.Meta.Args) == 0 { - return errors.Errorf("invalid exec op with no args") - } - if len(op.Exec.Mounts) == 0 { - return errors.Errorf("invalid exec op with no mounts") - } - - isRoot := false - for _, m := range op.Exec.Mounts { - if m.Dest == pb.RootMount { - isRoot = true - break - } - } - if !isRoot { - return errors.Errorf("invalid exec op with no rootfs") - } - case *pb.Op_File: - if op.File == nil { - return errors.Errorf("invalid nil file op") - } - if len(op.File.Actions) == 0 { - return errors.Errorf("invalid file op with no actions") - } - case *pb.Op_Build: - if op.Build == nil { - return errors.Errorf("invalid nil build op") - } - case *pb.Op_Merge: - if op.Merge == nil { - return errors.Errorf("invalid nil merge op") - } - case *pb.Op_Diff: - if op.Diff == nil { - return errors.Errorf("invalid nil diff op") - } - } - return nil -} - func fileOpName(actions []*pb.FileAction) string { names := make([]string, 0, len(actions)) for _, action := range actions { diff --git a/solver/llbsolver/vertex_test.go b/solver/llbsolver/vertex_test.go new file mode 100644 index 000000000000..fe1f2cb6f156 --- /dev/null +++ b/solver/llbsolver/vertex_test.go @@ -0,0 +1,55 @@ +package llbsolver + +import ( + "context" + "testing" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRecomputeDigests(t *testing.T) { + op1 := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + oldData, err := op1.Marshal() + require.NoError(t, err) + oldDigest := digest.FromBytes(oldData) + + op1.GetOp().(*pb.Op_Source).Source.Identifier = "docker-image://docker.io/library/busybox:1.31.1" + newData, err := op1.Marshal() + require.NoError(t, err) + newDigest := digest.FromBytes(newData) + + op2 := &pb.Op{ + Inputs: []*pb.Input{ + {Digest: oldDigest}, // Input is the old digest, this should be updated after recomputeDigests + }, + } + op2Data, err := op2.Marshal() + require.NoError(t, err) + op2Digest := digest.FromBytes(op2Data) + + all := map[digest.Digest]*pb.Op{ + newDigest: op1, + op2Digest: op2, + } + visited := map[digest.Digest]digest.Digest{oldDigest: newDigest} + + updated, err := recomputeDigests(context.Background(), all, visited, op2Digest) + require.NoError(t, err) + require.Len(t, visited, 2) + require.Len(t, all, 2) + assert.Equal(t, op1, all[newDigest]) + require.Equal(t, newDigest, visited[oldDigest]) + require.Equal(t, op1, all[newDigest]) + assert.Equal(t, op2, all[updated]) + require.Equal(t, newDigest, op2.Inputs[0].Digest) + assert.NotEqual(t, op2Digest, updated) +} diff --git a/solver/pb/attr.go b/solver/pb/attr.go index aa08a0e8289c..85e7cce60ee8 100644 --- a/solver/pb/attr.go +++ b/solver/pb/attr.go @@ -26,6 +26,11 @@ const AttrImageResolveModeDefault = "default" const AttrImageResolveModeForcePull = "pull" const AttrImageResolveModePreferLocal = "local" const AttrImageRecordType = "image.recordtype" +const AttrImageLayerLimit = "image.layerlimit" + +const AttrOCILayoutSessionID = "oci.session" +const AttrOCILayoutStoreID = "oci.store" +const AttrOCILayoutLayerLimit = "oci.layerlimit" const AttrLocalDiffer = "local.differ" const AttrLocalDifferNone = "none" diff --git a/solver/pb/caps.go b/solver/pb/caps.go index 
24b27893488d..02380a4babe6 100644 --- a/solver/pb/caps.go +++ b/solver/pb/caps.go @@ -9,8 +9,10 @@ var Caps apicaps.CapList // considered immutable. After a capability is marked stable it should not be disabled. const ( - CapSourceImage apicaps.CapID = "source.image" - CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" + CapSourceImage apicaps.CapID = "source.image" + CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" + CapSourceImageLayerLimit apicaps.CapID = "source.image.layerlimit" + CapSourceLocal apicaps.CapID = "source.local" CapSourceLocalUnique apicaps.CapID = "source.local.unique" CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid" @@ -33,6 +35,8 @@ const ( CapSourceHTTPPerm apicaps.CapID = "source.http.perm" CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" + CapSourceOCILayout apicaps.CapID = "source.ocilayout" + CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" CapExecMetaBase apicaps.CapID = "exec.meta.base" @@ -43,6 +47,7 @@ const ( CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" + CapExecMetaRemoveMountStubsRecursive apicaps.CapID = "exec.meta.removemountstubs.recursive" CapExecMountBind apicaps.CapID = "exec.mount.bind" CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" CapExecMountCache apicaps.CapID = "exec.mount.cache" @@ -67,10 +72,20 @@ const ( CapMetaDescription apicaps.CapID = "meta.description" CapMetaExportCache apicaps.CapID = "meta.exportcache" - CapRemoteCacheGHA apicaps.CapID = "cache.gha" + CapRemoteCacheGHA apicaps.CapID = "cache.gha" + CapRemoteCacheS3 apicaps.CapID = "cache.s3" + CapRemoteCacheAzBlob apicaps.CapID = "cache.azblob" CapMergeOp apicaps.CapID = "mergeop" CapDiffOp apicaps.CapID = "diffop" + + CapAnnotations apicaps.CapID = "exporter.image.annotations" + CapAttestations apicaps.CapID = "exporter.image.attestations" + + // CapSourceDateEpoch is the capability to automatically handle the date epoch + CapSourceDateEpoch apicaps.CapID = "exporter.sourcedateepoch" + + CapSourcePolicy apicaps.CapID = "source.policy" ) func init() { @@ -86,6 +101,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceImageLayerLimit, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceLocal, Enabled: true, @@ -194,6 +215,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceOCILayout, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceHTTPUIDGID, Enabled: true, @@ -383,14 +410,53 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapRemoteCacheS3, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapRemoteCacheAzBlob, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapMergeOp, Enabled: true, Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ ID: CapDiffOp, Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapAnnotations, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapAttestations, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + 
Caps.Init(apicaps.Cap{ + ID: CapSourceDateEpoch, + Name: "source date epoch", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourcePolicy, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/solver/pb/generate.go b/solver/pb/generate.go index c31e148f2adf..88adaa270208 100644 --- a/solver/pb/generate.go +++ b/solver/pb/generate.go @@ -1,3 +1,3 @@ package pb -//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto +//go:generate protoc -I=. -I=../../vendor/ -I=../../vendor/github.com/gogo/protobuf/ --gogofaster_out=. ops.proto diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 252227a94415..e8afea0233e7 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -154,6 +154,7 @@ type Op struct { // inputs is a set of input edges. Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` // Types that are valid to be assigned to Op: + // // *Op_Exec // *Op_Source // *Op_File @@ -495,15 +496,16 @@ func (m *ExecOp) GetSecretenv() []*SecretEnv { // Meta is unrelated to LLB metadata. // FIXME: rename (ExecContext? ExecArgs?) type Meta struct { - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` - Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` - CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` + ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` + CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` + RemoveMountStubsRecursive bool `protobuf:"varint,11,opt,name=removeMountStubsRecursive,proto3" json:"removeMountStubsRecursive,omitempty"` } func (m *Meta) Reset() { *m = Meta{} } @@ -598,6 +600,13 @@ func (m *Meta) GetCgroupParent() string { return "" } +func (m *Meta) GetRemoveMountStubsRecursive() bool { + if m != nil { + return m.RemoveMountStubsRecursive + } + return false +} + type HostIP struct { Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` @@ -1038,7 +1047,7 @@ func (m *SecretOpt) GetOptional() bool { return false } -// SSHOpt defines options describing secret mounts +// SSHOpt defines options describing ssh mounts type SSHOpt struct { // ID of exposed 
ssh rule. Used for quering the value. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1586,8 +1595,8 @@ func (m *Range) GetEnd() Position { // Position is single location in a source file type Position struct { - Line int32 `protobuf:"varint,1,opt,name=Line,proto3" json:"Line,omitempty"` - Character int32 `protobuf:"varint,2,opt,name=Character,proto3" json:"Character,omitempty"` + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + Character int32 `protobuf:"varint,2,opt,name=character,proto3" json:"character,omitempty"` } func (m *Position) Reset() { *m = Position{} } @@ -1948,6 +1957,7 @@ type FileAction struct { SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"` Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"` // Types that are valid to be assigned to Action: + // // *FileAction_Copy // *FileAction_Mkfile // *FileAction_Mkdir @@ -2465,6 +2475,7 @@ func (m *ChownOpt) GetGroup() *UserOpt { type UserOpt struct { // Types that are valid to be assigned to User: + // // *UserOpt_ByName // *UserOpt_ByID User isUserOpt_User `protobuf_oneof:"user"` @@ -2831,166 +2842,168 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2538 bytes of a gzipped FileDescriptorProto + // 2564 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcf, 0x6f, 0x5b, 0xc7, - 0xf1, 0x17, 0x7f, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xe8, 0xeb, 0xaf, 0xac, 0xbc, 0xe4, - 0x1b, 0xc8, 0xb2, 0x2d, 0xe1, 0xab, 0x00, 0x71, 0x60, 0x14, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xdb, - 0xa2, 0xb0, 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xe9, 0x41, 0x8f, 0x6f, 0x1f, 0xf6, - 0x2d, 0x23, 0xb1, 0x87, 0x1e, 0x7a, 0x2f, 0x10, 0xa0, 0x40, 0xd1, 0x4b, 0xd1, 0x7f, 0xa2, 0xc7, - 0xf6, 0x1e, 0xa0, 0x97, 0x1c, 0x7a, 0x08, 0x7a, 0x48, 0x0b, 0xe7, 0xd2, 0x3f, 0xa2, 0x05, 0x8a, - 0x99, 0xdd, 0xf7, 0x83, 0x94, 0x02, 0xc7, 0x6d, 0xd1, 0x13, 0xe7, 0xcd, 0x7c, 0x76, 0x66, 0x76, - 0x77, 0x66, 0x67, 0x76, 0x09, 0x0d, 0x19, 0xc5, 0x5b, 0x91, 0x92, 0x5a, 0xb2, 0x62, 0x74, 0xbc, - 0x7a, 0xef, 0xc4, 0xd7, 0xa7, 0xd3, 0xe3, 0x2d, 0x4f, 0x4e, 0xb6, 0x4f, 0xe4, 0x89, 0xdc, 0x26, - 0xd1, 0xf1, 0x74, 0x4c, 0x5f, 0xf4, 0x41, 0x94, 0x19, 0xe2, 0xfc, 0xad, 0x08, 0xc5, 0x41, 0xc4, - 0xde, 0x85, 0xaa, 0x1f, 0x46, 0x53, 0x1d, 0x77, 0x0a, 0xeb, 0xa5, 0x8d, 0xe6, 0x4e, 0x63, 0x2b, - 0x3a, 0xde, 0xea, 0x23, 0x87, 0x5b, 0x01, 0x5b, 0x87, 0xb2, 0xb8, 0x10, 0x5e, 0xa7, 0xb8, 0x5e, - 0xd8, 0x68, 0xee, 0x00, 0x02, 0x7a, 0x17, 0xc2, 0x1b, 0x44, 0x07, 0x4b, 0x9c, 0x24, 0xec, 0x03, - 0xa8, 0xc6, 0x72, 0xaa, 0x3c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x4a, - 0x51, 0xd3, 0xd8, 0x0f, 0x44, 0xa7, 0x9c, 0x69, 0x7a, 0xe8, 0x07, 0x06, 0x43, 0x12, 0xf6, 0x1e, - 0x54, 0x8e, 0xa7, 0x7e, 0x30, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x1e, 0x32, 0x08, 0x63, 0x64, - 0x08, 0x9a, 0x08, 0x75, 0x22, 0x3a, 0xd5, 0x0c, 0xf4, 0x04, 0x19, 0x06, 0x44, 0x32, 0xb4, 0x35, - 0xf2, 0xc7, 0xe3, 0x4e, 0x2d, 0xb3, 0xd5, 0xf5, 0xc7, 0x63, 0x63, 0x0b, 0x25, 0x6c, 0x03, 0xea, - 0x51, 0xe0, 0xea, 0xb1, 0x54, 0x93, 0x0e, 0x64, 0x7e, 0x1f, 0x59, 0x1e, 0x4f, 0xa5, 0xec, 0x3e, - 0x34, 0x3d, 0x19, 0xc6, 0x5a, 0xb9, 0x7e, 0xa8, 0xe3, 0x4e, 0x93, 0xc0, 0x6f, 0x22, 0xf8, 0x33, - 0xa9, 0xce, 0x84, 0xda, 0xcf, 0x84, 0x3c, 0x8f, 0xdc, 0x2b, 0x43, 0x51, 0x46, 0xce, 0xaf, 
0x0a, - 0x50, 0x4f, 0xb4, 0x32, 0x07, 0x96, 0x77, 0x95, 0x77, 0xea, 0x6b, 0xe1, 0xe9, 0xa9, 0x12, 0x9d, - 0xc2, 0x7a, 0x61, 0xa3, 0xc1, 0xe7, 0x78, 0xac, 0x05, 0xc5, 0xc1, 0x90, 0xd6, 0xbb, 0xc1, 0x8b, - 0x83, 0x21, 0xeb, 0x40, 0xed, 0xb9, 0xab, 0x7c, 0x37, 0xd4, 0xb4, 0xc0, 0x0d, 0x9e, 0x7c, 0xb2, - 0x9b, 0xd0, 0x18, 0x0c, 0x9f, 0x0b, 0x15, 0xfb, 0x32, 0xa4, 0x65, 0x6d, 0xf0, 0x8c, 0xc1, 0xd6, - 0x00, 0x06, 0xc3, 0x87, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x5e, 0xda, 0x68, 0xf0, 0x1c, 0xc7, - 0xf9, 0x19, 0x54, 0x68, 0xab, 0xd9, 0xa7, 0x50, 0x1d, 0xf9, 0x27, 0x22, 0xd6, 0xc6, 0x9d, 0xbd, - 0x9d, 0x2f, 0xbf, 0xb9, 0xb5, 0xf4, 0xe7, 0x6f, 0x6e, 0x6d, 0xe6, 0x62, 0x4a, 0x46, 0x22, 0xf4, - 0x64, 0xa8, 0x5d, 0x3f, 0x14, 0x2a, 0xde, 0x3e, 0x91, 0xf7, 0xcc, 0x90, 0xad, 0x2e, 0xfd, 0x70, - 0xab, 0x81, 0xdd, 0x86, 0x8a, 0x1f, 0x8e, 0xc4, 0x05, 0xf9, 0x5f, 0xda, 0xbb, 0x6e, 0x55, 0x35, - 0x07, 0x53, 0x1d, 0x4d, 0x75, 0x1f, 0x45, 0xdc, 0x20, 0x9c, 0x3f, 0x16, 0xa0, 0x6a, 0x42, 0x89, - 0xdd, 0x84, 0xf2, 0x44, 0x68, 0x97, 0xec, 0x37, 0x77, 0xea, 0x66, 0x4b, 0xb5, 0xcb, 0x89, 0x8b, - 0x51, 0x3a, 0x91, 0x53, 0x5c, 0xfb, 0x62, 0x16, 0xa5, 0x4f, 0x90, 0xc3, 0xad, 0x80, 0xfd, 0x1f, - 0xd4, 0x42, 0xa1, 0xcf, 0xa5, 0x3a, 0xa3, 0x35, 0x6a, 0x99, 0xb0, 0x38, 0x14, 0xfa, 0x89, 0x1c, - 0x09, 0x9e, 0xc8, 0xd8, 0x5d, 0xa8, 0xc7, 0xc2, 0x9b, 0x2a, 0x5f, 0xcf, 0x68, 0xbd, 0x5a, 0x3b, - 0x6d, 0x0a, 0x56, 0xcb, 0x23, 0x70, 0x8a, 0x60, 0x77, 0xa0, 0x11, 0x0b, 0x4f, 0x09, 0x2d, 0xc2, - 0xcf, 0x69, 0xfd, 0x9a, 0x3b, 0x2b, 0x16, 0xae, 0x84, 0xee, 0x85, 0x9f, 0xf3, 0x4c, 0xee, 0xfc, - 0xa2, 0x08, 0x65, 0xf4, 0x99, 0x31, 0x28, 0xbb, 0xea, 0xc4, 0x64, 0x54, 0x83, 0x13, 0xcd, 0xda, - 0x50, 0x42, 0x1d, 0x45, 0x62, 0x21, 0x89, 0x1c, 0xef, 0x7c, 0x64, 0x37, 0x14, 0x49, 0x1c, 0x37, - 0x8d, 0x85, 0xb2, 0xfb, 0x48, 0x34, 0xbb, 0x0d, 0x8d, 0x48, 0xc9, 0x8b, 0xd9, 0x0b, 0xe3, 0x41, - 0x16, 0xa5, 0xc8, 0x44, 0x07, 0xea, 0x91, 0xa5, 0xd8, 0x26, 0x80, 0xb8, 0xd0, 0xca, 0x3d, 0x90, - 0xb1, 0x8e, 0x3b, 0x55, 0xf2, 0x96, 0xe2, 0x1e, 0x19, 0xfd, 0x23, 0x9e, 0x93, 0xb2, 0x55, 0xa8, - 0x9f, 0xca, 0x58, 0x87, 0xee, 0x44, 0x50, 0x86, 0x34, 0x78, 0xfa, 0xcd, 0x1c, 0xa8, 0x4e, 0x03, - 0x7f, 0xe2, 0xeb, 0x4e, 0x23, 0xd3, 0xf1, 0x8c, 0x38, 0xdc, 0x4a, 0x30, 0x8a, 0xbd, 0x13, 0x25, - 0xa7, 0xd1, 0x91, 0xab, 0x44, 0xa8, 0x29, 0x7f, 0x1a, 0x7c, 0x8e, 0xe7, 0xdc, 0x85, 0xaa, 0xb1, - 0x8c, 0x13, 0x43, 0xca, 0xc6, 0x3a, 0xd1, 0x18, 0xe3, 0xfd, 0xa3, 0x24, 0xc6, 0xfb, 0x47, 0x4e, - 0x17, 0xaa, 0xc6, 0x06, 0xa2, 0x0f, 0xd1, 0x2f, 0x8b, 0x46, 0x1a, 0x79, 0x43, 0x39, 0xd6, 0x26, - 0xa6, 0x38, 0xd1, 0xa4, 0xd5, 0x55, 0x66, 0x05, 0x4b, 0x9c, 0x68, 0xe7, 0x11, 0x34, 0xd2, 0xbd, - 0x21, 0x13, 0x5d, 0xab, 0xa6, 0xd8, 0xef, 0xe2, 0x00, 0x9a, 0xb0, 0x31, 0x4a, 0x34, 0x2e, 0x84, - 0x8c, 0xb4, 0x2f, 0x43, 0x37, 0x20, 0x45, 0x75, 0x9e, 0x7e, 0x3b, 0xbf, 0x2e, 0x41, 0x85, 0x82, - 0x8c, 0x6d, 0x60, 0x4c, 0x47, 0x53, 0x33, 0x83, 0xd2, 0x1e, 0xb3, 0x31, 0x0d, 0x94, 0x3d, 0x69, - 0x48, 0x63, 0x26, 0xad, 0x62, 0x7c, 0x05, 0xc2, 0xd3, 0x52, 0x59, 0x3b, 0xe9, 0x37, 0xda, 0x1f, - 0x61, 0x8e, 0x99, 0x2d, 0x27, 0x9a, 0xdd, 0x81, 0xaa, 0xa4, 0xc4, 0xa0, 0x5d, 0xff, 0x8e, 0x74, - 0xb1, 0x10, 0x54, 0xae, 0x84, 0x3b, 0x92, 0x61, 0x30, 0xa3, 0x58, 0xa8, 0xf3, 0xf4, 0x1b, 0x43, - 0x95, 0x32, 0xe1, 0xe9, 0x2c, 0x32, 0x07, 0x63, 0xcb, 0x84, 0xea, 0x93, 0x84, 0xc9, 0x33, 0x39, - 0x1e, 0x7d, 0x4f, 0x27, 0xd1, 0x38, 0x1e, 0x44, 0xba, 0x73, 0x3d, 0x0b, 0xaa, 0x84, 0xc7, 0x53, - 0x29, 0x22, 0x3d, 0xd7, 0x3b, 0x15, 0x88, 0xbc, 0x91, 0x21, 0xf7, 0x2d, 0x8f, 0xa7, 0xd2, 0x2c, - 0x57, 0x10, 0xfa, 
0x26, 0x41, 0x73, 0xb9, 0x82, 0xd8, 0x4c, 0x8e, 0x31, 0x36, 0x1c, 0x1e, 0x20, - 0xf2, 0xad, 0xec, 0x7c, 0x36, 0x1c, 0x6e, 0x25, 0x66, 0xb6, 0xf1, 0x34, 0xd0, 0xfd, 0x6e, 0xe7, - 0x6d, 0xb3, 0x94, 0xc9, 0xb7, 0xb3, 0x96, 0x4d, 0x00, 0x97, 0x35, 0xf6, 0x7f, 0x6a, 0xe2, 0xa5, - 0xc4, 0x89, 0x76, 0xfa, 0x50, 0x4f, 0x5c, 0xbc, 0x14, 0x06, 0xf7, 0xa0, 0x16, 0x9f, 0xba, 0xca, - 0x0f, 0x4f, 0x68, 0x87, 0x5a, 0x3b, 0xd7, 0xd3, 0x19, 0x0d, 0x0d, 0x1f, 0xbd, 0x48, 0x30, 0x8e, - 0x4c, 0x42, 0xea, 0x2a, 0x5d, 0x6d, 0x28, 0x4d, 0xfd, 0x11, 0xe9, 0x59, 0xe1, 0x48, 0x22, 0xe7, - 0xc4, 0x37, 0x41, 0xb9, 0xc2, 0x91, 0x44, 0xff, 0x26, 0x72, 0x64, 0xaa, 0xde, 0x0a, 0x27, 0x7a, - 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0x17, 0x24, 0x6b, 0xf3, 0x5f, 0xb1, 0xf6, 0xcb, 0x02, 0xd4, 0x93, - 0x52, 0x8d, 0x05, 0xc3, 0x1f, 0x89, 0x50, 0xfb, 0x63, 0x5f, 0x28, 0x6b, 0x38, 0xc7, 0x61, 0xf7, - 0xa0, 0xe2, 0x6a, 0xad, 0x92, 0x63, 0xf8, 0xed, 0x7c, 0x9d, 0xdf, 0xda, 0x45, 0x49, 0x2f, 0xd4, - 0x6a, 0xc6, 0x0d, 0x6a, 0xf5, 0x63, 0x80, 0x8c, 0x89, 0xbe, 0x9e, 0x89, 0x99, 0xd5, 0x8a, 0x24, - 0xbb, 0x01, 0x95, 0xcf, 0xdd, 0x60, 0x9a, 0x64, 0xa4, 0xf9, 0x78, 0x50, 0xfc, 0xb8, 0xe0, 0xfc, - 0xa1, 0x08, 0x35, 0x5b, 0xf7, 0xd9, 0x5d, 0xa8, 0x51, 0xdd, 0xb7, 0x1e, 0x5d, 0x9d, 0x7e, 0x09, - 0x84, 0x6d, 0xa7, 0x0d, 0x4d, 0xce, 0x47, 0xab, 0xca, 0x34, 0x36, 0xd6, 0xc7, 0xac, 0xbd, 0x29, - 0x8d, 0xc4, 0xd8, 0x76, 0x2e, 0x2d, 0xea, 0x13, 0xc4, 0xd8, 0x0f, 0x7d, 0x5c, 0x1f, 0x8e, 0x22, - 0x76, 0x37, 0x99, 0x75, 0x99, 0x34, 0xbe, 0x95, 0xd7, 0x78, 0x79, 0xd2, 0x7d, 0x68, 0xe6, 0xcc, - 0x5c, 0x31, 0xeb, 0xf7, 0xf3, 0xb3, 0xb6, 0x26, 0x49, 0x9d, 0x69, 0xbb, 0xb2, 0x55, 0xf8, 0x37, - 0xd6, 0xef, 0x23, 0x80, 0x4c, 0xe5, 0xf7, 0x3f, 0xbe, 0x9c, 0xdf, 0x97, 0x00, 0x06, 0x11, 0x56, - 0xb1, 0x91, 0x4b, 0x75, 0x77, 0xd9, 0x3f, 0x09, 0xa5, 0x12, 0x2f, 0x28, 0xcd, 0x69, 0x7c, 0x9d, - 0x37, 0x0d, 0x8f, 0x32, 0x86, 0xed, 0x42, 0x73, 0x24, 0x62, 0x4f, 0xf9, 0x14, 0x50, 0x76, 0xd1, - 0x6f, 0xe1, 0x9c, 0x32, 0x3d, 0x5b, 0xdd, 0x0c, 0x61, 0xd6, 0x2a, 0x3f, 0x86, 0xed, 0xc0, 0xb2, - 0xb8, 0x88, 0xa4, 0xd2, 0xd6, 0x8a, 0x69, 0x0f, 0xaf, 0x99, 0x46, 0x13, 0xf9, 0x64, 0x89, 0x37, - 0x45, 0xf6, 0xc1, 0x5c, 0x28, 0x7b, 0x6e, 0x14, 0xdb, 0xa2, 0xdc, 0x59, 0xb0, 0xb7, 0xef, 0x46, - 0x66, 0xd1, 0xf6, 0x3e, 0xc4, 0xb9, 0xfe, 0xfc, 0x2f, 0xb7, 0xee, 0xe4, 0x3a, 0x99, 0x89, 0x3c, - 0x9e, 0x6d, 0x53, 0xbc, 0x9c, 0xf9, 0x7a, 0x7b, 0xaa, 0xfd, 0x60, 0xdb, 0x8d, 0x7c, 0x54, 0x87, - 0x03, 0xfb, 0x5d, 0x4e, 0xaa, 0xd9, 0xc7, 0xd0, 0x8a, 0x94, 0x3c, 0x51, 0x22, 0x8e, 0x5f, 0x50, - 0x5d, 0xb3, 0xfd, 0xe6, 0x1b, 0xb6, 0xfe, 0x92, 0xe4, 0x13, 0x14, 0xf0, 0x95, 0x28, 0xff, 0xb9, - 0xfa, 0x43, 0x68, 0x2f, 0xce, 0xf8, 0x75, 0x76, 0x6f, 0xf5, 0x3e, 0x34, 0xd2, 0x19, 0xbc, 0x6a, - 0x60, 0x3d, 0xbf, 0xed, 0xbf, 0x2b, 0x40, 0xd5, 0xe4, 0x23, 0xbb, 0x0f, 0x8d, 0x40, 0x7a, 0x2e, - 0x3a, 0x90, 0xf4, 0xf6, 0xef, 0x64, 0xe9, 0xba, 0xf5, 0x38, 0x91, 0x99, 0xfd, 0xc8, 0xb0, 0x18, - 0x9e, 0x7e, 0x38, 0x96, 0x49, 0xfe, 0xb4, 0xb2, 0x41, 0xfd, 0x70, 0x2c, 0xb9, 0x11, 0xae, 0x3e, - 0x82, 0xd6, 0xbc, 0x8a, 0x2b, 0xfc, 0x7c, 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x6e, - 0xdf, 0x87, 0x46, 0xca, 0x67, 0x9b, 0x97, 0x1d, 0x5f, 0xce, 0x8f, 0xcc, 0xf9, 0xea, 0x04, 0x00, - 0x99, 0x6b, 0x78, 0xcc, 0xe1, 0x25, 0x22, 0xcc, 0x9a, 0x87, 0xf4, 0x9b, 0x6a, 0xaf, 0xab, 0x5d, - 0x72, 0x65, 0x99, 0x13, 0xcd, 0xb6, 0x00, 0x46, 0x69, 0xaa, 0x7f, 0xc7, 0x01, 0x90, 0x43, 0x38, - 0x03, 0xa8, 0x27, 0x4e, 0xb0, 0x75, 0x68, 0xc6, 0xd6, 0x32, 0xf6, 0xba, 0x68, 0xae, 0xc2, 0xf3, - 0x2c, 0xec, 0x59, 0x95, 0x1b, 0x9e, 0x88, 
0xb9, 0x9e, 0x95, 0x23, 0x87, 0x5b, 0x81, 0xf3, 0x19, - 0x54, 0x88, 0x81, 0x09, 0x1a, 0x6b, 0x57, 0x69, 0xdb, 0xfe, 0x9a, 0x0e, 0x4f, 0xc6, 0x64, 0x76, - 0xaf, 0x8c, 0x21, 0xcc, 0x0d, 0x80, 0xbd, 0x8f, 0x7d, 0xe4, 0xc8, 0xae, 0xe8, 0x55, 0x38, 0x14, - 0x3b, 0x3f, 0x80, 0x7a, 0xc2, 0xc6, 0x99, 0x3f, 0xf6, 0x43, 0x61, 0x5d, 0x24, 0x1a, 0xaf, 0x0d, - 0xfb, 0xa7, 0xae, 0x72, 0x3d, 0x2d, 0x4c, 0x9b, 0x52, 0xe1, 0x19, 0xc3, 0x79, 0x0f, 0x9a, 0xb9, - 0xbc, 0xc3, 0x70, 0x7b, 0x4e, 0xdb, 0x68, 0xb2, 0xdf, 0x7c, 0x38, 0x9f, 0xc0, 0xca, 0x5c, 0x0e, - 0x60, 0xb1, 0xf2, 0x47, 0x49, 0xb1, 0x32, 0x85, 0xe8, 0x52, 0xb7, 0xc5, 0xa0, 0x7c, 0x2e, 0xdc, - 0x33, 0xdb, 0x69, 0x11, 0xed, 0xfc, 0x16, 0x6f, 0x47, 0x49, 0x0f, 0xfb, 0xbf, 0x00, 0xa7, 0x5a, - 0x47, 0x2f, 0xa8, 0xa9, 0xb5, 0xca, 0x1a, 0xc8, 0x21, 0x04, 0xbb, 0x05, 0x4d, 0xfc, 0x88, 0xad, - 0xdc, 0xa8, 0xa6, 0x11, 0xb1, 0x01, 0xfc, 0x0f, 0x34, 0xc6, 0xe9, 0xf0, 0x92, 0x8d, 0x81, 0x64, - 0xf4, 0x3b, 0x50, 0x0f, 0xa5, 0x95, 0x99, 0x1e, 0xbb, 0x16, 0xca, 0x74, 0x9c, 0x1b, 0x04, 0x56, - 0x56, 0x31, 0xe3, 0xdc, 0x20, 0x20, 0xa1, 0x73, 0x07, 0xde, 0xb8, 0x74, 0xcf, 0x63, 0x6f, 0x41, - 0x75, 0xec, 0x07, 0x9a, 0x8a, 0x12, 0xf6, 0xf4, 0xf6, 0xcb, 0xf9, 0x47, 0x01, 0x20, 0x8b, 0x1f, - 0xcc, 0x0a, 0xac, 0x2e, 0x88, 0x59, 0x36, 0xd5, 0x24, 0x80, 0xfa, 0xc4, 0x9e, 0x53, 0x36, 0x32, - 0x6e, 0xce, 0xc7, 0xdc, 0x56, 0x72, 0x8c, 0x99, 0x13, 0x6c, 0xc7, 0x9e, 0x60, 0xaf, 0x73, 0x17, - 0x4b, 0x2d, 0x50, 0xa3, 0x95, 0xbf, 0x9a, 0x43, 0x96, 0xce, 0xdc, 0x4a, 0x56, 0x1f, 0xc1, 0xca, - 0x9c, 0xc9, 0xef, 0x59, 0xb3, 0xb2, 0xf3, 0x36, 0x9f, 0xcb, 0x3b, 0x50, 0x35, 0x77, 0x7a, 0xb6, - 0x01, 0x35, 0xd7, 0x33, 0x69, 0x9c, 0x3b, 0x4a, 0x50, 0xb8, 0x4b, 0x6c, 0x9e, 0x88, 0x9d, 0x3f, - 0x15, 0x01, 0x32, 0xfe, 0x6b, 0x74, 0xdb, 0x0f, 0xa0, 0x15, 0x0b, 0x4f, 0x86, 0x23, 0x57, 0xcd, - 0x48, 0x6a, 0x2f, 0x9d, 0x57, 0x0d, 0x59, 0x40, 0xe6, 0x3a, 0xef, 0xd2, 0xab, 0x3b, 0xef, 0x0d, - 0x28, 0x7b, 0x32, 0x9a, 0xd9, 0xd2, 0xc4, 0xe6, 0x27, 0xb2, 0x2f, 0xa3, 0xd9, 0xc1, 0x12, 0x27, - 0x04, 0xdb, 0x82, 0xea, 0xe4, 0x8c, 0x5e, 0x39, 0xcc, 0x6d, 0xed, 0xc6, 0x3c, 0xf6, 0xc9, 0x19, - 0xd2, 0x07, 0x4b, 0xdc, 0xa2, 0xd8, 0x1d, 0xa8, 0x4c, 0xce, 0x46, 0xbe, 0xb2, 0xc5, 0xe5, 0xfa, - 0x22, 0xbc, 0xeb, 0x2b, 0x7a, 0xd4, 0x40, 0x0c, 0x73, 0xa0, 0xa8, 0x26, 0xf6, 0x49, 0xa3, 0xbd, - 0xb0, 0x9a, 0x93, 0x83, 0x25, 0x5e, 0x54, 0x93, 0xbd, 0x3a, 0x54, 0xcd, 0xba, 0x3a, 0x7f, 0x2f, - 0x41, 0x6b, 0xde, 0x4b, 0xdc, 0xd9, 0x58, 0x79, 0xc9, 0xce, 0xc6, 0xca, 0x4b, 0x2f, 0x25, 0xc5, - 0xdc, 0xa5, 0xc4, 0x81, 0x8a, 0x3c, 0x0f, 0x85, 0xca, 0x3f, 0xe7, 0xec, 0x9f, 0xca, 0xf3, 0x10, - 0x1b, 0x63, 0x23, 0x9a, 0xeb, 0x33, 0x2b, 0xb6, 0xcf, 0x7c, 0x1f, 0x56, 0xc6, 0x32, 0x08, 0xe4, - 0xf9, 0x70, 0x36, 0x09, 0xfc, 0xf0, 0xcc, 0x36, 0x9b, 0xf3, 0x4c, 0xb6, 0x01, 0xd7, 0x46, 0xbe, - 0x42, 0x77, 0xf6, 0x65, 0xa8, 0x45, 0x48, 0x97, 0x55, 0xc4, 0x2d, 0xb2, 0xd9, 0xa7, 0xb0, 0xee, - 0x6a, 0x2d, 0x26, 0x91, 0x7e, 0x16, 0x46, 0xae, 0x77, 0xd6, 0x95, 0x1e, 0x65, 0xe1, 0x24, 0x72, - 0xb5, 0x7f, 0xec, 0x07, 0x78, 0x89, 0xaf, 0xd1, 0xd0, 0x57, 0xe2, 0xd8, 0x07, 0xd0, 0xf2, 0x94, - 0x70, 0xb5, 0xe8, 0x8a, 0x58, 0x1f, 0xb9, 0xfa, 0xb4, 0x53, 0xa7, 0x91, 0x0b, 0x5c, 0x9c, 0x83, - 0x8b, 0xde, 0x7e, 0xe6, 0x07, 0x23, 0x0f, 0xaf, 0x97, 0x0d, 0x33, 0x87, 0x39, 0x26, 0xdb, 0x02, - 0x46, 0x8c, 0xde, 0x24, 0xd2, 0xb3, 0x14, 0x0a, 0x04, 0xbd, 0x42, 0x82, 0x07, 0xae, 0xf6, 0x27, - 0x22, 0xd6, 0xee, 0x24, 0xa2, 0xf7, 0xa3, 0x12, 0xcf, 0x18, 0xec, 0x36, 0xb4, 0xfd, 0xd0, 0x0b, - 0xa6, 0x23, 0xf1, 0x22, 0xc2, 0x89, 0xa8, 0x30, 0xee, 0x2c, 0xd3, 
[… remainder of the regenerated fileDescriptor byte array in solver/pb/ops.pb.go elided: opaque, machine-generated protobuf descriptor data (old and new bytes), churned only by the schema edits to solver/pb/ops.proto shown below …]
func (m *Op) Marshal() (dAtA []byte, err error) { @@ -3377,6 +3390,16 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RemoveMountStubsRecursive { + i-- + if m.RemoveMountStubsRecursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } if len(m.CgroupParent) > 0 { i -= len(m.CgroupParent) copy(dAtA[i:], m.CgroupParent) @@ -5718,6 +5741,9 @@ func (m *Meta) Size() (n int) { if l > 0 { n += 1 + l + sovOps(uint64(l)) } + if m.RemoveMountStubsRecursive { + n += 2 + } return n } @@ -7771,6 +7797,26 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } m.CgroupParent = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveMountStubsRecursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemoveMountStubsRecursive = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:])
diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index d1e30068dffd..87cb771902dd 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -63,6 +63,7 @@ message Meta { string hostname = 7; repeated Ulimit ulimit = 9; string cgroupParent = 10; + bool removeMountStubsRecursive = 11; } message HostIP { @@ -157,7 +158,7 @@ message SecretOpt { bool optional = 5; } -// SSHOpt defines options describing secret mounts +// SSHOpt defines options describing ssh mounts message SSHOpt { // ID of exposed ssh rule. Used for quering the value. 
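An editorial aside, not part of the patch: the constants in the generated Meta hunks above fall straight out of the protobuf wire format. A minimal, self-contained sketch (standard library only, hypothetical variable names) of where the `0x58` tag byte and the `n += 2` size charge come from:

```go
package main

import "fmt"

func main() {
	// Field number 11 with wire type 0 (varint) yields the tag byte
	// used by MarshalToSizedBuffer: (11<<3)|0 == 0x58.
	const fieldNum, wireType = 11, 0
	tag := byte(fieldNum<<3 | wireType)
	fmt.Printf("tag: %#x\n", tag) // 0x58

	// A bool is a one-byte varint, so Size() adds 2 bytes (tag + value).
	// The generated marshaler fills the buffer back to front, which is
	// why the hunk writes the value byte before the tag byte.
	encoded := []byte{tag, 1} // removeMountStubsRecursive = true
	v := encoded[1]           // high bit clear: the varint ends after one byte
	fmt.Println("decoded:", v != 0) // true
}
```

The shift loop in the Unmarshal hunk is the general varint decoder; for a bool it terminates on the first byte, exactly as above.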
string ID = 1; @@ -243,8 +244,8 @@ message Range { // Position is single location in a source file message Position { - int32 Line = 1; - int32 Character = 2; + int32 line = 1; + int32 character = 2; } message ExportCache { diff --git a/solver/progress.go b/solver/progress.go index 6e5434967141..3fb954f867c4 100644 --- a/solver/progress.go +++ b/solver/progress.go @@ -3,6 +3,7 @@ package solver import ( "context" "io" + "sort" "time" "github.com/moby/buildkit/util/bklog" @@ -72,6 +73,22 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { ss.Warnings = append(ss.Warnings, &v) } } + sort.Slice(ss.Vertexes, func(i, j int) bool { + if ss.Vertexes[i].Started == nil { + return true + } + if ss.Vertexes[j].Started == nil { + return false + } + return ss.Vertexes[i].Started.Before(*ss.Vertexes[j].Started) + }) + sort.Slice(ss.Statuses, func(i, j int) bool { + return ss.Statuses[i].Timestamp.Before(ss.Statuses[j].Timestamp) + }) + sort.Slice(ss.Logs, func(i, j int) bool { + return ss.Logs[i].Timestamp.Before(ss.Logs[j].Timestamp) + }) + select { case <-ctx.Done(): return ctx.Err() diff --git a/solver/result.go b/solver/result.go index 81766a30f4bc..2ba1ef9bc1b6 100644 --- a/solver/result.go +++ b/solver/result.go @@ -108,3 +108,26 @@ type SharedCachedResult struct { *SharedResult CachedResult } + +type splitResultProxy struct { + released int64 + sem *int64 + ResultProxy +} + +func (r *splitResultProxy) Release(ctx context.Context) error { + if atomic.AddInt64(&r.released, 1) > 1 { + err := errors.New("releasing already released reference") + bklog.G(ctx).Error(err) + return err + } + if atomic.AddInt64(r.sem, 1) == 2 { + return r.ResultProxy.Release(ctx) + } + return nil +} + +func SplitResultProxy(res ResultProxy) (ResultProxy, ResultProxy) { + sem := int64(0) + return &splitResultProxy{ResultProxy: res, sem: &sem}, &splitResultProxy{ResultProxy: res, sem: &sem} +} diff --git a/solver/result/attestation.go b/solver/result/attestation.go new file mode 100644 index 000000000000..77af74da1906 --- /dev/null +++ b/solver/result/attestation.go @@ -0,0 +1,79 @@ +package result + +import ( + "reflect" + + pb "github.com/moby/buildkit/frontend/gateway/pb" + digest "github.com/opencontainers/go-digest" +) + +const ( + AttestationReasonKey = "reason" + AttestationSBOMCore = "sbom-core" + AttestationInlineOnlyKey = "inline-only" +) + +const ( + AttestationReasonSBOM = "sbom" + AttestationReasonProvenance = "provenance" +) + +type Attestation[T any] struct { + Kind pb.AttestationKind + + Metadata map[string][]byte + + Ref T + Path string + ContentFunc func() ([]byte, error) + + InToto InTotoAttestation +} + +type InTotoAttestation struct { + PredicateType string + Subjects []InTotoSubject +} + +type InTotoSubject struct { + Kind pb.InTotoSubjectKind + + Name string + Digest []digest.Digest +} + +func ToDigestMap(ds ...digest.Digest) map[string]string { + m := map[string]string{} + for _, d := range ds { + m[d.Algorithm().String()] = d.Encoded() + } + return m +} + +func FromDigestMap(m map[string]string) []digest.Digest { + var ds []digest.Digest + for k, v := range m { + ds = append(ds, digest.NewDigestFromEncoded(digest.Algorithm(k), v)) + } + return ds +} + +func ConvertAttestation[U any, V any](a *Attestation[U], fn func(U) (V, error)) (*Attestation[V], error) { + var ref V + if reflect.ValueOf(a.Ref).IsValid() { + var err error + ref, err = fn(a.Ref) + if err != nil { + return nil, err + } + } + + return &Attestation[V]{ + Kind: a.Kind, + Metadata: a.Metadata, + Ref: ref, + 
Path: a.Path, + ContentFunc: a.ContentFunc, + InToto: a.InToto, + }, nil +} diff --git a/solver/result/result.go b/solver/result/result.go new file mode 100644 index 000000000000..d5fe2d03cfcd --- /dev/null +++ b/solver/result/result.go @@ -0,0 +1,180 @@ +package result + +import ( + "reflect" + "sync" + + "github.com/pkg/errors" +) + +type Result[T any] struct { + mu sync.Mutex + Ref T + Refs map[string]T + Metadata map[string][]byte + Attestations map[string][]Attestation[T] +} + +func (r *Result[T]) AddMeta(k string, v []byte) { + r.mu.Lock() + if r.Metadata == nil { + r.Metadata = map[string][]byte{} + } + r.Metadata[k] = v + r.mu.Unlock() +} + +func (r *Result[T]) AddRef(k string, ref T) { + r.mu.Lock() + if r.Refs == nil { + r.Refs = map[string]T{} + } + r.Refs[k] = ref + r.mu.Unlock() +} + +func (r *Result[T]) AddAttestation(k string, v Attestation[T]) { + r.mu.Lock() + if r.Attestations == nil { + r.Attestations = map[string][]Attestation[T]{} + } + r.Attestations[k] = append(r.Attestations[k], v) + r.mu.Unlock() +} + +func (r *Result[T]) SetRef(ref T) { + r.Ref = ref +} + +func (r *Result[T]) SingleRef() (T, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.Refs != nil && !reflect.ValueOf(r.Ref).IsValid() { + var t T + return t, errors.Errorf("invalid map result") + } + return r.Ref, nil +} + +func (r *Result[T]) FindRef(key string) (T, bool) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.Refs != nil { + if ref, ok := r.Refs[key]; ok { + return ref, true + } + if len(r.Refs) == 1 { + for _, ref := range r.Refs { + return ref, true + } + } + var t T + return t, false + } + return r.Ref, true +} + +func (r *Result[T]) EachRef(fn func(T) error) (err error) { + if reflect.ValueOf(r.Ref).IsValid() { + err = fn(r.Ref) + } + for _, r := range r.Refs { + if reflect.ValueOf(r).IsValid() { + if err1 := fn(r); err1 != nil && err == nil { + err = err1 + } + } + } + for _, as := range r.Attestations { + for _, a := range as { + if reflect.ValueOf(a.Ref).IsValid() { + if err1 := fn(a.Ref); err1 != nil && err == nil { + err = err1 + } + } + } + } + return err +} + +// EachRef iterates over references in both a and b. 
+// a and b are assumed to be of the same size and map their references +// to the same set of keys +func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { + if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() { + err = fn(a.Ref, b.Ref) + } + for k, r := range a.Refs { + r2, ok := b.Refs[k] + if !ok { + continue + } + if reflect.ValueOf(r).IsValid() && reflect.ValueOf(r2).IsValid() { + if err1 := fn(r, r2); err1 != nil && err == nil { + err = err1 + } + } + } + for k, atts := range a.Attestations { + atts2, ok := b.Attestations[k] + if !ok { + continue + } + for i, att := range atts { + if i >= len(atts2) { + break + } + att2 := atts2[i] + if reflect.ValueOf(att.Ref).IsValid() && reflect.ValueOf(att2.Ref).IsValid() { + if err1 := fn(att.Ref, att2.Ref); err1 != nil && err == nil { + err = err1 + } + } + } + } + return err +} + +func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { + r2 := &Result[V]{} + var err error + + if reflect.ValueOf(r.Ref).IsValid() { + r2.Ref, err = fn(r.Ref) + if err != nil { + return nil, err + } + } + + if r.Refs != nil { + r2.Refs = map[string]V{} + } + for k, r := range r.Refs { + if !reflect.ValueOf(r).IsValid() { + continue + } + r2.Refs[k], err = fn(r) + if err != nil { + return nil, err + } + } + + if r.Attestations != nil { + r2.Attestations = map[string][]Attestation[V]{} + } + for k, as := range r.Attestations { + for _, a := range as { + a2, err := ConvertAttestation(&a, fn) + if err != nil { + return nil, err + } + r2.Attestations[k] = append(r2.Attestations[k], *a2) + } + } + + r2.Metadata = r.Metadata + + return r2, nil +} diff --git a/solver/scheduler.go b/solver/scheduler.go index d617cd912ce3..2d0ee07afe92 100644 --- a/solver/scheduler.go +++ b/solver/scheduler.go @@ -222,8 +222,7 @@ func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error) wait := make(chan struct{}) - var p *pipe.Pipe - p = s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) + p := s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) p.OnSendCompletion = func() { p.Receiver.Receive() if p.Receiver.Status().Completed { diff --git a/solver/scheduler_test.go b/solver/scheduler_test.go index eaf18a70a502..2e4c602bba5d 100644 --- a/solver/scheduler_test.go +++ b/solver/scheduler_test.go @@ -54,11 +54,10 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -81,10 +80,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -113,10 +111,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, 
*g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -149,10 +146,9 @@ func TestSingleLevelActiveGraph(t *testing.T) { } g3.Vertex.(*vertex).setupCallCounters() - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) require.Equal(t, *g3.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g3.Vertex.(*vertex).execCallCount, int64(1)) @@ -192,18 +188,16 @@ func TestSingleLevelActiveGraph(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j4.Build(ctx, g4) + res, err := j4.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result4") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j5.Build(ctx, g4) + res, err := j5.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result4") - require.Equal(t, len(bi), 0) return err }) @@ -240,10 +234,9 @@ func TestSingleLevelCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -267,10 +260,9 @@ func TestSingleLevelCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result1") - require.Equal(t, len(bi), 0) require.Equal(t, *g1.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g1.Vertex.(*vertex).execCallCount, int64(1)) @@ -298,10 +290,9 @@ func TestSingleLevelCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, *g0.Vertex.(*vertex).cacheCallCount, int64(1)) require.Equal(t, *g0.Vertex.(*vertex).execCallCount, int64(1)) @@ -366,18 +357,16 @@ func TestSingleLevelCacheParallel(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j1.Build(ctx, g1) + res, err := j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) @@ -460,18 +449,16 @@ func TestMultiLevelCacheParallel(t *testing.T) { eg, _ := errgroup.WithContext(ctx) eg.Go(func() error { - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) eg.Go(func() error { - res, bi, err := j1.Build(ctx, g1) + res, err := j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) return err }) @@ -514,7 +501,7 @@ func TestSingleCancelCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -556,7 +543,7 @@ func TestSingleCancelExec(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.Error(t, err) 
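Looking back at the `splitResultProxy` type added in solver/result.go earlier in this diff: a standalone sketch of the same split-release pattern, with hypothetical names and only the standard library (this is not BuildKit's actual API surface), may make the two-counter scheme easier to follow. Each handle may be released exactly once, and the wrapped release fires only when the second handle goes away:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// handle mirrors splitResultProxy: a per-handle release counter plus a
// counter shared by both halves.
type handle struct {
	released int64
	shared   *int64
	release  func() error
}

func (h *handle) Release() error {
	if atomic.AddInt64(&h.released, 1) > 1 {
		return errors.New("releasing already released reference")
	}
	// Only the second successful release across both handles reaches 2
	// and triggers the underlying release.
	if atomic.AddInt64(h.shared, 1) == 2 {
		return h.release()
	}
	return nil
}

func split(release func() error) (*handle, *handle) {
	shared := int64(0)
	return &handle{shared: &shared, release: release},
		&handle{shared: &shared, release: release}
}

func main() {
	a, b := split(func() error { fmt.Println("underlying released"); return nil })
	fmt.Println(a.Release()) // <nil>; underlying still held by the other half
	fmt.Println(b.Release()) // prints "underlying released", then <nil>
	fmt.Println(a.Release()) // error: releasing already released reference
}
```

The appeal of the design is that two consumers can each own a release-once view of a single result; a double release surfaces as an error instead of silently corrupting the count.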
require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -609,7 +596,7 @@ func TestSingleCancelParallel(t *testing.T) { }), } - _, _, err = j.Build(ctx, g) + _, err = j.Build(ctx, g) close(firstErrored) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -633,10 +620,9 @@ func TestSingleCancelParallel(t *testing.T) { } <-firstReady - res, bi, err := j.Build(ctx, g) + res, err := j.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) return err }) @@ -683,10 +669,9 @@ func TestMultiLevelCalculation(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 42) // 1 + 2*(7 + 2) + 2 + 2 + 19 - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -722,10 +707,9 @@ func TestMultiLevelCalculation(t *testing.T) { }, }), } - res, bi, err = j1.Build(ctx, g2) + res, err = j1.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrapInt(res), 42) - require.Equal(t, len(bi), 0) } func TestHugeGraph(t *testing.T) { @@ -757,10 +741,9 @@ func TestHugeGraph(t *testing.T) { // printGraph(g, "") g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), v) - require.Equal(t, len(bi), 0) require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) // execCount := *g.Vertex.(*vertexSum).execCallCount // require.True(t, execCount < 1000) @@ -780,10 +763,9 @@ func TestHugeGraph(t *testing.T) { }() g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g) + res, err = j1.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), v) - require.Equal(t, len(bi), 0) require.Equal(t, int64(nodes), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g.Vertex.(*vertexSum).execCallCount) @@ -837,10 +819,9 @@ func TestOptimizedCacheAccess(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) @@ -884,10 +865,9 @@ func TestOptimizedCacheAccess(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -947,10 +927,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(3), *g0.Vertex.(*vertex).execCallCount) @@ -995,10 +974,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -1042,10 
+1020,9 @@ func TestOptimizedCacheAccess2(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(3), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount) @@ -1093,10 +1070,9 @@ func TestSlowCache(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -1128,10 +1104,9 @@ func TestSlowCache(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -1186,11 +1161,9 @@ func TestParallelInputs(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) - require.NoError(t, j0.Discard()) j0 = nil @@ -1241,7 +1214,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.Error(t, err) require.Contains(t, err.Error(), "error-from-test") @@ -1282,7 +1255,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.Error(t, err) require.Equal(t, true, errors.Is(err, context.Canceled)) @@ -1323,7 +1296,7 @@ func TestErrorReturns(t *testing.T) { }), } - _, _, err = j2.Build(ctx, g2) + _, err = j2.Build(ctx, g2) require.Error(t, err) require.Contains(t, err.Error(), "exec-error-from-test") @@ -1367,10 +1340,9 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -1410,10 +1382,10 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(1), cacheManager.loadCounter) require.Equal(t, int64(0), cacheManager2.loadCounter) @@ -1439,10 +1411,10 @@ func TestMultipleCacheSources(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g2) + res, err = j1.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), cacheManager.loadCounter) require.Equal(t, int64(0), cacheManager2.loadCounter) @@ -1484,10 +1456,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1523,10 +1495,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-1") - require.Equal(t, 
len(bi), 0) + require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g1.Vertex.(*vertex).execCallCount) @@ -1561,10 +1533,10 @@ func TestRepeatBuildWithIgnoreCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-2") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g2.Vertex.(*vertex).execCallCount) @@ -1611,10 +1583,10 @@ func TestIgnoreCacheResumeFromSlowCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1652,10 +1624,10 @@ func TestIgnoreCacheResumeFromSlowCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g1.Vertex.(*vertex).execCallCount) @@ -1690,10 +1662,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) // match by vertex digest j1, err := l.NewJob("j1") @@ -1715,10 +1686,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result1") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -1744,10 +1714,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) // match by cache key j3, err := l.NewJob("j3") @@ -1769,10 +1738,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g3.Vertex.(*vertex).setupCallCounters() - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) // add another ignorecache merges now @@ -1795,10 +1763,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g4.Vertex.(*vertex).setupCallCounters() - res, bi, err = j4.Build(ctx, g4) + res, err = j4.Build(ctx, g4) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) // add another !ignorecache merges now @@ -1820,10 +1787,9 @@ func TestParallelBuildsIgnoreCache(t *testing.T) { } g5.Vertex.(*vertex).setupCallCounters() - res, bi, err = j5.Build(ctx, g5) + res, err = j5.Build(ctx, g5) require.NoError(t, err) require.Equal(t, unwrap(res), "result3") - require.Equal(t, len(bi), 0) } func TestSubbuild(t *testing.T) { @@ -1855,10 +1821,9 @@ func TestSubbuild(t *testing.T) { } g0.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 8) - require.Equal(t, 
len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).execCallCount) @@ -1877,10 +1842,9 @@ func TestSubbuild(t *testing.T) { g0.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 8) - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g0.Vertex.(*vertexSum).execCallCount) @@ -1929,10 +1893,9 @@ func TestCacheWithSelector(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -1971,10 +1934,9 @@ func TestCacheWithSelector(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) @@ -2013,10 +1975,9 @@ func TestCacheWithSelector(t *testing.T) { } g2.Vertex.(*vertex).setupCallCounters() - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0-1") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g2.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(1), *g2.Vertex.(*vertex).execCallCount) @@ -2069,10 +2030,9 @@ func TestCacheSlowWithSelector(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g0.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(2), *g0.Vertex.(*vertex).execCallCount) @@ -2114,10 +2074,9 @@ func TestCacheSlowWithSelector(t *testing.T) { } g1.Vertex.(*vertex).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.Equal(t, int64(2), *g1.Vertex.(*vertex).cacheCallCount) require.Equal(t, int64(0), *g1.Vertex.(*vertex).execCallCount) @@ -2157,10 +2116,9 @@ func TestCacheExporting(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 6) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2189,10 +2147,9 @@ func TestCacheExporting(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 6) - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2247,10 +2204,9 @@ func TestCacheExportingModeMin(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2281,10 +2237,9 @@ func TestCacheExportingModeMin(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - 
require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2316,10 +2271,9 @@ func TestCacheExportingModeMin(t *testing.T) { } }() - res, bi, err = j2.Build(ctx, g0) + res, err = j2.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2399,10 +2353,10 @@ func TestSlowCacheAvoidAccess(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -2419,10 +2373,9 @@ func TestSlowCacheAvoidAccess(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2502,10 +2455,10 @@ func TestSlowCacheAvoidLoadOnCache(t *testing.T) { } g0.Vertex.(*vertex).setupCallCounters() - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "resultmain") - require.Equal(t, len(bi), 0) + require.Equal(t, int64(0), cacheManager.loadCounter) require.NoError(t, j0.Discard()) @@ -2576,10 +2529,9 @@ func TestSlowCacheAvoidLoadOnCache(t *testing.T) { } }() - res, bi, err = j1.Build(ctx, g0) + res, err = j1.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "resultmain") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2621,10 +2573,9 @@ func TestCacheMultipleMaps(t *testing.T) { value: "result0", }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2658,10 +2609,9 @@ func TestCacheMultipleMaps(t *testing.T) { }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2694,10 +2644,9 @@ func TestCacheMultipleMaps(t *testing.T) { }), } - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2749,10 +2698,9 @@ func TestCacheInputMultipleMaps(t *testing.T) { }}, }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) expTarget := newTestExporterTarget() @@ -2791,10 +2739,9 @@ func TestCacheInputMultipleMaps(t *testing.T) { }}, }), } - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, expTarget, testExporterOpts(true)) require.NoError(t, err) @@ -2848,10 +2795,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -2882,10 +2828,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { g1 := g0 - res, bi, err = j1.Build(ctx, g1) + res, err 
= j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -2937,10 +2882,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { }), } - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -2984,10 +2928,9 @@ func TestCacheExportingPartialSelector(t *testing.T) { ), } - res, bi, err = j3.Build(ctx, g3) + res, err = j3.Build(ctx, g3) require.NoError(t, err) require.Equal(t, unwrap(res), "result2") - require.Equal(t, len(bi), 0) require.NoError(t, j3.Discard()) j3 = nil @@ -3083,10 +3026,9 @@ func TestCacheExportingMergedKey(t *testing.T) { }), } - res, bi, err := j0.Build(ctx, g0) + res, err := j0.Build(ctx, g0) require.NoError(t, err) require.Equal(t, unwrap(res), "result0") - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -3144,10 +3086,10 @@ func TestMergedEdgesLookup(t *testing.T) { } g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) - require.Equal(t, len(bi), 0) + require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), cacheManager.loadCounter) @@ -3196,13 +3138,12 @@ func TestCacheLoadError(t *testing.T) { } g.Vertex.(*vertexSum).setupCallCounters() - res, bi, err := j0.Build(ctx, g) + res, err := j0.Build(ctx, g) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(5), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(0), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j0.Discard()) j0 = nil @@ -3221,13 +3162,12 @@ func TestCacheLoadError(t *testing.T) { g1.Vertex.(*vertexSum).setupCallCounters() - res, bi, err = j1.Build(ctx, g1) + res, err = j1.Build(ctx, g1) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(0), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(1), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j1.Discard()) j1 = nil @@ -3248,13 +3188,12 @@ func TestCacheLoadError(t *testing.T) { cacheManager.forceFail = true - res, bi, err = j2.Build(ctx, g2) + res, err = j2.Build(ctx, g2) require.NoError(t, err) require.Equal(t, unwrapInt(res), 11) require.Equal(t, int64(7), *g.Vertex.(*vertexSum).cacheCallCount) require.Equal(t, int64(5), *g.Vertex.(*vertexSum).execCallCount) require.Equal(t, int64(6), cacheManager.loadCounter) - require.Equal(t, len(bi), 0) require.NoError(t, j2.Discard()) j2 = nil @@ -3302,7 +3241,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j0.Build(ctx, g0) + _, err = j0.Build(ctx, g0) require.NoError(t, err) require.NoError(t, j0.Discard()) j0 = nil @@ -3340,7 +3279,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j1.Build(ctx, g1) + _, err = j1.Build(ctx, g1) require.NoError(t, err) require.NoError(t, j1.Discard()) j1 = nil @@ -3381,7 +3320,7 @@ func TestInputRequestDeadlock(t *testing.T) { }), } - _, _, err = j2.Build(ctx, g2) + _, err = j2.Build(ctx, g2) require.NoError(t, err) require.NoError(t, j2.Discard()) j2 = nil @@ -3389,10 +3328,10 @@ func TestInputRequestDeadlock(t *testing.T) { func 
generateSubGraph(nodes int) (Edge, int) { if nodes == 1 { - value := rand.Int() % 500 + value := rand.Int() % 500 //nolint:gosec return Edge{Vertex: vtxConst(value, vtxOpt{})}, value } - spread := rand.Int()%5 + 2 + spread := rand.Int()%5 + 2 //nolint:gosec inc := int(math.Ceil(float64(nodes) / float64(spread))) if inc > nodes { inc = nodes @@ -3414,7 +3353,7 @@ func generateSubGraph(nodes int) (Edge, int) { value += v added += inc } - extra := rand.Int() % 500 + extra := rand.Int() % 500 //nolint:gosec value += extra return Edge{Vertex: vtxSum(extra, vtxOpt{inputs: inputs})}, value } @@ -3684,7 +3623,7 @@ func (v *vertexSubBuild) Exec(ctx context.Context, g session.Group, inputs []Res if err := v.exec(ctx, inputs); err != nil { return nil, err } - res, _, err := v.b.Build(ctx, v.g) + res, err := v.b.Build(ctx, v.g) if err != nil { return nil, err } @@ -3844,7 +3783,7 @@ type testExporterRecord struct { linkMap map[digest.Digest]struct{} } -func (r *testExporterRecord) AddResult(createdAt time.Time, result *Remote) { +func (r *testExporterRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *Remote) { r.results++ } diff --git a/solver/testutil/cachestorage_testsuite.go b/solver/testutil/cachestorage_testsuite.go index e883ddd2a6eb..6abaa5676ae5 100644 --- a/solver/testutil/cachestorage_testsuite.go +++ b/solver/testutil/cachestorage_testsuite.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func RunCacheStorageTests(t *testing.T, st func() (solver.CacheKeyStorage, func())) { +func RunCacheStorageTests(t *testing.T, st func() solver.CacheKeyStorage) { for _, tc := range []func(*testing.T, solver.CacheKeyStorage){ testResults, testLinks, @@ -27,11 +27,9 @@ func RunCacheStorageTests(t *testing.T, st func() (solver.CacheKeyStorage, func( } } -func runStorageTest(t *testing.T, fn func(t *testing.T, st solver.CacheKeyStorage), st func() (solver.CacheKeyStorage, func())) { +func runStorageTest(t *testing.T, fn func(t *testing.T, st solver.CacheKeyStorage), st func() solver.CacheKeyStorage) { require.True(t, t.Run(getFunctionName(fn), func(t *testing.T) { - s, cleanup := st() - defer cleanup() - fn(t, s) + fn(t, st()) })) } @@ -387,7 +385,7 @@ func testWalkIDsByResult(t *testing.T, st solver.CacheKeyStorage) { func getFunctionName(i interface{}) string { fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() dot := strings.LastIndex(fullname, ".") + 1 - return strings.Title(fullname[dot:]) + return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support } func rootKey(dgst digest.Digest, output solver.Index) digest.Digest { diff --git a/solver/testutil/memorycachestorage_test.go b/solver/testutil/memorycachestorage_test.go index ea1f6be464fe..ae42c73053e4 100644 --- a/solver/testutil/memorycachestorage_test.go +++ b/solver/testutil/memorycachestorage_test.go @@ -7,7 +7,5 @@ import ( ) func TestMemoryCacheStorage(t *testing.T) { - RunCacheStorageTests(t, func() (solver.CacheKeyStorage, func()) { - return solver.NewInMemoryCacheStorage(), func() {} - }) + RunCacheStorageTests(t, solver.NewInMemoryCacheStorage) } diff --git a/solver/types.go b/solver/types.go index a20c1020f21e..6635daef0e65 100644 --- a/solver/types.go +++ b/solver/types.go @@ -72,11 +72,17 @@ type CachedResult interface { CacheKeys() []ExportableCacheKey } +type CachedResultWithProvenance interface { + CachedResult + WalkProvenance(context.Context, func(ProvenanceProvider) error) error 
+} + type ResultProxy interface { + ID() string Result(context.Context) (CachedResult, error) Release(context.Context) error Definition() *pb.Definition - BuildSources() BuildSources + Provenance() interface{} } // CacheExportMode is the type for setting cache exporting modes @@ -104,6 +110,8 @@ type CacheExportOpt struct { // CompressionOpt is an option to specify the compression of the object to load. // If specified, all objects that meet the option will be cached. CompressionOpt *compression.Config + // ExportRoots defines if records for root vertexes should be exported. + ExportRoots bool } // CacheExporter can export the artifacts of the build chain @@ -120,7 +128,7 @@ type CacheExporterTarget interface { // CacheExporterRecord is a single object being exported type CacheExporterRecord interface { - AddResult(createdAt time.Time, result *Remote) + AddResult(vtx digest.Digest, index int, createdAt time.Time, result *Remote) LinkFrom(src CacheExporterRecord, index int, selector string) } @@ -159,6 +167,10 @@ type Op interface { Acquire(ctx context.Context) (release ReleaseFunc, err error) } +type ProvenanceProvider interface { + IsProvenanceProvider() +} + type ResultBasedCacheFunc func(context.Context, Result, session.Group) (digest.Digest, error) type PreprocessFunc func(context.Context, Result, session.Group) error @@ -196,15 +208,8 @@ type CacheMap struct { // such as oci descriptor content providers and progress writers to be passed to // the cache. Opts should not have any impact on the computed cache key. Opts CacheOpts - - // BuildSources contains build dependencies that will be set from source - // operation. - BuildSources BuildSources } -// BuildSources contains solved build dependencies. -type BuildSources map[string]string - // ExportableCacheKey is a cache key connected with an exporter that can export // a chain of cacherecords pointing to that key type ExportableCacheKey struct { diff --git a/source/containerimage/ocilayout.go b/source/containerimage/ocilayout.go new file mode 100644 index 000000000000..2358b5339b7a --- /dev/null +++ b/source/containerimage/ocilayout.go @@ -0,0 +1,153 @@ +package containerimage + +import ( + "context" + "io" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/session" + sessioncontent "github.com/moby/buildkit/session/content" + "github.com/moby/buildkit/util/imageutil" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + maxReadSize = 4 * 1024 * 1024 +) + +// getOCILayoutResolver gets a resolver to an OCI layout for a specified store from the client using the given session. +func getOCILayoutResolver(store llb.ResolveImageConfigOptStore, sm *session.Manager, g session.Group) *ociLayoutResolver { + r := &ociLayoutResolver{ + store: store, + sm: sm, + g: g, + } + return r +} + +type ociLayoutResolver struct { + remotes.Resolver + store llb.ResolveImageConfigOptStore + sm *session.Manager + g session.Group +} + +// Fetcher returns a new fetcher for the provided reference. 
+func (r *ociLayoutResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
+	return r, nil
+}
+
+// Fetch gets an io.ReadCloser for the specified content.
+func (r *ociLayoutResolver) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) {
+	var rc io.ReadCloser
+	err := r.withCaller(ctx, func(ctx context.Context, caller session.Caller) error {
+		store := sessioncontent.NewCallerStore(caller, "oci:"+r.store.StoreID)
+		readerAt, err := store.ReaderAt(ctx, desc)
+		if err != nil {
+			return err
+		}
+		rc = &readerAtWrapper{readerAt: readerAt}
+		return nil
+	})
+	return rc, err
+}
+
+// Resolve attempts to resolve the reference into a name and descriptor.
+// OCI Layout does not (yet) support tag name references, but does support hash references.
+func (r *ociLayoutResolver) Resolve(ctx context.Context, refString string) (string, ocispecs.Descriptor, error) {
+	ref, err := reference.Parse(refString)
+	if err != nil {
+		return "", ocispecs.Descriptor{}, errors.Wrapf(err, "invalid reference %q", refString)
+	}
+	dgst := ref.Digest()
+	if dgst == "" {
+		return "", ocispecs.Descriptor{}, errors.Errorf("reference %q must have digest", refString)
+	}
+
+	info, err := r.info(ctx, ref)
+	if err != nil {
+		return "", ocispecs.Descriptor{}, errors.Wrap(err, "unable to get info about digest")
+	}
+
+	// Create the descriptor, then use that to read the actual root manifest.
+	// This is necessary because we do not know the media-type of the descriptor,
+	// and there are descriptor processing elements that expect it.
+	desc := ocispecs.Descriptor{
+		Digest: info.Digest,
+		Size:   info.Size,
+	}
+	rc, err := r.Fetch(ctx, desc)
+	if err != nil {
+		return "", ocispecs.Descriptor{}, errors.Wrap(err, "unable to get root manifest")
+	}
+	b, err := io.ReadAll(io.LimitReader(rc, maxReadSize))
+	if err != nil {
+		return "", ocispecs.Descriptor{}, errors.Wrap(err, "unable to read root manifest")
+	}
+
+	mediaType, err := imageutil.DetectManifestBlobMediaType(b)
+	if err != nil {
+		return "", ocispecs.Descriptor{}, errors.Wrapf(err, "reference %q contains neither an index nor a manifest", refString)
+	}
+	desc.MediaType = mediaType
+
+	return refString, desc, nil
+}
+
+func (r *ociLayoutResolver) info(ctx context.Context, ref reference.Spec) (content.Info, error) {
+	var info *content.Info
+	err := r.withCaller(ctx, func(ctx context.Context, caller session.Caller) error {
+		store := sessioncontent.NewCallerStore(caller, "oci:"+r.store.StoreID)
+
+		_, dgst := reference.SplitObject(ref.Object)
+		if dgst == "" {
+			return errors.Errorf("reference %q does not contain a digest", ref.String())
+		}
+		in, err := store.Info(ctx, dgst)
+		info = &in
+		return err
+	})
+	if err != nil {
+		return content.Info{}, err
+	}
+	if info == nil {
+		return content.Info{}, errors.Errorf("reference %q did not match any content", ref.String())
+	}
+	return *info, nil
+}
+
+func (r *ociLayoutResolver) withCaller(ctx context.Context, f func(context.Context, session.Caller) error) error {
+	if r.store.SessionID != "" {
+		timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+		defer cancel()
+
+		caller, err := r.sm.Get(timeoutCtx, r.store.SessionID, false)
+		if err != nil {
+			return err
+		}
+		return f(ctx, caller)
+	}
+	return r.sm.Any(ctx, r.g, func(ctx context.Context, _ string, caller session.Caller) error {
+		return f(ctx, caller)
+	})
+}
+
+// readerAtWrapper wraps a content.ReaderAt to expose an io.Reader.
+type readerAtWrapper struct {
+	offset   int64
+	readerAt content.ReaderAt
+}
+
+func (r *readerAtWrapper)
Read(p []byte) (n int, err error) { + n, err = r.readerAt.ReadAt(p, r.offset) + r.offset += int64(n) + return +} +func (r *readerAtWrapper) Close() error { + return r.readerAt.Close() +} diff --git a/source/containerimage/pull.go b/source/containerimage/pull.go index a989f0e1e9d4..509d2a994660 100644 --- a/source/containerimage/pull.go +++ b/source/containerimage/pull.go @@ -12,9 +12,12 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/snapshots" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" @@ -39,6 +42,13 @@ import ( // TODO: break apart containerd specifics like contentstore so the resolver // code can be used with any implementation +type ResolverType int + +const ( + ResolverTypeRegistry ResolverType = iota + ResolverTypeOCILayout +) + type SourceOpt struct { Snapshotter snapshot.Snapshotter ContentStore content.Store @@ -46,7 +56,8 @@ type SourceOpt struct { CacheAccessor cache.Accessor ImageStore images.Store // optional RegistryHosts docker.RegistryHosts - LeaseManager leases.Manager + ResolverType + LeaseManager leases.Manager } type Source struct { @@ -65,6 +76,9 @@ func NewSource(opt SourceOpt) (*Source, error) { } func (is *Source) ID() string { + if is.ResolverType == ResolverTypeOCILayout { + return srctypes.OCIScheme + } return srctypes.DockerImageScheme } @@ -73,20 +87,31 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re dgst digest.Digest dt []byte } + var typed *t key := ref if platform := opt.Platform; platform != nil { key += platforms.Format(*platform) } - - rm, err := source.ParseImageResolveMode(opt.ResolveMode) - if err != nil { - return "", nil, err + var ( + rm source.ResolveMode + rslvr remotes.Resolver + err error + ) + + switch is.ResolverType { + case ResolverTypeRegistry: + rm, err = source.ParseImageResolveMode(opt.ResolveMode) + if err != nil { + return "", nil, err + } + rslvr = resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g).WithImageStore(is.ImageStore, rm) + case ResolverTypeOCILayout: + rm = source.ResolveModeForcePull + rslvr = getOCILayoutResolver(opt.Store, sm, g) } key += rm.String() - res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { - res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g).WithImageStore(is.ImageStore, rm) - dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, opt.Platform) + dgst, dt, err := imageutil.Config(ctx, ref, rslvr, is.ContentStore, is.LeaseManager, opt.Platform) if err != nil { return nil, err } @@ -95,37 +120,73 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re if err != nil { return "", nil, err } - typed := res.(*t) + typed = res.(*t) return typed.dgst, typed.dt, nil } func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) { - imageIdentifier, ok := id.(*source.ImageIdentifier) - if !ok { - return nil, errors.Errorf("invalid image identifier %v", id) - } + var ( + p *puller + platform = platforms.DefaultSpec() + pullerUtil *pull.Puller + mode source.ResolveMode + 
recordType client.UsageRecordType + ref reference.Spec + store llb.ResolveImageConfigOptStore + layerLimit *int + ) + switch is.ResolverType { + case ResolverTypeRegistry: + imageIdentifier, ok := id.(*source.ImageIdentifier) + if !ok { + return nil, errors.Errorf("invalid image identifier %v", id) + } - platform := platforms.DefaultSpec() - if imageIdentifier.Platform != nil { - platform = *imageIdentifier.Platform - } + if imageIdentifier.Platform != nil { + platform = *imageIdentifier.Platform + } + mode = imageIdentifier.ResolveMode + recordType = imageIdentifier.RecordType + ref = imageIdentifier.Reference + layerLimit = imageIdentifier.LayerLimit + case ResolverTypeOCILayout: + ociIdentifier, ok := id.(*source.OCIIdentifier) + if !ok { + return nil, errors.Errorf("invalid OCI layout identifier %v", id) + } - pullerUtil := &pull.Puller{ + if ociIdentifier.Platform != nil { + platform = *ociIdentifier.Platform + } + mode = source.ResolveModeForcePull // with OCI layout, we always just "pull" + store = llb.ResolveImageConfigOptStore{ + SessionID: ociIdentifier.SessionID, + StoreID: ociIdentifier.StoreID, + } + ref = ociIdentifier.Reference + layerLimit = ociIdentifier.LayerLimit + default: + return nil, errors.Errorf("unknown resolver type: %v", is.ResolverType) + } + pullerUtil = &pull.Puller{ ContentStore: is.ContentStore, Platform: platform, - Src: imageIdentifier.Reference, + Src: ref, } - p := &puller{ + p = &puller{ CacheAccessor: is.CacheAccessor, LeaseManager: is.LeaseManager, Puller: pullerUtil, - id: imageIdentifier, RegistryHosts: is.RegistryHosts, + ResolverType: is.ResolverType, ImageStore: is.ImageStore, - Mode: imageIdentifier.ResolveMode, - Ref: imageIdentifier.Reference.String(), + Mode: mode, + RecordType: recordType, + Ref: ref.String(), SessionManager: sm, vtx: vtx, + store: store, + layerLimit: layerLimit, } return p, nil } @@ -136,10 +197,13 @@ type puller struct { RegistryHosts docker.RegistryHosts ImageStore images.Store Mode source.ResolveMode + RecordType client.UsageRecordType Ref string SessionManager *session.Manager - id *source.ImageIdentifier + layerLimit *int vtx solver.Vertex + ResolverType + store llb.ResolveImageConfigOptStore g flightcontrol.Group cacheKeyErr error @@ -152,17 +216,19 @@ type puller struct { *pull.Puller } -func mainManifestKey(ctx context.Context, desc ocispecs.Descriptor, platform ocispecs.Platform) (digest.Digest, error) { +func mainManifestKey(ctx context.Context, desc ocispecs.Descriptor, platform ocispecs.Platform, layerLimit *int) (digest.Digest, error) { dt, err := json.Marshal(struct { Digest digest.Digest OS string Arch string Variant string `json:",omitempty"` + Limit *int `json:",omitempty"` }{ Digest: desc.Digest, OS: platform.OS, Arch: platform.Architecture, Variant: platform.Variant, + Limit: layerLimit, }) if err != nil { return "", err @@ -171,18 +237,30 @@ func mainManifestKey(ctx context.Context, desc ocispecs.Descriptor, platform oci } func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cacheKey string, imgDigest string, cacheOpts solver.CacheOpts, cacheDone bool, err error) { - p.Puller.Resolver = resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.id.ResolveMode) + var getResolver pull.SessionResolver + switch p.ResolverType { + case ResolverTypeRegistry: + resolver := resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.Mode) + p.Puller.Resolver = resolver + 
getResolver = func(g session.Group) remotes.Resolver { return resolver.WithSession(g) } + case ResolverTypeOCILayout: + resolver := getOCILayoutResolver(p.store, p.SessionManager, g) + p.Puller.Resolver = resolver + // OCILayout has no need for session + getResolver = func(g session.Group) remotes.Resolver { return resolver } + default: + } // progressFactory needs the outer context, the context in `p.g.Do` will // be canceled before the progress output is complete progressFactory := progress.FromContext(ctx) _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { - if p.cacheKeyErr != nil || p.cacheKeyDone == true { + if p.cacheKeyErr != nil || p.cacheKeyDone { return nil, p.cacheKeyErr } defer func() { - if !errdefs.IsCanceled(err) { + if !errdefs.IsCanceled(ctx, err) { p.cacheKeyErr = err } }() @@ -193,16 +271,23 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach p.releaseTmpLeases = done defer imageutil.AddLease(done) - resolveProgressDone := oneOffProgress(ctx, "resolve "+p.Src.String()) + resolveProgressDone := progress.OneOff(ctx, "resolve "+p.Src.String()) defer func() { resolveProgressDone(err) }() - p.manifest, err = p.PullManifests(ctx) + p.manifest, err = p.PullManifests(ctx, getResolver) if err != nil { return nil, err } + if ll := p.layerLimit; ll != nil { + if *ll > len(p.manifest.Descriptors) { + return nil, errors.Errorf("layer limit %d is greater than the number of layers in the image %d", *ll, len(p.manifest.Descriptors)) + } + p.manifest.Descriptors = p.manifest.Descriptors[:*ll] + } + if len(p.manifest.Descriptors) > 0 { progressController := &controller.Controller{ WriterFactory: progressFactory, @@ -233,7 +318,7 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach } desc := p.manifest.MainManifestDesc - k, err := mainManifestKey(ctx, desc, p.Platform) + k, err := mainManifestKey(ctx, desc, p.Platform, p.layerLimit) if err != nil { return nil, err } @@ -243,7 +328,11 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach if err != nil { return nil, err } - p.configKey = cacheKeyFromConfig(dt).String() + ck, err := cacheKeyFromConfig(dt, p.layerLimit) + if err != nil { + return nil, err + } + p.configKey = ck.String() p.cacheKeyDone = true return nil, nil }) @@ -264,7 +353,19 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach } func (p *puller) Snapshot(ctx context.Context, g session.Group) (ir cache.ImmutableRef, err error) { - p.Puller.Resolver = resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.id.ResolveMode) + var getResolver pull.SessionResolver + switch p.ResolverType { + case ResolverTypeRegistry: + resolver := resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.Mode) + p.Puller.Resolver = resolver + getResolver = func(g session.Group) remotes.Resolver { return resolver.WithSession(g) } + case ResolverTypeOCILayout: + resolver := getOCILayoutResolver(p.store, p.SessionManager, g) + p.Puller.Resolver = resolver + // OCILayout has no need for session + getResolver = func(g session.Group) remotes.Resolver { return resolver } + default: + } if len(p.manifest.Descriptors) == 0 { return nil, nil @@ -310,7 +411,7 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (ir cache.Immuta } defer done(ctx) - if _, err := p.PullManifests(ctx); err != nil { + if _, err := 
p.PullManifests(ctx, getResolver); err != nil { return nil, err } } else if err != nil { @@ -325,8 +426,8 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (ir cache.Immuta } } - if p.id.RecordType != "" && current.GetRecordType() == "" { - if err := current.SetRecordType(p.id.RecordType); err != nil { + if p.RecordType != "" && current.GetRecordType() == "" { + if err := current.SetRecordType(p.RecordType); err != nil { return nil, err } } @@ -336,31 +437,25 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (ir cache.Immuta // cacheKeyFromConfig returns a stable digest from image config. If image config // is a known oci image we will use chainID of layers. -func cacheKeyFromConfig(dt []byte) digest.Digest { +func cacheKeyFromConfig(dt []byte, layerLimit *int) (digest.Digest, error) { var img ocispecs.Image err := json.Unmarshal(dt, &img) if err != nil { - return digest.FromBytes(dt) + if layerLimit != nil { + return "", errors.Wrap(err, "failed to parse image config") + } + return digest.FromBytes(dt), nil // digest of config + } + if layerLimit != nil { + l := *layerLimit + if len(img.RootFS.DiffIDs) < l { + return "", errors.Errorf("image has %d layers, limit is %d", len(img.RootFS.DiffIDs), l) + } + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:l] } if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 { - return "" + return "", nil } - return identity.ChainID(img.RootFS.DiffIDs) -} -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } + return identity.ChainID(img.RootFS.DiffIDs), nil } diff --git a/source/git/gitsource.go b/source/git/gitsource.go index 9169992f7ad9..dd35fe55f7c9 100644 --- a/source/git/gitsource.go +++ b/source/git/gitsource.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -126,7 +125,11 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri }() if initializeRepo { - if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "init", "--bare"); err != nil { + // Explicitly set the Git config 'init.defaultBranch' to the + // implied default to suppress "hint:" output about not having a + // default initial branch name set which otherwise spams unit + // test logs. 
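+		// The invocation below is equivalent to running
+		// `git -c init.defaultBranch=master init --bare` inside dir.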
+ if _, err := gitWithinDir(ctx, dir, "", "", "", auth, "-c", "init.defaultBranch=master", "init", "--bare"); err != nil { return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir) } @@ -273,7 +276,7 @@ func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() if gs.src.KnownSSHHosts == "" { return "", nil, errors.Errorf("no configured known hosts forwarded from the client") } - knownHosts, err := ioutil.TempFile("", "") + knownHosts, err := os.CreateTemp("", "") if err != nil { return "", nil, err } @@ -493,11 +496,14 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out if err := os.MkdirAll(checkoutDir, 0711); err != nil { return nil, err } - _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "init") + _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "-c", "init.defaultBranch=master", "init") if err != nil { return nil, err } - _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "add", "origin", gitDir) + // Defense-in-depth: clone using the file protocol to disable local-clone + // optimizations which can be abused on some versions of Git to copy unintended + // host files into the build context. + _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "add", "origin", "file://"+gitDir) if err != nil { return nil, err } @@ -543,7 +549,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } else { cd := checkoutDir if subdir != "." { - cd, err = ioutil.TempDir(cd, "checkout") + cd, err = os.MkdirTemp(cd, "checkout") if err != nil { return nil, errors.Wrapf(err, "failed to create temporary checkout dir") } @@ -588,7 +594,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out if idmap := mount.IdentityMapping(); idmap != nil { u := idmap.RootPair() - err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error { + err := filepath.WalkDir(gitDir, func(p string, _ os.DirEntry, _ error) error { return os.Lchown(p, u.UID, u.GID) }) if err != nil { @@ -650,6 +656,7 @@ func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...strin flush() } }() + args = append([]string{"-c", "protocol.file.allow=user"}, args...) // Block sneaky repositories from using repos from the filesystem as submodules. cmd := exec.Command("git", args...) cmd.Dir = dir // some commands like submodule require this buf := bytes.NewBuffer(nil) @@ -662,6 +669,8 @@ func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...strin "GIT_TERMINAL_PROMPT=0", "GIT_SSH_COMMAND=" + getGitSSHCommand(knownHosts), // "GIT_TRACE=1", + "GIT_CONFIG_NOSYSTEM=1", // Disable reading from system gitconfig. + "HOME=/dev/null", // Disable reading from user gitconfig. 
} if sshAuthSock != "" { cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock) diff --git a/source/git/gitsource_test.go b/source/git/gitsource_test.go index 6af1e4a4841c..8022ddff4727 100644 --- a/source/git/gitsource_test.go +++ b/source/git/gitsource_test.go @@ -1,12 +1,16 @@ package git import ( + "bytes" "context" - "io/ioutil" + "net/http" + "net/http/cgi" + "net/http/httptest" "os" "os/exec" "path/filepath" "runtime" + "strconv" "strings" "testing" @@ -20,12 +24,13 @@ import ( "github.com/docker/docker/pkg/reexec" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" "github.com/moby/buildkit/snapshot" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/winlayers" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" @@ -50,22 +55,13 @@ func testRepeatedFetch(t *testing.T, keepGitDir bool) { } t.Parallel() - ctx := context.TODO() + ctx := logProgressStreams(context.Background(), t) - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) + gs := setupGitSource(t, t.TempDir()) - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) + repo := setupGitRepo(t) - id := &source.GitIdentifier{Remote: repodir, KeepGitDir: keepGitDir} + id := &source.GitIdentifier{Remote: repo.mainURL, KeepGitDir: keepGitDir} g, err := gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -94,7 +90,7 @@ func testRepeatedFetch(t *testing.T, keepGitDir bool) { require.NoError(t, err) defer lm.Unmount() - dt, err := ioutil.ReadFile(filepath.Join(dir, "def")) + dt, err := os.ReadFile(filepath.Join(dir, "def")) require.NoError(t, err) require.Equal(t, "bar\n", string(dt)) @@ -106,7 +102,7 @@ func testRepeatedFetch(t *testing.T, keepGitDir bool) { require.ErrorAs(t, err, &os.ErrNotExist) // second fetch returns same dir - id = &source.GitIdentifier{Remote: repodir, Ref: "master", KeepGitDir: keepGitDir} + id = &source.GitIdentifier{Remote: repo.mainURL, Ref: "master", KeepGitDir: keepGitDir} g, err = gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -123,7 +119,7 @@ func testRepeatedFetch(t *testing.T, keepGitDir bool) { require.Equal(t, ref1.ID(), ref2.ID()) - id = &source.GitIdentifier{Remote: repodir, Ref: "feature", KeepGitDir: keepGitDir} + id = &source.GitIdentifier{Remote: repo.mainURL, Ref: "feature", KeepGitDir: keepGitDir} g, err = gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -145,12 +141,12 @@ func testRepeatedFetch(t *testing.T, keepGitDir bool) { require.NoError(t, err) defer lm.Unmount() - dt, err = ioutil.ReadFile(filepath.Join(dir, "ghi")) + dt, err = os.ReadFile(filepath.Join(dir, "ghi")) require.NoError(t, err) require.Equal(t, "baz\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(dir, "sub/subfile")) + dt, err = os.ReadFile(filepath.Join(dir, "sub/subfile")) require.NoError(t, err) require.Equal(t, "subcontents\n", string(dt)) @@ -170,22 +166,14 @@ func testFetchBySHA(t *testing.T, keepGitDir bool) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") + ctx = logProgressStreams(ctx, t) - tmpdir, err := ioutil.TempDir("", 
"buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + gs := setupGitSource(t, t.TempDir()) - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) - - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) + repo := setupGitRepo(t) cmd := exec.Command("git", "rev-parse", "feature") - cmd.Dir = repodir + cmd.Dir = repo.mainPath out, err := cmd.Output() require.NoError(t, err) @@ -193,7 +181,7 @@ func testFetchBySHA(t *testing.T, keepGitDir bool) { sha := strings.TrimSpace(string(out)) require.Equal(t, 40, len(sha)) - id := &source.GitIdentifier{Remote: repodir, Ref: sha, KeepGitDir: keepGitDir} + id := &source.GitIdentifier{Remote: repo.mainURL, Ref: sha, KeepGitDir: keepGitDir} g, err := gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -222,12 +210,12 @@ func testFetchBySHA(t *testing.T, keepGitDir bool) { require.NoError(t, err) defer lm.Unmount() - dt, err := ioutil.ReadFile(filepath.Join(dir, "ghi")) + dt, err := os.ReadFile(filepath.Join(dir, "ghi")) require.NoError(t, err) require.Equal(t, "baz\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(dir, "sub/subfile")) + dt, err = os.ReadFile(filepath.Join(dir, "sub/subfile")) require.NoError(t, err) require.Equal(t, "subcontents\n", string(dt)) @@ -256,21 +244,13 @@ func testFetchByTag(t *testing.T, tag, expectedCommitSubject string, isAnnotated t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") + ctx = logProgressStreams(ctx, t) - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) - - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) + gs := setupGitSource(t, t.TempDir()) - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) + repo := setupGitRepo(t) - id := &source.GitIdentifier{Remote: repodir, Ref: tag, KeepGitDir: keepGitDir} + id := &source.GitIdentifier{Remote: repo.mainURL, Ref: tag, KeepGitDir: keepGitDir} g, err := gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -299,11 +279,11 @@ func testFetchByTag(t *testing.T, tag, expectedCommitSubject string, isAnnotated require.NoError(t, err) defer lm.Unmount() - dt, err := ioutil.ReadFile(filepath.Join(dir, "def")) + dt, err := os.ReadFile(filepath.Join(dir, "def")) require.NoError(t, err) require.Equal(t, "bar\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(dir, "foo13")) + dt, err = os.ReadFile(filepath.Join(dir, "foo13")) if hasFoo13File { require.Nil(t, err) require.Equal(t, "sbb\n", string(dt)) @@ -350,36 +330,26 @@ func testMultipleRepos(t *testing.T, keepGitDir bool) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") + ctx = logProgressStreams(ctx, t) - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + gs := setupGitSource(t, t.TempDir()) - gs := setupGitSource(t, tmpdir) + repo := setupGitRepo(t) - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) + repodir2 := t.TempDir() - repodir, err = setupGitRepo(repodir) - require.NoError(t, err) - - repodir2, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir2) - - err = runShell(repodir2, - "git init", + runShell(t, repodir2, + "git -c init.defaultBranch=master init", "git config 
--local user.email test", "git config --local user.name test", "echo xyz > xyz", "git add xyz", "git commit -m initial", ) - require.NoError(t, err) + repoURL2 := serveGitRepo(t, repodir2) - id := &source.GitIdentifier{Remote: repodir, KeepGitDir: keepGitDir} - id2 := &source.GitIdentifier{Remote: repodir2, KeepGitDir: keepGitDir} + id := &source.GitIdentifier{Remote: repo.mainURL, KeepGitDir: keepGitDir} + id2 := &source.GitIdentifier{Remote: repoURL2, KeepGitDir: keepGitDir} g, err := gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -429,12 +399,12 @@ func testMultipleRepos(t *testing.T, keepGitDir bool) { require.NoError(t, err) defer lm.Unmount() - dt, err := ioutil.ReadFile(filepath.Join(dir, "def")) + dt, err := os.ReadFile(filepath.Join(dir, "def")) require.NoError(t, err) require.Equal(t, "bar\n", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(dir2, "xyz")) + dt, err = os.ReadFile(filepath.Join(dir2, "xyz")) require.NoError(t, err) require.Equal(t, "xyz\n", string(dt)) @@ -447,12 +417,9 @@ func TestCredentialRedaction(t *testing.T) { t.Parallel() ctx := namespaces.WithNamespace(context.Background(), "buildkit-test") + ctx = logProgressStreams(ctx, t) - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - gs := setupGitSource(t, tmpdir) + gs := setupGitSource(t, t.TempDir()) url := "https://user:keepthissecret@non-existant-host/user/private-repo.git" id := &source.GitIdentifier{Remote: url} @@ -478,20 +445,15 @@ func testSubdir(t *testing.T, keepGitDir bool) { } t.Parallel() - ctx := context.TODO() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) + ctx := logProgressStreams(context.Background(), t) - gs := setupGitSource(t, tmpdir) + gs := setupGitSource(t, t.TempDir()) - repodir, err := ioutil.TempDir("", "buildkit-gitsource") - require.NoError(t, err) - defer os.RemoveAll(repodir) + repodir := t.TempDir() - err = runShell(repodir, - "git init", + runShell(t, repodir, + "git -c init.defaultBranch=master init", "git config --local user.email test", "git config --local user.name test", "echo foo > abc", @@ -500,9 +462,9 @@ func testSubdir(t *testing.T, keepGitDir bool) { "git add abc sub", "git commit -m initial", ) - require.NoError(t, err) - id := &source.GitIdentifier{Remote: repodir, KeepGitDir: keepGitDir, Subdir: "sub"} + repoURL := serveGitRepo(t, repodir) + id := &source.GitIdentifier{Remote: repoURL, KeepGitDir: keepGitDir, Subdir: "sub"} g, err := gs.Resolve(ctx, id, nil, nil) require.NoError(t, err) @@ -531,12 +493,12 @@ func testSubdir(t *testing.T, keepGitDir bool) { require.NoError(t, err) defer lm.Unmount() - fis, err := ioutil.ReadDir(dir) + fis, err := os.ReadDir(dir) require.NoError(t, err) require.Equal(t, 1, len(fis)) - dt, err := ioutil.ReadFile(filepath.Join(dir, "bar")) + dt, err := os.ReadFile(filepath.Join(dir, "bar")) require.NoError(t, err) require.Equal(t, "abc\n", string(dt)) @@ -583,30 +545,34 @@ func setupGitSource(t *testing.T, tmpdir string) source.Source { return gs } -func setupGitRepo(dir string) (string, error) { - subPath := filepath.Join(dir, "sub") - mainPath := filepath.Join(dir, "main") - - if err := os.MkdirAll(subPath, 0700); err != nil { - return "", err - } +type gitRepoFixture struct { + mainPath, subPath string // Filesystem paths to the respective repos + mainURL, subURL string // HTTP URLs for the respective repos +} - if err := os.MkdirAll(mainPath, 0700); err != nil { - return "", err +func 
setupGitRepo(t *testing.T) gitRepoFixture { + t.Helper() + dir := t.TempDir() + srv := serveGitRepo(t, dir) + fixture := gitRepoFixture{ + subPath: filepath.Join(dir, "sub"), + subURL: srv + "/sub", + mainPath: filepath.Join(dir, "main"), + mainURL: srv + "/main", } + require.NoError(t, os.MkdirAll(fixture.subPath, 0700)) + require.NoError(t, os.MkdirAll(fixture.mainPath, 0700)) - if err := runShell(filepath.Join(dir, "sub"), - "git init", + runShell(t, fixture.subPath, + "git -c init.defaultBranch=master init", "git config --local user.email test", "git config --local user.name test", "echo subcontents > subfile", "git add subfile", "git commit -m initial", - ); err != nil { - return "", err - } - if err := runShell(filepath.Join(dir, "main"), - "git init", + ) + runShell(t, fixture.mainPath, + "git -c init.defaultBranch=master init", "git config --local user.email test", "git config --local user.name test", "echo foo > abc", @@ -624,17 +590,58 @@ func setupGitRepo(dir string) (string, error) { "echo baz > ghi", "git add ghi", "git commit -m feature", - "git submodule add "+subPath+" sub", + "git submodule add "+fixture.subURL+" sub", "git add -A", "git commit -m withsub", "git checkout master", - ); err != nil { - return "", err - } - return mainPath, nil + ) + return fixture +} + +func serveGitRepo(t *testing.T, root string) string { + t.Helper() + gitpath, err := exec.LookPath("git") + require.NoError(t, err) + gitversion, _ := exec.Command(gitpath, "version").CombinedOutput() + t.Logf("%s", gitversion) // E.g. "git version 2.30.2" + + // Serve all repositories under root using the Smart HTTP protocol so + // they can be cloned as we explicitly disable the file protocol. + // (Another option would be to use `git daemon` and the Git protocol, + // but that listens on a fixed port number which is a recipe for + // disaster in CI. Funnily enough, `git daemon --port=0` works but there + // is no easy way to discover which port got picked!) 
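+	// The handler below delegates each request to `git http-backend`, Git's
+	// stock CGI implementation of the Smart HTTP protocol: GIT_PROJECT_ROOT
+	// points it at root, and GIT_HTTP_EXPORT_ALL exports every repository
+	// under root without requiring a git-daemon-export-ok marker file.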
+ + githttp := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var logs bytes.Buffer + (&cgi.Handler{ + Path: gitpath, + Args: []string{"http-backend"}, + Dir: root, + Env: []string{ + "GIT_PROJECT_ROOT=" + root, + "GIT_HTTP_EXPORT_ALL=1", + }, + Stderr: &logs, + }).ServeHTTP(w, r) + if logs.Len() == 0 { + return + } + for { + line, err := logs.ReadString('\n') + t.Log("git-http-backend: " + line) + if err != nil { + break + } + } + }) + server := httptest.NewServer(&githttp) + t.Cleanup(server.Close) + return server.URL } -func runShell(dir string, cmds ...string) error { +func runShell(t *testing.T, dir string, cmds ...string) { + t.Helper() for _, args := range cmds { var cmd *exec.Cmd if runtime.GOOS == "windows" { @@ -644,9 +651,42 @@ func runShell(dir string, cmds ...string) error { } cmd.Dir = dir cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return errors.Wrapf(err, "error running %v", args) - } + require.NoErrorf(t, cmd.Run(), "error running %v", args) } - return nil +} + +func logProgressStreams(ctx context.Context, t *testing.T) context.Context { + pr, ctx, cancel := progress.NewContext(ctx) + done := make(chan struct{}) + t.Cleanup(func() { + cancel() + <-done + }) + go func() { + defer close(done) + for { + prog, err := pr.Read(context.Background()) + if err != nil { + return + } + for _, log := range prog { + switch lsys := log.Sys.(type) { + case client.VertexLog: + var stream string + switch lsys.Stream { + case 1: + stream = "stdout" + case 2: + stream = "stderr" + default: + stream = strconv.FormatInt(int64(lsys.Stream), 10) + } + t.Logf("(%v) %s", stream, lsys.Data) + default: + t.Logf("(%T) %+v", log.Sys, log) + } + } + } + }() + return ctx } diff --git a/source/git/gitsource_unix.go b/source/git/gitsource_unix.go index 23f289c55d85..cb4991757390 100644 --- a/source/git/gitsource_unix.go +++ b/source/git/gitsource_unix.go @@ -28,7 +28,7 @@ func gitMain() { unix.Umask(0022) // Reexec git command - cmd := exec.Command(os.Args[1], os.Args[2:]...) + cmd := exec.Command(os.Args[1], os.Args[2:]...) 
//nolint:gosec // reexec cmd.SysProcAttr = &unix.SysProcAttr{ Setpgid: true, Pdeathsig: unix.SIGTERM, diff --git a/source/gitidentifier_test.go b/source/gitidentifier_test.go index 68c8d969dab0..0d7e8be9b206 100644 --- a/source/gitidentifier_test.go +++ b/source/gitidentifier_test.go @@ -7,27 +7,114 @@ import ( ) func TestNewGitIdentifier(t *testing.T) { - gi, err := NewGitIdentifier("ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git") - require.Nil(t, err) - require.Equal(t, "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git", gi.Remote) - require.Equal(t, "", gi.Ref) - require.Equal(t, "", gi.Subdir) - - gi, err = NewGitIdentifier("ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git#main") - require.Nil(t, err) - require.Equal(t, "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git", gi.Remote) - require.Equal(t, "main", gi.Ref) - require.Equal(t, "", gi.Subdir) - - gi, err = NewGitIdentifier("git@github.com:moby/buildkit.git") - require.Nil(t, err) - require.Equal(t, "git@github.com:moby/buildkit.git", gi.Remote) - require.Equal(t, "", gi.Ref) - require.Equal(t, "", gi.Subdir) - - gi, err = NewGitIdentifier("github.com/moby/buildkit.git#main") - require.Nil(t, err) - require.Equal(t, "https://github.com/moby/buildkit.git", gi.Remote) - require.Equal(t, "main", gi.Ref) - require.Equal(t, "", gi.Subdir) + tests := []struct { + url string + expected GitIdentifier + }{ + { + url: "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git", + expected: GitIdentifier{ + Remote: "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git", + }, + }, + { + url: "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git#main", + expected: GitIdentifier{ + Remote: "ssh://root@subdomain.example.hostname:2222/root/my/really/weird/path/foo.git", + Ref: "main", + }, + }, + { + url: "git@github.com:moby/buildkit.git", + expected: GitIdentifier{ + Remote: "git@github.com:moby/buildkit.git", + }, + }, + { + url: "github.com/moby/buildkit.git#main", + expected: GitIdentifier{ + Remote: "https://github.com/moby/buildkit.git", + Ref: "main", + }, + }, + { + url: "git://github.com/user/repo.git", + expected: GitIdentifier{ + Remote: "git://github.com/user/repo.git", + }, + }, + { + url: "git://github.com/user/repo.git#mybranch:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "git://github.com/user/repo.git", + Ref: "mybranch", + Subdir: "mydir/mysubdir/", + }, + }, + { + url: "git://github.com/user/repo.git#:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "git://github.com/user/repo.git", + Subdir: "mydir/mysubdir/", + }, + }, + { + url: "https://github.com/user/repo.git", + expected: GitIdentifier{ + Remote: "https://github.com/user/repo.git", + }, + }, + { + url: "https://github.com/user/repo.git#mybranch:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "https://github.com/user/repo.git", + Ref: "mybranch", + Subdir: "mydir/mysubdir/", + }, + }, + { + url: "git@github.com:user/repo.git", + expected: GitIdentifier{ + Remote: "git@github.com:user/repo.git", + }, + }, + { + url: "git@github.com:user/repo.git#mybranch:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "git@github.com:user/repo.git", + Ref: "mybranch", + Subdir: "mydir/mysubdir/", + }, + }, + { + url: "ssh://github.com/user/repo.git", + expected: GitIdentifier{ + Remote: "ssh://github.com/user/repo.git", + }, + }, + { + url: 
"ssh://github.com/user/repo.git#mybranch:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "ssh://github.com/user/repo.git", + Ref: "mybranch", + Subdir: "mydir/mysubdir/", + }, + }, + { + url: "ssh://foo%40barcorp.com@github.com/user/repo.git#mybranch:mydir/mysubdir/", + expected: GitIdentifier{ + Remote: "ssh://foo%40barcorp.com@github.com/user/repo.git", + Ref: "mybranch", + Subdir: "mydir/mysubdir/", + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.url, func(t *testing.T) { + gi, err := NewGitIdentifier(tt.url) + require.NoError(t, err) + require.Equal(t, tt.expected, *gi) + }) + } } diff --git a/source/http/httpsource_test.go b/source/http/httpsource_test.go index a8f90949fd9a..afe6362573ab 100644 --- a/source/http/httpsource_test.go +++ b/source/http/httpsource_test.go @@ -2,7 +2,6 @@ package http import ( "context" - "io/ioutil" "os" "path/filepath" "runtime" @@ -36,11 +35,7 @@ func TestHTTPSource(t *testing.T) { t.Parallel() ctx := context.TODO() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) + hs, err := newHTTPSource(t) require.NoError(t, err) resp := httpserver.Response{ @@ -159,11 +154,7 @@ func TestHTTPDefaultName(t *testing.T) { t.Parallel() ctx := context.TODO() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) + hs, err := newHTTPSource(t) require.NoError(t, err) resp := httpserver.Response{ @@ -209,11 +200,7 @@ func TestHTTPInvalidURL(t *testing.T) { t.Parallel() ctx := context.TODO() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) + hs, err := newHTTPSource(t) require.NoError(t, err) server := httpserver.NewTestServer(map[string]httpserver.Response{}) @@ -237,11 +224,7 @@ func TestHTTPChecksum(t *testing.T) { t.Parallel() ctx := context.TODO() - tmpdir, err := ioutil.TempDir("", "buildkit-state") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - - hs, err := newHTTPSource(tmpdir) + hs, err := newHTTPSource(t) require.NoError(t, err) resp := httpserver.Response{ @@ -328,7 +311,7 @@ func readFile(ctx context.Context, ref cache.ImmutableRef, fp string) ([]byte, e defer lm.Unmount() - dt, err := ioutil.ReadFile(filepath.Join(dir, fp)) + dt, err := os.ReadFile(filepath.Join(dir, fp)) if err != nil { return nil, err } @@ -336,11 +319,16 @@ func readFile(ctx context.Context, ref cache.ImmutableRef, fp string) ([]byte, e return dt, nil } -func newHTTPSource(tmpdir string) (source.Source, error) { +func newHTTPSource(t *testing.T) (source.Source, error) { + tmpdir := t.TempDir() + snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots")) if err != nil { return nil, err } + t.Cleanup(func() { + require.NoError(t, snapshotter.Close()) + }) store, err := local.NewStore(tmpdir) if err != nil { @@ -351,6 +339,9 @@ func newHTTPSource(tmpdir string) (source.Source, error) { if err != nil { return nil, err } + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{ "native": snapshotter, @@ -360,6 +351,10 @@ func newHTTPSource(tmpdir string) (source.Source, error) { if err != nil { return nil, err } + t.Cleanup(func() { + require.NoError(t, md.Close()) + }) + lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit") c := mdb.ContentStore() applier := 
winlayers.NewFileSystemApplierWithWindows(c, apply.NewFileSystemApplier(c)) @@ -378,6 +373,9 @@ func newHTTPSource(tmpdir string) (source.Source, error) { if err != nil { return nil, err } + t.Cleanup(func() { + require.NoError(t, cm.Close()) + }) return NewSource(Opt{ CacheAccessor: cm, diff --git a/source/identifier.go b/source/identifier.go index 1032399e112d..aad9f226ff6e 100644 --- a/source/identifier.go +++ b/source/identifier.go @@ -50,6 +50,8 @@ func FromString(s string) (Identifier, error) { return NewHTTPIdentifier(parts[1], true) case srctypes.HTTPScheme: return NewHTTPIdentifier(parts[1], false) + case srctypes.OCIScheme: + return NewOCIIdentifier(parts[1]) default: return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) } @@ -85,6 +87,15 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { return nil, err } id.RecordType = rt + case pb.AttrImageLayerLimit: + l, err := strconv.Atoi(v) + if err != nil { + return nil, errors.Wrapf(err, "invalid layer limit %s", v) + } + if l <= 0 { + return nil, errors.Errorf("invalid layer limit %s", v) + } + id.LayerLimit = &l } } } @@ -182,6 +193,34 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { } } } + if id, ok := id.(*OCIIdentifier); ok { + if platform != nil { + id.Platform = &ocispecs.Platform{ + OS: platform.OS, + Architecture: platform.Architecture, + Variant: platform.Variant, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + } + } + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrOCILayoutSessionID: + id.SessionID = v + case pb.AttrOCILayoutStoreID: + id.StoreID = v + case pb.AttrOCILayoutLayerLimit: + l, err := strconv.Atoi(v) + if err != nil { + return nil, errors.Wrapf(err, "invalid layer limit %s", v) + } + if l <= 0 { + return nil, errors.Errorf("invalid layer limit %s", v) + } + id.LayerLimit = &l + } + } + } return id, nil } @@ -190,6 +229,7 @@ type ImageIdentifier struct { Platform *ocispecs.Platform ResolveMode ResolveMode RecordType client.UsageRecordType + LayerLimit *int } func NewImageIdentifier(str string) (*ImageIdentifier, error) { @@ -248,6 +288,30 @@ func (*HTTPIdentifier) ID() string { return srctypes.HTTPSScheme } +type OCIIdentifier struct { + Reference reference.Spec + Platform *ocispecs.Platform + SessionID string + StoreID string + LayerLimit *int +} + +func NewOCIIdentifier(str string) (*OCIIdentifier, error) { + ref, err := reference.Parse(str) + if err != nil { + return nil, errors.WithStack(err) + } + + if ref.Object == "" { + return nil, errors.WithStack(reference.ErrObjectRequired) + } + return &OCIIdentifier{Reference: ref}, nil +} + +func (*OCIIdentifier) ID() string { + return srctypes.OCIScheme +} + func (r ResolveMode) String() string { switch r { case ResolveModeDefault: diff --git a/source/manager.go b/source/manager.go index 3f4a0cb4783d..6a9c831c9048 100644 --- a/source/manager.go +++ b/source/manager.go @@ -16,7 +16,7 @@ type Source interface { } type SourceInstance interface { - CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) + CacheKey(ctx context.Context, g session.Group, index int) (key, pin string, opts solver.CacheOpts, done bool, err error) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) } diff --git a/source/types/types.go b/source/types/types.go index b96eac2333e6..ca91accf580d 100644 --- a/source/types/types.go +++ b/source/types/types.go @@ -6,4 +6,5 @@ const ( LocalScheme = "local" HTTPScheme = 
"http" HTTPSScheme = "https" + OCIScheme = "oci-layout" ) diff --git a/sourcepolicy/engine.go b/sourcepolicy/engine.go new file mode 100644 index 000000000000..829e8510650c --- /dev/null +++ b/sourcepolicy/engine.go @@ -0,0 +1,152 @@ +package sourcepolicy + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" +) + +var ( + // ErrSourceDenied is returned by the policy engine when a source is denied by the policy. + ErrSourceDenied = errors.New("source denied by policy") + + // ErrTooManyOps is returned by the policy engine when there are too many converts for a single source op. + ErrTooManyOps = errors.New("too many operations") +) + +// Engine is the source policy engine. +// It is responsible for evaluating a source policy against a source operation. +// Create one with `NewEngine` +// +// Rule matching is delegated to the `Matcher` interface. +// Mutations are delegated to the `Mutater` interface. +type Engine struct { + pol []*spb.Policy + sources map[string]*selectorCache +} + +// NewEngine creates a new source policy engine. +func NewEngine(pol []*spb.Policy) *Engine { + return &Engine{ + pol: pol, + } +} + +// TODO: The key here can't be used to cache attr constraint regexes. +func (e *Engine) selectorCache(src *spb.Selector) *selectorCache { + if e.sources == nil { + e.sources = map[string]*selectorCache{} + } + + key := src.MatchType.String() + " " + src.Identifier + + if s, ok := e.sources[key]; ok { + return s + } + + s := &selectorCache{Selector: src} + + e.sources[key] = s + return s +} + +// Evaluate evaluates a source operation against the policy. +// +// Policies are re-evaluated for each convert rule. +// Evaluate will error if the there are too many converts for a single source op to prevent infinite loops. +// This function may error out even if the op was mutated, in which case `true` will be returned along with the error. +// +// An error is returned when the source is denied by the policy. +func (e *Engine) Evaluate(ctx context.Context, op *pb.Op) (bool, error) { + if len(e.pol) == 0 { + return false, nil + } + + var mutated bool + const maxIterr = 20 + + for i := 0; ; i++ { + if i > maxIterr { + return mutated, errors.Wrapf(ErrTooManyOps, "too many mutations on a single source") + } + + srcOp := op.GetSource() + if srcOp == nil { + return false, nil + } + if i == 0 { + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("orig", *srcOp).WithField("updated", op.GetSource())) + } + + mut, err := e.evaluatePolicies(ctx, srcOp) + if mut { + mutated = true + } + if err != nil { + return mutated, err + } + if !mut { + break + } + } + + return mutated, nil +} + +func (e *Engine) evaluatePolicies(ctx context.Context, srcOp *pb.SourceOp) (bool, error) { + for _, pol := range e.pol { + mut, err := e.evaluatePolicy(ctx, pol, srcOp) + if mut || err != nil { + return mut, err + } + } + return false, nil +} + +// evaluatePolicy evaluates a single policy against a source operation. +// If the source is mutated the policy is short-circuited and `true` is returned. +// If the source is denied, an error will be returned. +// +// For Allow/Deny rules, the last matching rule wins. +// E.g. `ALLOW foo; DENY foo` will deny `foo`, `DENY foo; ALLOW foo` will allow `foo`. 
+func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (bool, error) { + ident := srcOp.GetIdentifier() + + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithFields(map[string]interface{}{ + "ref": ident, + })) + + var deny bool + for _, rule := range pol.Rules { + selector := e.selectorCache(rule.Selector) + matched, err := match(ctx, selector, ident, srcOp.Attrs) + if err != nil { + return false, errors.Wrap(err, "error matching source policy") + } + if !matched { + continue + } + + switch rule.Action { + case spb.PolicyAction_ALLOW: + deny = false + case spb.PolicyAction_DENY: + deny = true + case spb.PolicyAction_CONVERT: + mut, err := mutate(ctx, srcOp, rule, selector, ident) + if err != nil || mut { + return mut, errors.Wrap(err, "error mutating source policy") + } + default: + return false, errors.Errorf("source policy: rule %s %s: unknown type %q", rule.Action, rule.Selector.Identifier, ident) + } + } + + if deny { + return false, errors.Wrapf(ErrSourceDenied, "source %q denied by policy", ident) + } + return false, nil +} diff --git a/sourcepolicy/engine_test.go b/sourcepolicy/engine_test.go new file mode 100644 index 000000000000..512a26efc94a --- /dev/null +++ b/sourcepolicy/engine_test.go @@ -0,0 +1,505 @@ +package sourcepolicy + +import ( + "context" + "testing" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestEngineEvaluate(t *testing.T) { + t.Run("Deny All", testDenyAll) + t.Run("Allow Deny", testAllowDeny) + t.Run("Convert", testConvert) + t.Run("Convert Deny", testConvertDeny) + t.Run("Allow Convert Deny", testAllowConvertDeny) + t.Run("Test convert loop", testConvertLoop) + t.Run("Test convert http", testConvertHTTP) + t.Run("Test convert regex", testConvertRegex) + t.Run("Test convert wildcard", testConvertWildcard) + t.Run("Test convert multiple", testConvertMultiple) + t.Run("test multiple policies", testMultiplePolicies) + t.Run("Last rule wins", testLastRuleWins) +} + +func testLastRuleWins(t *testing.T) { + pol := []*spb.Policy{ + { + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_ALLOW, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + { + Action: spb.PolicyAction_ALLOW, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }, + }, + } + + e := NewEngine(pol) + mut, err := e.Evaluate(context.Background(), &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }) + require.NoError(t, err) + require.False(t, mut) +} + +func testMultiplePolicies(t *testing.T) { + pol := []*spb.Policy{ + { + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_ALLOW, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }, + }, + { + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }, + }, + } + + e := NewEngine(pol) + mut, err := e.Evaluate(context.Background(), &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }) + require.ErrorIs(t, err, 
ErrSourceDenied) + require.False(t, mut) +} + +func testConvertMultiple(t *testing.T) { + pol := []*spb.Policy{ + { + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/debian:buster", + }, + }, + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/debian:buster", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/debian:bullseye", + }, + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + + ctx := context.Background() + e := NewEngine(pol) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.NoError(t, err) +} + +func testConvertWildcard(t *testing.T) { + pol := []*spb.Policy{ + { + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/golang:*", + MatchType: spb.MatchType_WILDCARD, + }, + Updates: &spb.Update{ + Identifier: "docker-image://fakereg.io/library/golang:${1}", + }, + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/golang:1.19", + }, + }, + } + + ctx := context.Background() + e := NewEngine(pol) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.NoError(t, err) + require.Equal(t, "docker-image://fakereg.io/library/golang:1.19", op.GetSource().Identifier) +} + +func testConvertRegex(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: `docker\-image://docker\.io/library/golang:(.*)`, + MatchType: spb.MatchType_REGEX, + }, + Updates: &spb.Update{ + Identifier: "docker-image://fakereg.io/library/golang:${1}", + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/golang:1.19", + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.NoError(t, err) + require.Equal(t, "docker-image://fakereg.io/library/golang:1.19", op.GetSource().Identifier) +} + +func testConvertHTTP(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "https://example.com/foo", + }, + Updates: &spb.Update{ + Attrs: map[string]string{"http.checksum": "sha256:1234"}, + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "https://example.com/foo", + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.NoError(t, err) + require.Equal(t, "https://example.com/foo", op.GetSource().Identifier) +} + +func testConvertLoop(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + Updates: 
&spb.Update{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.ErrorIs(t, err, ErrTooManyOps) +} + +func testAllowConvertDeny(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + { + Action: spb.PolicyAction_ALLOW, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:*", + }, + }, + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.ErrorIs(t, err, ErrSourceDenied) + require.Equal(t, op.GetSource().Identifier, "docker-image://docker.io/library/alpine:latest") +} + +func testConvertDeny(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:*", + }, + }, + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + }, + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.True(t, mutated) + require.ErrorIs(t, err, ErrSourceDenied) + require.Equal(t, op.GetSource().Identifier, "docker-image://docker.io/library/alpine:latest") +} + +func testConvert(t *testing.T) { + cases := map[string]string{ + "docker-image://docker.io/library/busybox:latest": "docker-image://docker.io/library/alpine:latest", + "docker-image://docker.io/library/alpine:latest": "docker-image://docker.io/library/alpine:latest@sha256:c0d488a800e4127c334ad20d61d7bc21b4097540327217dfab52262adc02380c", + } + bklog.L.Logger.SetLevel(logrus.DebugLevel) + + for src, dst := range cases { + t.Run(src+"=>"+dst, func(t *testing.T) { + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: src, + }, + }, + } + + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_CONVERT, + Selector: &spb.Selector{ + Identifier: src, + }, + Updates: &spb.Update{ + Identifier: dst, + }, + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := 
e.Evaluate(ctx, op) + require.True(t, mutated) + require.NoError(t, err) + require.Equal(t, dst, op.GetSource().Identifier) + }) + } +} + +func testAllowDeny(t *testing.T) { + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + } + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_ALLOW, + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/alpine:latest", + }, + }, + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: "docker-image://*", + }, + }, + }, + } + + ctx := context.Background() + e := NewEngine([]*spb.Policy{pol}) + + mutated, err := e.Evaluate(ctx, op) + require.False(t, mutated) + require.ErrorIs(t, err, ErrSourceDenied) + + op = &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest", + }, + }, + } + + mutated, err = e.Evaluate(ctx, op) + require.False(t, mutated) + require.ErrorIs(t, err, ErrSourceDenied) +} + +func testDenyAll(t *testing.T) { + cases := map[string]string{ + "docker-image": "docker-image://docker.io/library/alpine:latest", + "https": "https://github.com/moby/buildkit.git", + "http": "http://example.com", + } + + for kind, ref := range cases { + t.Run(ref, func(t *testing.T) { + pol := &spb.Policy{ + Rules: []*spb.Rule{ + { + Action: spb.PolicyAction_DENY, + Selector: &spb.Selector{ + Identifier: kind + "://*", + }, + }, + }, + } + + e := NewEngine([]*spb.Policy{pol}) + ctx := context.Background() + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: ref, + }, + }, + } + + mutated, err := e.Evaluate(ctx, op) + require.False(t, mutated) + require.ErrorIs(t, err, ErrSourceDenied) + }) + } +} diff --git a/sourcepolicy/formatter.go b/sourcepolicy/formatter.go new file mode 100644 index 000000000000..487e7a368539 --- /dev/null +++ b/sourcepolicy/formatter.go @@ -0,0 +1,92 @@ +package sourcepolicy + +import ( + "regexp" + + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/wildcard" + "github.com/pkg/errors" +) + +// selectorCache wraps a protobuf Selector in order to store cached state such as the compiled regexes. +type selectorCache struct { + *spb.Selector + + re *regexp.Regexp + w *wildcardCache +} + +// Format formats the provided ref according to the match/type of the source. +// +// For example, if the source is a wildcard, the ref will be formatted with the wildcard in the source replacing the parameters in the destination. +// +// matcher: wildcard source: "docker.io/library/golang:*" match: "docker.io/library/golang:1.19" format: "docker.io/library/golang:${1}-alpine" result: "docker.io/library/golang:1.19-alpine" +func (s *selectorCache) Format(match, format string) (string, error) { + switch s.MatchType { + case spb.MatchType_EXACT: + return s.Identifier, nil + case spb.MatchType_REGEX: + re, err := s.regex() + if err != nil { + return "", err + } + return re.ReplaceAllString(match, format), nil + case spb.MatchType_WILDCARD: + w, err := s.wildcard() + if err != nil { + return "", err + } + m := w.Match(match) + if m == nil { + return match, nil + } + + return m.Format(format) + } + return "", errors.Errorf("unknown match type: %s", s.MatchType) +} + +// wildcardCache wraps a wildcard.Wildcard to cache returned matches by ref. +// This way a match only needs to be computed once per ref.
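For orientation, a minimal sketch (not part of the diff) of what `Format` does for a wildcard selector; the identifiers are taken from `testConvertWildcard` above, and the snippet assumes it runs inside the `sourcepolicy` package since `selectorCache` is unexported:

```go
sel := &selectorCache{Selector: &spb.Selector{
	Identifier: "docker-image://docker.io/library/golang:*",
	MatchType:  spb.MatchType_WILDCARD,
}}

// The portion matched by "*" becomes capture group 1 and fills ${1} in the
// destination pattern supplied by the rule's Updates.Identifier.
dest, err := sel.Format(
	"docker-image://docker.io/library/golang:1.19",  // ref that matched the selector
	"docker-image://fakereg.io/library/golang:${1}", // destination pattern
)
// err == nil; dest == "docker-image://fakereg.io/library/golang:1.19"
```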
+type wildcardCache struct { + w *wildcard.Wildcard + m map[string]*wildcard.Match +} + +func (w *wildcardCache) Match(ref string) *wildcard.Match { + if w.m == nil { + w.m = make(map[string]*wildcard.Match) + } + + if m, ok := w.m[ref]; ok { + return m + } + + m := w.w.Match(ref) + w.m[ref] = m + return m +} + +func (s *selectorCache) wildcard() (*wildcardCache, error) { + if s.w != nil { + return s.w, nil + } + w, err := wildcard.New(s.Identifier) + if err != nil { + return nil, err + } + s.w = &wildcardCache{w: w} + return s.w, nil +} + +func (s *selectorCache) regex() (*regexp.Regexp, error) { + if s.re != nil { + return s.re, nil + } + re, err := regexp.Compile(s.Identifier) + if err != nil { + return nil, err + } + s.re = re + return re, nil +} diff --git a/sourcepolicy/matcher.go b/sourcepolicy/matcher.go new file mode 100644 index 000000000000..79ab4032a5ae --- /dev/null +++ b/sourcepolicy/matcher.go @@ -0,0 +1,58 @@ +package sourcepolicy + +import ( + "context" + "regexp" + + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/pkg/errors" +) + +func match(ctx context.Context, src *selectorCache, ref string, attrs map[string]string) (bool, error) { + for _, c := range src.Constraints { + switch c.Condition { + case spb.AttrMatch_EQUAL: + if attrs[c.Key] != c.Value { + return false, nil + } + case spb.AttrMatch_NOTEQUAL: + if attrs[c.Key] == c.Value { + return false, nil + } + case spb.AttrMatch_MATCHES: + // TODO: Cache the compiled regex + matches, err := regexp.MatchString(c.Value, attrs[c.Key]) + if err != nil { + return false, errors.Errorf("invalid regex %q: %v", c.Value, err) + } + if !matches { + return false, nil + } + default: + return false, errors.Errorf("unknown attr condition: %s", c.Condition) + } + } + + if src.Identifier == ref { + return true, nil + } + + switch src.MatchType { + case spb.MatchType_EXACT: + return false, nil + case spb.MatchType_REGEX: + re, err := src.regex() + if err != nil { + return false, err + } + return re.MatchString(ref), nil + case spb.MatchType_WILDCARD: + w, err := src.wildcard() + if err != nil { + return false, err + } + return w.Match(ref) != nil, nil + default: + return false, errors.Errorf("unknown match type: %s", src.MatchType) + } +} diff --git a/sourcepolicy/matcher_test.go b/sourcepolicy/matcher_test.go new file mode 100644 index 000000000000..442ecd509006 --- /dev/null +++ b/sourcepolicy/matcher_test.go @@ -0,0 +1,317 @@ +package sourcepolicy + +import ( + "context" + "testing" + + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/stretchr/testify/require" +) + +func TestMatch(t *testing.T) { + type testCase struct { + name string + src spb.Selector + ref string + attrs map[string]string + matches bool + xErr bool + } + + cases := []testCase{ + { + name: "basic exact match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc"}, + ref: "docker-image://docker.io/library/busybox:1.34.1-uclibc", + matches: true, + }, + { + name: "docker-image scheme matches with only wildcard", + src: spb.Selector{Identifier: "*"}, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + }, + { + name: "docker-image scheme matches with wildcard", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*"}, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + }, + { + name: "mis-matching scheme does not match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*"}, + ref: 
"http://docker.io/library/busybox:latest", + matches: false, + }, + { + name: "http scheme matches with wildcard", + src: spb.Selector{Identifier: "http://docker.io/library/busybox:*"}, + ref: "http://docker.io/library/busybox:latest", + matches: true, + }, + { + name: "http scheme matches https URL", + src: spb.Selector{Identifier: "https://docker.io/library/busybox:*"}, + ref: "https://docker.io/library/busybox:latest", + matches: true, + }, + { + name: "attr match with default constraint (equals) matches", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + // Default equals + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "attr match with default constraint (equals) does not match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + // Default equals + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: false, + attrs: map[string]string{"foo": "nope"}, + }, + { + name: "attr match with explicit equals matches", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + Condition: spb.AttrMatch_EQUAL, // explicit equals + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "attr match with explicit equals does not match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + Condition: spb.AttrMatch_EQUAL, // explicit equals + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: false, + attrs: map[string]string{"foo": "nope"}, + }, + { + name: "attr match not equal does not match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + Condition: spb.AttrMatch_NOTEQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: false, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "attr match not equal does match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + Condition: spb.AttrMatch_NOTEQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + attrs: map[string]string{"foo": "ok"}, + }, + { + name: "matching attach match with simple strings", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "bar", + Condition: spb.AttrMatch_MATCHES, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "non-matching attr match constraint simple strings", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "asdf", + Condition: spb.AttrMatch_MATCHES, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: false, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "complex regex attr 
match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "^b\\w+", + Condition: spb.AttrMatch_MATCHES, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: true, + attrs: map[string]string{"foo": "bar"}, + }, + { + name: "attr constraint with non-matching regex", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "^\\d+", + Condition: spb.AttrMatch_MATCHES, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"foo": "b1"}, + matches: false, + }, + { + name: "attr constraint with regex match", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "\\d$", + Condition: spb.AttrMatch_MATCHES, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"foo": "b1"}, + matches: true, + }, + { + name: "unknown attr match condition type", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "^b+", + Condition: -1, // unknown condition + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"foo": "b1"}, + matches: false, + xErr: true, + }, + { + name: "matching constraint with extra attrs", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "Foo", + Condition: spb.AttrMatch_EQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"foo": "Foo", "bar": "Bar"}, + matches: true, + }, + { + name: "multiple attrs with failed constraint", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "Foo", + Condition: spb.AttrMatch_EQUAL, + }, + { + Key: "bar", + Value: "nope", + Condition: spb.AttrMatch_EQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"foo": "Foo", "bar": "Bar"}, + matches: false, + }, + { + name: "non-existent constraint key", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "Foo", + Condition: spb.AttrMatch_EQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + matches: false, + }, + { + name: "non-existent constraint key w/ non-nill attr", + src: spb.Selector{Identifier: "docker-image://docker.io/library/busybox:*", + Constraints: []*spb.AttrConstraint{ + { + Key: "foo", + Value: "Foo", + Condition: spb.AttrMatch_EQUAL, + }, + }, + }, + ref: "docker-image://docker.io/library/busybox:latest", + attrs: map[string]string{"bar": "Bar"}, + matches: false, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + matches, err := match(context.Background(), &selectorCache{Selector: &tc.src}, tc.ref, tc.attrs) + if !tc.xErr { + require.NoError(t, err) + } else { + require.Error(t, err) + } + require.Equal(t, tc.matches, matches) + }) + } +} diff --git a/sourcepolicy/mutate.go b/sourcepolicy/mutate.go new file mode 100644 index 000000000000..7722e6dd9bf2 --- /dev/null +++ b/sourcepolicy/mutate.go @@ -0,0 +1,50 @@ +package sourcepolicy + +import ( + 
"context" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" +) + +// mutate is a MutateFn which converts the source operation to the identifier and attributes provided by the policy. +// If there is no change, then the return value should be false and is not considered an error. +func mutate(ctx context.Context, op *pb.SourceOp, rule *spb.Rule, selector *selectorCache, ref string) (bool, error) { + if rule.Updates == nil { + return false, errors.Errorf("missing destination for convert rule") + } + + dest := rule.Updates.Identifier + if dest == "" { + dest = rule.Selector.Identifier + } + dest, err := selector.Format(ref, dest) + if err != nil { + return false, errors.Wrap(err, "error formatting destination") + } + + bklog.G(ctx).Debugf("sourcepolicy: converting %s to %s, pattern: %s", ref, dest, rule.Updates.Identifier) + + var mutated bool + if op.Identifier != dest && dest != "" { + mutated = true + op.Identifier = dest + } + + if rule.Updates.Attrs != nil { + if op.Attrs == nil { + op.Attrs = make(map[string]string, len(rule.Updates.Attrs)) + } + for k, v := range rule.Updates.Attrs { + if op.Attrs[k] != v { + bklog.G(ctx).Debugf("setting attr %s=%s", k, v) + op.Attrs[k] = v + mutated = true + } + } + } + + return mutated, nil +} diff --git a/sourcepolicy/mutate_test.go b/sourcepolicy/mutate_test.go new file mode 100644 index 000000000000..a14b6d9ea857 --- /dev/null +++ b/sourcepolicy/mutate_test.go @@ -0,0 +1,141 @@ +package sourcepolicy + +import ( + "context" + "testing" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/stretchr/testify/require" +) + +func TestMutate(t *testing.T) { + type testCaseOp struct { + op *pb.Op + rule *spb.Rule + expected bool + expectedOp *pb.Op + expectedErr string + } + + testCases := []testCaseOp{ + { + op: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc", + }, + }, + }, + rule: &spb.Rule{ + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + expected: true, + expectedOp: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + }, + }, + { + op: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox", + }, + }, + }, + rule: &spb.Rule{ + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/busybox:latest@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + expected: true, + expectedOp: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + }, + }, + { + // Discard the existing digest that might have been resolved by the Dockerfile frontend's MetaResolver. 
+ op: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }, + }, + }, + rule: &spb.Rule{ + Selector: &spb.Selector{ + Identifier: "docker-image://docker.io/library/busybox:latest*", + }, + Updates: &spb.Update{ + Identifier: "docker-image://docker.io/library/busybox:latest@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + expected: true, + expectedOp: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "docker-image://docker.io/library/busybox:latest@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83", + }, + }, + }, + }, + { + op: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + }, + }, + }, + rule: &spb.Rule{ + Selector: &spb.Selector{}, + Updates: &spb.Update{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + Attrs: map[string]string{pb.AttrHTTPChecksum: "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53"}, + }, + }, + expected: true, + expectedOp: &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md", + Attrs: map[string]string{ + pb.AttrHTTPChecksum: "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53", + }, + }, + }, + }, + }, + } + + ctx := context.Background() + for _, tc := range testCases { + op := *tc.op + t.Run(op.String(), func(t *testing.T) { + src := op.GetSource() + mutated, err := mutate(ctx, src, tc.rule, &selectorCache{Selector: tc.rule.Selector}, src.GetIdentifier()) + require.Equal(t, tc.expected, mutated) + if tc.expectedErr != "" { + require.Error(t, err, tc.expectedErr) + } else { + require.Equal(t, tc.expectedOp, &op) + } + }) + } +} diff --git a/sourcepolicy/pb/generate.go b/sourcepolicy/pb/generate.go new file mode 100644 index 000000000000..041c41b80ed6 --- /dev/null +++ b/sourcepolicy/pb/generate.go @@ -0,0 +1,3 @@ +package moby_buildkit_v1_sourcepolicy //nolint:revive + +//go:generate protoc -I=. --gogofaster_out=plugins=grpc:. policy.proto diff --git a/sourcepolicy/pb/json.go b/sourcepolicy/pb/json.go new file mode 100644 index 000000000000..a9f84834e75f --- /dev/null +++ b/sourcepolicy/pb/json.go @@ -0,0 +1,62 @@ +package moby_buildkit_v1_sourcepolicy //nolint:revive + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// MarshalJSON implements json.Marshaler with custom marshaling for PolicyAction. +// It gives the string form of the enum value. 
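A quick illustration of the wire format these methods produce and accept, mirroring `json_test.go` below (sketch only):

```go
data, _ := json.Marshal(PolicyAction_CONVERT) // data == []byte(`"CONVERT"`)

var a PolicyAction
_ = json.Unmarshal([]byte(`"DENY"`), &a) // string form; a == PolicyAction_DENY
_ = json.Unmarshal([]byte(`1`), &a)      // numeric form is accepted as well; 1 == DENY
```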
+func (a PolicyAction) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(PolicyAction_name, int32(a)) +} + +func (a *PolicyAction) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(PolicyAction_value, data, a.String()) + if err != nil { + return err + } + + _, ok := PolicyAction_name[val] + if !ok { + return errors.Errorf("invalid PolicyAction value: %d", val) + } + *a = PolicyAction(val) + return nil +} + +func (a AttrMatch) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(AttrMatch_name, int32(a)) +} + +func (a *AttrMatch) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(AttrMatch_value, data, a.String()) + if err != nil { + return err + } + + _, ok := AttrMatch_name[val] + if !ok { + return errors.Errorf("invalid AttrMatch value: %d", val) + } + *a = AttrMatch(val) + return nil +} + +func (a MatchType) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(MatchType_name, int32(a)) +} + +func (a *MatchType) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(MatchType_value, data, a.String()) + if err != nil { + return err + } + + _, ok := MatchType_name[val] + if !ok { + return errors.Errorf("invalid MatchType value: %d", val) + } + *a = MatchType(val) + return nil +} diff --git a/sourcepolicy/pb/json_test.go b/sourcepolicy/pb/json_test.go new file mode 100644 index 000000000000..a590763c3d01 --- /dev/null +++ b/sourcepolicy/pb/json_test.go @@ -0,0 +1,80 @@ +package moby_buildkit_v1_sourcepolicy //nolint:revive + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestActionJSON(t *testing.T) { + for i, s := range PolicyAction_name { + // marshals to string form + data, err := json.Marshal(PolicyAction(i)) + require.NoError(t, err) + require.Equal(t, string(data), `"`+s+`"`) + + // unmarshals from string form + var a PolicyAction + err = json.Unmarshal(data, &a) + require.NoError(t, err) + require.Equal(t, a, PolicyAction(i)) + + // unmarshals from number form + data, err = json.Marshal(i) + require.NoError(t, err) + + var a2 PolicyAction + err = json.Unmarshal(data, &a2) + require.NoError(t, err) + require.Equal(t, a, a2) + } +} + +func TestAttrMatchJSON(t *testing.T) { + for i, s := range AttrMatch_name { + // marshals to string form + data, err := json.Marshal(AttrMatch(i)) + require.NoError(t, err) + require.Equal(t, string(data), `"`+s+`"`) + + // unmarshals from string form + var a AttrMatch + err = json.Unmarshal(data, &a) + require.NoError(t, err) + require.Equal(t, a, AttrMatch(i)) + + // unmarshals from number form + data, err = json.Marshal(i) + require.NoError(t, err) + + var a2 AttrMatch + err = json.Unmarshal(data, &a2) + require.NoError(t, err) + require.Equal(t, a, a2) + } +} + +func TestMatchTypeJSON(t *testing.T) { + for i, s := range MatchType_name { + // marshals to string form + data, err := json.Marshal(MatchType(i)) + require.NoError(t, err) + require.Equal(t, `"`+s+`"`, string(data)) + + // unmarshals from string form + var a MatchType + err = json.Unmarshal(data, &a) + require.NoError(t, err) + require.Equal(t, a, MatchType(i)) + + // unmarshals from number form + data, err = json.Marshal(i) + require.NoError(t, err) + + var a2 MatchType + err = json.Unmarshal(data, &a2) + require.NoError(t, err) + require.Equal(t, a, a2) + } +} diff --git a/sourcepolicy/pb/policy.pb.go b/sourcepolicy/pb/policy.pb.go new file mode 100644 index 000000000000..8b77afe8649d --- /dev/null +++ b/sourcepolicy/pb/policy.pb.go @@ -0,0
+1,1615 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: policy.proto + +package moby_buildkit_v1_sourcepolicy + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PolicyAction defines the action to take when a source is matched +type PolicyAction int32 + +const ( + PolicyAction_ALLOW PolicyAction = 0 + PolicyAction_DENY PolicyAction = 1 + PolicyAction_CONVERT PolicyAction = 2 +) + +var PolicyAction_name = map[int32]string{ + 0: "ALLOW", + 1: "DENY", + 2: "CONVERT", +} + +var PolicyAction_value = map[string]int32{ + "ALLOW": 0, + "DENY": 1, + "CONVERT": 2, +} + +func (x PolicyAction) String() string { + return proto.EnumName(PolicyAction_name, int32(x)) +} + +func (PolicyAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{0} +} + +// AttrMatch defines the condition to match a source attribute +type AttrMatch int32 + +const ( + AttrMatch_EQUAL AttrMatch = 0 + AttrMatch_NOTEQUAL AttrMatch = 1 + AttrMatch_MATCHES AttrMatch = 2 +) + +var AttrMatch_name = map[int32]string{ + 0: "EQUAL", + 1: "NOTEQUAL", + 2: "MATCHES", +} + +var AttrMatch_value = map[string]int32{ + "EQUAL": 0, + "NOTEQUAL": 1, + "MATCHES": 2, +} + +func (x AttrMatch) String() string { + return proto.EnumName(AttrMatch_name, int32(x)) +} + +func (AttrMatch) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{1} +} + +// Match type is used to determine how a rule source is matched +type MatchType int32 + +const ( + // WILDCARD is the default matching type. 
+ // It may first attempt to do an exact match but will follow up with a wildcard match + // For something more powerful, use REGEX + MatchType_WILDCARD MatchType = 0 + // EXACT treats the source identifier as a literal string match + MatchType_EXACT MatchType = 1 + // REGEX treats the source identifier as a regular expression + // With regex matching you can also use match groups to replace values in the destination identifier + MatchType_REGEX MatchType = 2 +) + +var MatchType_name = map[int32]string{ + 0: "WILDCARD", + 1: "EXACT", + 2: "REGEX", +} + +var MatchType_value = map[string]int32{ + "WILDCARD": 0, + "EXACT": 1, + "REGEX": 2, +} + +func (x MatchType) String() string { + return proto.EnumName(MatchType_name, int32(x)) +} + +func (MatchType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{2} +} + +// Rule defines the action(s) to take when a source is matched +type Rule struct { + Action PolicyAction `protobuf:"varint,1,opt,name=action,proto3,enum=moby.buildkit.v1.sourcepolicy.PolicyAction" json:"action,omitempty"` + Selector *Selector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Updates *Update `protobuf:"bytes,3,opt,name=updates,proto3" json:"updates,omitempty"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{0} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetAction() PolicyAction { + if m != nil { + return m.Action + } + return PolicyAction_ALLOW +} + +func (m *Rule) GetSelector() *Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *Rule) GetUpdates() *Update { + if m != nil { + return m.Updates + } + return nil +} + +// Update contains updates to the matched build step after the rule is applied +type Update struct { + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Update) Reset() { *m = Update{} } +func (m *Update) String() string { return proto.CompactTextString(m) } +func (*Update) ProtoMessage() {} +func (*Update) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{1} +} +func (m *Update) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update) XXX_Merge(src proto.Message) { + xxx_messageInfo_Update.Merge(m, src) +} +func
(m *Update) XXX_Size() int { + return m.Size() +} +func (m *Update) XXX_DiscardUnknown() { + xxx_messageInfo_Update.DiscardUnknown(m) +} + +var xxx_messageInfo_Update proto.InternalMessageInfo + +func (m *Update) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *Update) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// Selector identifies a source to match a policy to +type Selector struct { + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // MatchType is the type of match to perform on the source identifier + MatchType MatchType `protobuf:"varint,2,opt,name=match_type,json=matchType,proto3,enum=moby.buildkit.v1.sourcepolicy.MatchType" json:"match_type,omitempty"` + Constraints []*AttrConstraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` +} + +func (m *Selector) Reset() { *m = Selector{} } +func (m *Selector) String() string { return proto.CompactTextString(m) } +func (*Selector) ProtoMessage() {} +func (*Selector) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{2} +} +func (m *Selector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Selector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Selector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Selector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Selector.Merge(m, src) +} +func (m *Selector) XXX_Size() int { + return m.Size() +} +func (m *Selector) XXX_DiscardUnknown() { + xxx_messageInfo_Selector.DiscardUnknown(m) +} + +var xxx_messageInfo_Selector proto.InternalMessageInfo + +func (m *Selector) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *Selector) GetMatchType() MatchType { + if m != nil { + return m.MatchType + } + return MatchType_WILDCARD +} + +func (m *Selector) GetConstraints() []*AttrConstraint { + if m != nil { + return m.Constraints + } + return nil +} + +// AttrConstraint defines a constraint on a source attribute +type AttrConstraint struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Condition AttrMatch `protobuf:"varint,3,opt,name=condition,proto3,enum=moby.buildkit.v1.sourcepolicy.AttrMatch" json:"condition,omitempty"` +} + +func (m *AttrConstraint) Reset() { *m = AttrConstraint{} } +func (m *AttrConstraint) String() string { return proto.CompactTextString(m) } +func (*AttrConstraint) ProtoMessage() {} +func (*AttrConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{3} +} +func (m *AttrConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttrConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttrConstraint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttrConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttrConstraint.Merge(m, src) +} +func (m *AttrConstraint) XXX_Size() int { + return m.Size() +} +func (m *AttrConstraint) XXX_DiscardUnknown() { + 
xxx_messageInfo_AttrConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_AttrConstraint proto.InternalMessageInfo + +func (m *AttrConstraint) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AttrConstraint) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *AttrConstraint) GetCondition() AttrMatch { + if m != nil { + return m.Condition + } + return AttrMatch_EQUAL +} + +// Policy is the list of rules the policy engine will perform +type Policy struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Rules []*Rule `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (m *Policy) Reset() { *m = Policy{} } +func (m *Policy) String() string { return proto.CompactTextString(m) } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Policy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *Policy) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Policy) GetRules() []*Rule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.PolicyAction", PolicyAction_name, PolicyAction_value) + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.AttrMatch", AttrMatch_name, AttrMatch_value) + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.MatchType", MatchType_name, MatchType_value) + proto.RegisterType((*Rule)(nil), "moby.buildkit.v1.sourcepolicy.Rule") + proto.RegisterType((*Update)(nil), "moby.buildkit.v1.sourcepolicy.Update") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.sourcepolicy.Update.AttrsEntry") + proto.RegisterType((*Selector)(nil), "moby.buildkit.v1.sourcepolicy.Selector") + proto.RegisterType((*AttrConstraint)(nil), "moby.buildkit.v1.sourcepolicy.AttrConstraint") + proto.RegisterType((*Policy)(nil), "moby.buildkit.v1.sourcepolicy.Policy") +} + +func init() { proto.RegisterFile("policy.proto", fileDescriptor_ac3b897852294d6a) } + +var fileDescriptor_ac3b897852294d6a = []byte{ + // 516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xbd, 0x4e, 0xf3, 0xe1, 0x49, 0x14, 0x59, 0x2b, 0x0e, 0x16, 0x12, 0x56, 0x14, 0x84, + 0x88, 0x82, 0x30, 0x6d, 0xb8, 0x14, 0x2e, 0xc8, 0x38, 0x6e, 0x41, 0x4a, 0x13, 0xd8, 0xa6, 0xb4, + 0x1c, 0x10, 0x72, 0x9c, 0x45, 0x58, 0x75, 0x6c, 0xcb, 0x5e, 0x47, 0xf2, 0x8d, 0x47, 0xe0, 0x39, + 0x78, 0x0e, 0x0e, 0x1c, 0xcb, 0x8d, 0x23, 0x4a, 0x5e, 0x04, 0xed, 0x3a, 0x4e, 0xc3, 0xa5, 0xce, + 0xc9, 0x3b, 0xe3, 0xf9, 0xfd, 0xe7, 0x63, 0x67, 0xa1, 0x15, 0x85, 0xbe, 0xe7, 0x66, 0x46, 0x14, + 0x87, 0x2c, 0xc4, 0x0f, 0x16, 0xe1, 0x2c, 0x33, 0x66, 0xa9, 0xe7, 0xcf, 0xaf, 0x3d, 0x66, 0x2c, + 0x8f, 0x8c, 0x24, 0x4c, 0x63, 0x97, 0xe6, 0x41, 0xdd, 0xdf, 
0x08, 0x0e, 0x48, 0xea, 0x53, 0x6c, + 0x41, 0xcd, 0x71, 0x99, 0x17, 0x06, 0x1a, 0xea, 0xa0, 0x5e, 0x7b, 0xf0, 0xc4, 0xb8, 0x13, 0x34, + 0xde, 0x89, 0x8f, 0x29, 0x10, 0xb2, 0x41, 0xb1, 0x05, 0x8d, 0x84, 0xfa, 0xd4, 0x65, 0x61, 0xac, + 0xc9, 0x1d, 0xd4, 0x6b, 0x0e, 0x1e, 0x97, 0xc8, 0x9c, 0x6f, 0xc2, 0xc9, 0x16, 0xc4, 0xaf, 0xa0, + 0x9e, 0x46, 0x73, 0x87, 0xd1, 0x44, 0xab, 0x08, 0x8d, 0x47, 0x25, 0x1a, 0x17, 0x22, 0x9a, 0x14, + 0x54, 0xf7, 0x07, 0x82, 0x5a, 0xee, 0xc3, 0x3a, 0x80, 0x37, 0xa7, 0x01, 0xf3, 0xbe, 0x78, 0x34, + 0x16, 0x9d, 0x29, 0x64, 0xc7, 0x83, 0x4f, 0xa0, 0xea, 0x30, 0x16, 0x27, 0x9a, 0xdc, 0xa9, 0xf4, + 0x9a, 0x83, 0xc3, 0xbd, 0x32, 0x19, 0x26, 0x47, 0xec, 0x80, 0xc5, 0x19, 0xc9, 0xf1, 0xfb, 0xc7, + 0x00, 0xb7, 0x4e, 0xac, 0x42, 0xe5, 0x9a, 0x66, 0x9b, 0x74, 0xfc, 0x88, 0xef, 0x41, 0x75, 0xe9, + 0xf8, 0x29, 0x15, 0x53, 0x51, 0x48, 0x6e, 0xbc, 0x94, 0x8f, 0x51, 0xf7, 0x27, 0x82, 0x46, 0x31, + 0x84, 0xd2, 0x72, 0x4f, 0x01, 0x16, 0x0e, 0x73, 0xbf, 0x7e, 0x66, 0x59, 0x94, 0x6b, 0xb5, 0x07, + 0xbd, 0x92, 0x9a, 0xcf, 0x38, 0x30, 0xcd, 0x22, 0x4a, 0x94, 0x45, 0x71, 0xc4, 0x13, 0x68, 0xba, + 0x61, 0x90, 0xb0, 0xd8, 0xf1, 0x02, 0xc6, 0xe7, 0xcc, 0xbb, 0x7f, 0x5a, 0xa2, 0xc4, 0x3b, 0xb4, + 0xb6, 0x14, 0xd9, 0x55, 0xe8, 0x7e, 0x43, 0xd0, 0xfe, 0xff, 0xff, 0xbe, 0x53, 0xc0, 0x27, 0xa0, + 0xb8, 0x61, 0x30, 0xf7, 0xc4, 0xf2, 0x55, 0xf6, 0xea, 0x89, 0x67, 0x12, 0x7d, 0x91, 0x5b, 0xb4, + 0xfb, 0x09, 0x6a, 0xf9, 0x52, 0x62, 0x0d, 0xea, 0x4b, 0x1a, 0x27, 0xc5, 0x32, 0x57, 0x48, 0x61, + 0xe2, 0x17, 0x50, 0x8d, 0x53, 0x9f, 0x16, 0xf7, 0xfd, 0xb0, 0x24, 0x0f, 0x7f, 0x19, 0x24, 0x27, + 0xfa, 0x87, 0xd0, 0xda, 0xdd, 0x79, 0xac, 0x40, 0xd5, 0x1c, 0x8d, 0x26, 0x97, 0xaa, 0x84, 0x1b, + 0x70, 0x30, 0xb4, 0xc7, 0x1f, 0x55, 0x84, 0x9b, 0x50, 0xb7, 0x26, 0xe3, 0x0f, 0x36, 0x99, 0xaa, + 0x72, 0xff, 0x08, 0x94, 0x6d, 0xa1, 0x3c, 0xdc, 0x7e, 0x7f, 0x61, 0x8e, 0x54, 0x09, 0xb7, 0xa0, + 0x31, 0x9e, 0x4c, 0x73, 0x4b, 0x20, 0x67, 0xe6, 0xd4, 0x7a, 0x63, 0x9f, 0xab, 0x72, 0xff, 0x19, + 0x28, 0xdb, 0xfb, 0xe2, 0x71, 0x97, 0x6f, 0x47, 0x43, 0xcb, 0x24, 0x43, 0x55, 0x12, 0x02, 0x57, + 0xa6, 0x35, 0x55, 0x11, 0x3f, 0x12, 0xfb, 0xd4, 0xbe, 0x52, 0xe5, 0xd7, 0xda, 0xaf, 0x95, 0x8e, + 0x6e, 0x56, 0x3a, 0xfa, 0xbb, 0xd2, 0xd1, 0xf7, 0xb5, 0x2e, 0xdd, 0xac, 0x75, 0xe9, 0xcf, 0x5a, + 0x97, 0x66, 0x35, 0xf1, 0xfe, 0x9f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xae, 0x7a, 0xeb, 0x6c, + 0x0f, 0x04, 0x00, 0x00, +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Updates != nil { + { + size, err := m.Updates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Update) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintPolicy(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintPolicy(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintPolicy(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Selector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Selector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Selector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchType != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.MatchType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttrConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttrConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttrConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Condition != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Condition)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Version != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPolicy(dAtA []byte, offset int, v uint64) int { + offset -= sovPolicy(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovPolicy(uint64(m.Action)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + if m.Updates != nil { + l = m.Updates.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + return n +} + +func (m *Update) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovPolicy(uint64(len(k))) + 1 + len(v) + sovPolicy(uint64(len(v))) + n += mapEntrySize + 1 + sovPolicy(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Selector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if m.MatchType != 0 { + n += 1 + sovPolicy(uint64(m.MatchType)) + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + } + return n +} + +func (m *AttrConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if m.Condition != 0 { + n += 1 + sovPolicy(uint64(m.Condition)) + } + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + sovPolicy(uint64(m.Version)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + } + return n +} + +func sovPolicy(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPolicy(x uint64) (n int) { + return sovPolicy(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= PolicyAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &Selector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Updates == nil { + m.Updates = &Update{} + } + if err := m.Updates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Update: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Update: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + 
m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthPolicy + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthPolicy + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthPolicy + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthPolicy + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Selector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Selector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Selector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchType", wireType) + } + m.MatchType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchType |= MatchType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, &AttrConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttrConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttrConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttrConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= AttrMatch(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &Rule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPolicy(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPolicy + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPolicy + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPolicy + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPolicy = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPolicy = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPolicy = fmt.Errorf("proto: unexpected end of group") +) diff --git a/sourcepolicy/pb/policy.proto b/sourcepolicy/pb/policy.proto new file mode 100644 index 000000000000..f46aca063f75 --- /dev/null +++ b/sourcepolicy/pb/policy.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package moby.buildkit.v1.sourcepolicy; + +// Rule defines the action(s) to take when a source is matched +message Rule { + PolicyAction action = 1; + Selector selector = 2; + Update updates = 3; +} + +// Update contains updates to the matched build step after the rule is applied +message Update { + string identifier = 1; + map<string, string> attrs = 2; +} + +// Selector identifies a source to match a policy to +message Selector { + string identifier = 1; + // MatchType is the type of match to perform on the source identifier + MatchType match_type = 2; + repeated AttrConstraint constraints = 3; +} + +// PolicyAction defines the action to take when a source is matched +enum PolicyAction { + ALLOW = 0; + DENY = 1; + CONVERT = 2; +} + +// AttrConstraint defines a constraint on a source attribute +message AttrConstraint { + string key = 1; + string value = 2; + AttrMatch condition = 3; +} + +// AttrMatch defines the condition to match a source attribute +enum AttrMatch { + EQUAL = 0; + NOTEQUAL = 1; + MATCHES = 2; +} + +// Policy is the list of rules the policy engine will evaluate +message Policy { + int64 version = 1; // Currently 1 + repeated Rule rules = 2; +} + +// MatchType is used to determine how a rule source is matched +enum MatchType { + // WILDCARD is the default matching type. + // It may first attempt to do an exact match but will follow up with a wildcard match + // For something more powerful, use REGEX + WILDCARD = 0; + // EXACT treats the source identifier as a literal string match + EXACT = 1; + // REGEX treats the source identifier as a regular expression + // With regex matching you can also use match groups to replace values in the destination identifier + REGEX = 2; +} \ No newline at end of file diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 000000000000..042824dfb613 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,15 @@ +//go:build tools +// +build tools + +// Package tools tracks dependencies on binaries not referenced in this codebase.
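An aside on the schema above before the tools.go header resumes: the messages are compiled with gogo protobuf, so each one gets generated `Marshal`/`Unmarshal` methods like the hand-written unmarshalers in policy.pb.go. A minimal sketch of building and round-tripping a policy follows; the `spb` import alias matches how the rest of the tree refers to this package, but the enum constant names simply follow standard gogo codegen conventions and are assumptions, not a verified API:

```go
package main

import (
	"fmt"

	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	pol := &spb.Policy{
		Version: 1, // currently the only version, per the schema comment
		Rules: []*spb.Rule{{
			Action: spb.PolicyAction_CONVERT,
			Selector: &spb.Selector{
				Identifier: "docker-image://docker.io/library/alpine:latest",
				MatchType:  spb.MatchType_EXACT,
			},
			Updates: &spb.Update{
				Identifier: "docker-image://docker.io/library/alpine:3.16",
			},
		}},
	}

	dt, err := pol.Marshal() // gogofaster generates Marshal on each message
	if err != nil {
		panic(err)
	}

	var back spb.Policy
	if err := back.Unmarshal(dt); err != nil { // exercises the hand-written Unmarshal above
		panic(err)
	}
	fmt.Println(back.Rules[0].Action, back.Rules[0].Selector.Identifier)
}
```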
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +// Disclaimer: avoid adding tools that don't need to be tracked through go.mod +// (such as golangci-lint), and check that new tools don't pull in too many dependencies. +package tools + +import ( + _ "github.com/gogo/protobuf/protoc-gen-gogo" + _ "github.com/gogo/protobuf/protoc-gen-gogofaster" + _ "github.com/gogo/protobuf/protoc-gen-gogoslick" + _ "github.com/golang/protobuf/protoc-gen-go" +) diff --git a/util/appdefaults/appdefaults_unix.go b/util/appdefaults/appdefaults_unix.go index 499e8771844b..0084280c2856 100644 --- a/util/appdefaults/appdefaults_unix.go +++ b/util/appdefaults/appdefaults_unix.go @@ -10,9 +10,11 @@ import ( ) const ( - Address = "unix:///run/buildkit/buildkitd.sock" - Root = "/var/lib/buildkit" - ConfigDir = "/etc/buildkit" + Address = "unix:///run/buildkit/buildkitd.sock" + Root = "/var/lib/buildkit" + ConfigDir = "/etc/buildkit" + DefaultCNIBinDir = "/opt/cni/bin" + DefaultCNIConfigPath = "/etc/buildkit/cni.json" ) // UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock diff --git a/util/appdefaults/appdefaults_windows.go b/util/appdefaults/appdefaults_windows.go index d5d0ca1fb99f..058789e48aa0 100644 --- a/util/appdefaults/appdefaults_windows.go +++ b/util/appdefaults/appdefaults_windows.go @@ -10,8 +10,10 @@ const ( ) var ( - Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") - ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") + Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") + ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") + DefaultCNIBinDir = filepath.Join(ConfigDir, "bin") + DefaultCNIConfigPath = filepath.Join(ConfigDir, "cni.json") ) func UserAddress() string { diff --git a/util/archutil/Dockerfile b/util/archutil/Dockerfile index 6ac641f06dcb..9f8e59d9db9e 100644 --- a/util/archutil/Dockerfile +++ b/util/archutil/Dockerfile @@ -36,6 +36,10 @@ FROM base AS exit-s390x COPY fixtures/exit.s390x.s . RUN s390x-linux-gnu-as --noexecstack -o exit.o exit.s390x.s && s390x-linux-gnu-ld -o exit -s exit.o +FROM base AS exit-ppc64 +COPY fixtures/exit.ppc64.s . +RUN powerpc64le-linux-gnu-as -mbig --noexecstack -o exit.o exit.ppc64.s && powerpc64le-linux-gnu-ld -EB -o exit -s exit.o + FROM base AS exit-ppc64le COPY fixtures/exit.ppc64le.s . RUN powerpc64le-linux-gnu-as --noexecstack -o exit.o exit.ppc64le.s && powerpc64le-linux-gnu-ld -o exit -s exit.o @@ -48,7 +52,7 @@ FROM base AS exit-mips64 COPY fixtures/exit.mips64.s . RUN mips64-linux-gnuabi64-as --noexecstack -o exit.o exit.mips64.s && mips64-linux-gnuabi64-ld -o exit -s exit.o -FROM golang:1.17-alpine AS generate +FROM golang:1.19-alpine AS generate WORKDIR /src COPY --from=exit-amd64 /src/exit amd64 COPY --from=exit-386 /src/exit 386 @@ -56,12 +60,13 @@ COPY --from=exit-arm64 /src/exit arm64 COPY --from=exit-arm /src/exit arm COPY --from=exit-riscv64 /src/exit riscv64 COPY --from=exit-s390x /src/exit s390x +COPY --from=exit-ppc64 /src/exit ppc64 COPY --from=exit-ppc64le /src/exit ppc64le COPY --from=exit-mips64le /src/exit mips64le COPY --from=exit-mips64 /src/exit mips64 COPY generate.go .
-RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64le mips64le mips64 && ls -l +RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64 ppc64le mips64le mips64 && ls -l FROM scratch diff --git a/util/archutil/check_unix.go b/util/archutil/check_unix.go index 8b558a31765f..91be4d80260f 100644 --- a/util/archutil/check_unix.go +++ b/util/archutil/check_unix.go @@ -7,7 +7,6 @@ import ( "bytes" "compress/gzip" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -23,7 +22,7 @@ func withChroot(cmd *exec.Cmd, dir string) { } func check(arch, bin string) (string, error) { - tmpdir, err := ioutil.TempDir("", "qemu-check") + tmpdir, err := os.MkdirTemp("", "qemu-check") if err != nil { return "", err } @@ -41,6 +40,7 @@ func check(arch, bin string) (string, error) { return "", err } + //nolint:gosec // inputs should be static strings if _, err := io.Copy(f, r); err != nil { f.Close() return "", err } diff --git a/util/archutil/detect.go b/util/archutil/detect.go index 44cb3133e1b6..3184f9e548fb 100644 --- a/util/archutil/detect.go +++ b/util/archutil/detect.go @@ -48,6 +48,11 @@ func SupportedPlatforms(noCache bool) []ocispecs.Platform { arr = append(arr, linux(p)) } } + if p := "ppc64"; def.Architecture != p { + if _, err := ppc64Supported(); err == nil { + arr = append(arr, linux(p)) + } + } if p := "ppc64le"; def.Architecture != p { if _, err := ppc64leSupported(); err == nil { arr = append(arr, linux(p)) @@ -87,9 +92,9 @@ func SupportedPlatforms(noCache bool) []ocispecs.Platform { return arr } -//WarnIfUnsupported validates the platforms and show warning message if there is, -//the end user could fix the issue based on those warning, and thus no need to drop -//the platform from the candidates. +// WarnIfUnsupported validates the platforms and shows a warning message if any are unsupported, +// so the end user can fix the issue based on the warning rather than dropping +// the platform from the candidates.
func WarnIfUnsupported(pfs []ocispecs.Platform) { def := nativePlatform() for _, p := range pfs { @@ -109,6 +114,11 @@ func WarnIfUnsupported(pfs []ocispecs.Platform) { printPlatformWarning(p, err) } } + if p.Architecture == "ppc64" { + if _, err := ppc64Supported(); err != nil { + printPlatformWarning(p, err) + } + } if p.Architecture == "ppc64le" { if _, err := ppc64leSupported(); err != nil { printPlatformWarning(p, err) diff --git a/util/archutil/fixtures/exit.amd64.s b/util/archutil/fixtures/exit.amd64.S similarity index 100% rename from util/archutil/fixtures/exit.amd64.s rename to util/archutil/fixtures/exit.amd64.S diff --git a/util/archutil/fixtures/exit.ppc64.s b/util/archutil/fixtures/exit.ppc64.s new file mode 100644 index 000000000000..a684306fc12c --- /dev/null +++ b/util/archutil/fixtures/exit.ppc64.s @@ -0,0 +1,10 @@ + .global _start + .section ".opd","aw" +_start: + .quad .L.start,.TOC.@tocbase,0 + .text + .abiversion 1 +.L.start: + li %r0, 1 + li %r3, 0 + sc diff --git a/util/archutil/generate.go b/util/archutil/generate.go index c4a24e857ef8..96f280ce3ab1 100644 --- a/util/archutil/generate.go +++ b/util/archutil/generate.go @@ -7,18 +7,19 @@ import ( "bytes" "compress/gzip" "flag" - "fmt" "html/template" "io" "os" "path/filepath" + + "github.com/pkg/errors" ) // saves baseimage binaries statically into go code func main() { flag.Parse() if len(flag.Args()) == 0 { - panic(fmt.Errorf("arch is required")) + panic(errors.New("arch is required")) } for _, arch := range flag.Args() { diff --git a/util/archutil/ppc64_binary.go b/util/archutil/ppc64_binary.go new file mode 100644 index 000000000000..d0c197c20d50 --- /dev/null +++ b/util/archutil/ppc64_binary.go @@ -0,0 +1,9 @@ +//go:build !ppc64 +// +build !ppc64 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
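The generated `Binaryppc64` constant that follows (like its siblings for the other architectures) is a gzip-compressed static ELF stub produced by `generate.go` from the Dockerfile stages above. A sketch of how the runtime check materializes one of these stubs before executing it under binfmt emulation, mirroring `check()` in check_unix.go; the paths and panic-based error handling here are illustrative only:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"io"
	"os"
	"path/filepath"

	"github.com/moby/buildkit/util/archutil"
)

func main() {
	tmpdir, err := os.MkdirTemp("", "qemu-check")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpdir)

	// Binaryppc64 is the gzip-compressed "exit" binary embedded above.
	r, err := gzip.NewReader(bytes.NewReader([]byte(archutil.Binaryppc64)))
	if err != nil {
		panic(err)
	}
	defer r.Close()

	pp := filepath.Join(tmpdir, "exit")
	f, err := os.OpenFile(pp, os.O_CREATE|os.O_WRONLY, 0o755)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(f, r); err != nil { // decompress the tiny ELF stub to disk
		f.Close()
		panic(err)
	}
	f.Close()
	// check() would now exec pp (chrooted to tmpdir) and treat a clean
	// exit as "this architecture is runnable via binfmt emulation".
}
```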
+ +const Binaryppc64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd0\xb1\x8a\x13\x51\x14\x06\xe0\xff\x8e\xd9\x45\xd0\x62\x2c\x84\x05\x9b\x3c\x40\x98\x7a\xcb\x14\x6a\x65\xa3\x2f\xa0\x2b\x89\x6c\x23\xca\xee\x14\x76\xfb\xb4\x81\xbc\x45\x24\x93\xc9\x64\x12\x89\xa4\xb0\x92\xef\x83\xdc\x73\x72\x66\x7e\xce\x65\x9e\xde\x7d\x78\x5f\x55\x25\x83\x2a\xaf\x93\x74\x83\xba\x6c\xd6\xfd\x74\xde\x9d\x25\xd3\xee\x9c\xe7\x36\x93\xcc\x73\x95\x49\xff\xee\x55\x46\xea\x93\x9a\x94\xd9\x51\x2d\xc3\x79\xbd\x9b\xef\xf6\xec\xf7\x8d\xf6\xde\x1c\xd5\x92\x2c\xda\xd5\xc7\x43\xee\x62\xf5\xa2\x5d\x7d\x4a\xba\xfb\x5e\xbe\x2f\x29\xb7\xdb\xdf\x97\xe4\xed\xf6\xcb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x27\xea\x94\x69\x57\xab\xa7\xc3\xb0\x79\xbc\x7f\x6c\x1f\xda\xbb\xaf\x69\xda\xe5\xaf\x36\xcd\xf2\xfe\xf3\xb7\x87\xbb\xef\xcb\x34\x3f\x7e\x2e\xfe\xc5\xda\x17\x49\x4a\xdf\x5f\x8f\xef\x91\xa1\xe6\xe5\x49\xe6\xf9\xa8\x7f\x35\xca\x57\x7d\x7e\xd6\xe7\x67\x67\x76\x4e\x46\xfd\x9b\x51\xfe\x59\x97\x2f\x9b\xf5\xee\xef\xbe\xe6\xe6\x2f\xfb\xcb\x3e\xf7\x87\x32\x74\xd3\x73\x4f\x7e\x07\x00\x00\xff\xff\x5e\xe4\x1d\xbd\x60\x01\x01\x00" diff --git a/util/archutil/ppc64_check.go b/util/archutil/ppc64_check.go new file mode 100644 index 000000000000..00fe3e16fffe --- /dev/null +++ b/util/archutil/ppc64_check.go @@ -0,0 +1,8 @@ +//go:build !ppc64 +// +build !ppc64 + +package archutil + +func ppc64Supported() (string, error) { + return check("ppc64", Binaryppc64) +} diff --git a/util/archutil/ppc64_check_ppc64.go b/util/archutil/ppc64_check_ppc64.go new file mode 100644 index 000000000000..82e6958454ce --- /dev/null +++ b/util/archutil/ppc64_check_ppc64.go @@ -0,0 +1,8 @@ +//go:build ppc64 +// +build ppc64 + +package archutil + +func ppc64Supported() (string, error) { + return "", nil +} diff --git a/util/attestation/types.go b/util/attestation/types.go new file mode 100644 index 000000000000..35f4404cd627 --- /dev/null +++ b/util/attestation/types.go @@ -0,0 +1,11 @@ +package attestation + +const ( + MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json" + + DockerAnnotationReferenceType = "vnd.docker.reference.type" + DockerAnnotationReferenceDigest = "vnd.docker.reference.digest" + DockerAnnotationReferenceDescription = "vnd.docker.reference.description" + + DockerAnnotationReferenceTypeDefault = "attestation-manifest" +) diff --git a/util/buildinfo/buildinfo.go b/util/buildinfo/buildinfo.go index 5adaa9b36f75..e3486e8e4f11 100644 --- a/util/buildinfo/buildinfo.go +++ b/util/buildinfo/buildinfo.go @@ -1,3 +1,6 @@ +// Package buildinfo implements utilities for build information. 
+// +// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md package buildinfo import ( @@ -10,12 +13,67 @@ import ( ctnref "github.com/containerd/containerd/reference" "github.com/docker/distribution/reference" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/source" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/urlutil" "github.com/pkg/errors" ) +func FromProvenance(c *provenance.Capture) (*binfotypes.BuildInfo, error) { + var bi binfotypes.BuildInfo + + bi.Frontend = c.Frontend + bi.Attrs = map[string]*string{} + for k, v := range c.Args { + v := v + bi.Attrs[k] = &v + } + + for _, s := range c.Sources.Images { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: s.Ref, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.HTTP { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeHTTP, + Ref: s.URL, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.Git { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeGit, + Ref: s.URL, + Pin: s.Commit, + }) + } + + sort.Slice(bi.Sources, func(i, j int) bool { + return bi.Sources[i].Ref < bi.Sources[j].Ref + }) + + return &bi, nil +} + +func AddMetadata(metadata map[string][]byte, key string, c *provenance.Capture) error { + bi, err := FromProvenance(c) + if err != nil { + return err + } + dt, err := json.Marshal(bi) + if err != nil { + return err + } + metadata[key] = dt + return nil +} + // Decode decodes a base64 encoded build info. func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { dec, err := base64.StdEncoding.DecodeString(enc) @@ -42,7 +100,7 @@ func Encode(ctx context.Context, metadata map[string][]byte, key string, llbSour } else { return nil, err } - bi.Sources = dedupSources(bi, allDepsSources(bi, nil)) + bi.Sources = dedupSources(bi.Sources, allDepsSources(bi.Deps, nil)) return json.Marshal(bi) } @@ -205,24 +263,25 @@ func decodeDeps(key string, attrs map[string]*string) (map[string]binfotypes.Bui } // dedupSources deduplicates regular sources from dependencies ones. -func dedupSources(bi binfotypes.BuildInfo, depsSources []binfotypes.Source) (srcs []binfotypes.Source) { +func dedupSources(sources []binfotypes.Source, depsSources []binfotypes.Source) (srcs []binfotypes.Source) { // dedup sources from deps - for i, src := range bi.Sources { - for _, dsrc := range depsSources { - if src == dsrc { - bi.Sources = append(bi.Sources[:i], bi.Sources[i+1:]...) - } else if src.Type == binfotypes.SourceTypeDockerImage { + msrc := make(map[binfotypes.Source]struct{}) +sourcesloop: + for _, src := range sources { + for _, srcd := range depsSources { + if src == srcd { + continue sourcesloop + } + if src.Type == binfotypes.SourceTypeDockerImage && srcd.Type == binfotypes.SourceTypeDockerImage { _, dgst := ctnref.SplitObject(src.Ref) - if dgst != "" && src.Pin == dsrc.Pin { - bi.Sources = append(bi.Sources[:i], bi.Sources[i+1:]...) 
+ if dgst != "" && src.Pin == srcd.Pin { + continue sourcesloop } } } - } - // dedup regular sources - msrc := make(map[binfotypes.Source]struct{}) - for _, src := range bi.Sources { - msrc[src] = struct{}{} + if _, ok := msrc[src]; !ok { + msrc[src] = struct{}{} + } } for src := range msrc { srcs = append(srcs, src) @@ -234,21 +293,21 @@ func dedupSources(bi binfotypes.BuildInfo, depsSources []binfotypes.Source) (src } // allDepsSources gathers dependencies sources. -func allDepsSources(bi binfotypes.BuildInfo, visited map[binfotypes.Source]struct{}) (res []binfotypes.Source) { +func allDepsSources(deps map[string]binfotypes.BuildInfo, visited map[binfotypes.Source]struct{}) (res []binfotypes.Source) { if visited == nil { visited = make(map[binfotypes.Source]struct{}) } - if len(bi.Deps) == 0 { + if len(deps) == 0 { return res } - for _, dbi := range bi.Deps { + for _, dbi := range deps { for _, dsrc := range dbi.Sources { if _, ok := visited[dsrc]; ok { continue } visited[dsrc] = struct{}{} } - res = allDepsSources(dbi, visited) + res = allDepsSources(dbi.Deps, visited) } for src := range visited { res = append(res, src) @@ -262,17 +321,24 @@ type FormatOpts struct { } // Format formats build info. -func Format(dt []byte, format FormatOpts) (_ []byte, err error) { +func Format(dt []byte, opts FormatOpts) (_ []byte, err error) { if len(dt) == 0 { return dt, nil } + var bi binfotypes.BuildInfo if err := json.Unmarshal(dt, &bi); err != nil { return nil, errors.Wrap(err, "failed to unmarshal buildinfo for formatting") } - if format.RemoveAttrs { + + if opts.RemoveAttrs { bi.Attrs = nil + if len(bi.Deps) > 0 { + bi.Sources = dedupSources(append(bi.Sources, allDepsSources(bi.Deps, nil)...), nil) + bi.Deps = nil + } } + if dt, err = json.Marshal(bi); err != nil { return nil, err } @@ -315,7 +381,7 @@ func filterAttrs(key string, attrs map[string]*string) map[string]*string { continue } // always include - if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { + if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "vcs:") { filtered[k] = v continue } @@ -352,6 +418,7 @@ func filterAttrs(key string, attrs map[string]*string) map[string]*string { var knownControlArgs = []string{ "BUILDKIT_CACHE_MOUNT_NS", "BUILDKIT_CONTEXT_KEEP_GIT_DIR", + "BUILDKIT_BUILDINFO", "BUILDKIT_INLINE_BUILDINFO_ATTRS", "BUILDKIT_INLINE_CACHE", "BUILDKIT_MULTI_PLATFORM", @@ -369,49 +436,6 @@ func isControlArg(attrKey string) bool { return false } -// GetMetadata returns buildinfo metadata for the specified key. If the key -// is already there, result will be merged. 
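The removed `GetMetadata` helper below used to merge request attributes into the metadata map; its replacement is the provenance-driven pair `FromProvenance`/`AddMetadata` added earlier in this hunk. A hedged sketch of the new flow — the `provenance.ImageSource` composite-literal field names and the `"containerimage.buildinfo"` metadata key are assumptions inferred from this diff, not verified identifiers:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/llbsolver/provenance"
	"github.com/moby/buildkit/util/buildinfo"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	c := &provenance.Capture{
		Frontend: "dockerfile.v0",
		Args:     map[string]string{"build-arg:foo": "bar"},
	}
	// ImageSource fields are assumed from how FromProvenance reads
	// c.Sources.Images (Ref and Digest).
	c.Sources.Images = append(c.Sources.Images, provenance.ImageSource{
		Ref:    "docker.io/library/alpine:latest",
		Digest: digest.Digest("sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3"),
	})

	metadata := map[string][]byte{}
	// The exporter key name is an assumption (exptypes.ExporterBuildInfo
	// elsewhere in the tree).
	if err := buildinfo.AddMetadata(metadata, "containerimage.buildinfo", c); err != nil {
		panic(err)
	}
	fmt.Println(string(metadata["containerimage.buildinfo"])) // JSON BuildInfo with sorted sources
}
```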
-func GetMetadata(metadata map[string][]byte, key string, reqFrontend string, reqAttrs map[string]string) ([]byte, error) { - if metadata == nil { - metadata = make(map[string][]byte) - } - var dtbi []byte - if v, ok := metadata[key]; ok && v != nil { - var mbi binfotypes.BuildInfo - if errm := json.Unmarshal(v, &mbi); errm != nil { - return nil, errors.Wrapf(errm, "failed to unmarshal build info for %q", key) - } - if reqFrontend != "" { - mbi.Frontend = reqFrontend - } - if deps, err := decodeDeps(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))); err == nil { - mbi.Deps = reduceMapBuildInfo(deps, mbi.Deps) - } else { - return nil, err - } - mbi.Attrs = filterAttrs(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))) - var err error - dtbi, err = json.Marshal(mbi) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } else { - deps, err := decodeDeps(key, convertMap(reqAttrs)) - if err != nil { - return nil, err - } - dtbi, err = json.Marshal(binfotypes.BuildInfo{ - Frontend: reqFrontend, - Attrs: filterAttrs(key, convertMap(reqAttrs)), - Deps: deps, - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } - return dtbi, nil -} - func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { if m1 == nil && m2 == nil { return nil @@ -426,25 +450,3 @@ func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]str } return m1 } - -func reduceMapBuildInfo(m1 map[string]binfotypes.BuildInfo, m2 map[string]binfotypes.BuildInfo) map[string]binfotypes.BuildInfo { - if m1 == nil && m2 == nil { - return nil - } - if m1 == nil { - m1 = map[string]binfotypes.BuildInfo{} - } - for k, v := range m2 { - m1[k] = v - } - return m1 -} - -func convertMap(m map[string]string) map[string]*string { - res := make(map[string]*string) - for k, v := range m { - value := v - res[k] = &value - } - return res -} diff --git a/util/buildinfo/buildinfo_test.go b/util/buildinfo/buildinfo_test.go index 6c02c25dfcbb..d2967d01c1aa 100644 --- a/util/buildinfo/buildinfo_test.go +++ b/util/buildinfo/buildinfo_test.go @@ -24,7 +24,7 @@ func TestMergeSources(t *testing.T) { "docker-image://docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab", "docker-image://docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04", "git://https://github.com/crazy-max/buildkit-buildsources-test.git#master": "259a5aa5aa5bb3562d12cc631fe399f4788642c1", - "https://raw.githubusercontent.com/moby/moby/master/README.md": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", } frontendSources := []binfotypes.Source{ @@ -85,7 +85,7 @@ func TestMergeSources(t *testing.T) { }, { Type: binfotypes.SourceTypeHTTP, - Ref: "https://raw.githubusercontent.com/moby/moby/master/README.md", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", }, }, srcs) @@ -164,43 +164,92 @@ func TestDecodeDeps(t *testing.T) { func TestDedupSources(t *testing.T) { cases := []struct { - name string - bi binfotypes.BuildInfo - want 
[]binfotypes.Source + name string + sources []binfotypes.Source + deps map[string]binfotypes.BuildInfo + want []binfotypes.Source }{ { name: "deps", - bi: binfotypes.BuildInfo{ - Frontend: "dockerfile.v0", - Attrs: map[string]*string{ - "context:base": stringPtr("input:base"), + sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", }, - Sources: []binfotypes.Source{ - { - Type: "docker-image", - Ref: "docker.io/library/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - }, - { - Type: "docker-image", - Ref: "docker.io/library/busybox:latest", - Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", - }, - { - Type: "http", - Ref: "https://raw.githubusercontent.com/moby/moby/master/README.md", - Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + { + Type: "http", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + }, + }, + deps: map[string]binfotypes.BuildInfo{ + "base": { + Frontend: "dockerfile.v0", + Sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine:latest", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + }, }, }, - Deps: map[string]binfotypes.BuildInfo{ - "base": { - Frontend: "dockerfile.v0", - Sources: []binfotypes.Source{ - { - Type: "docker-image", - Ref: "docker.io/library/alpine:latest", - Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - }, + }, + want: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + { + Type: "http", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + }, + }, + }, + { + name: "multideps", + sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:1.35.0@sha256:20246233b52de844fa516f8c51234f1441e55e71ecdd1a1d91ebb252e1fd4603", + Pin: "sha256:20246233b52de844fa516f8c51234f1441e55e71ecdd1a1d91ebb252e1fd4603", + }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + { + Type: "http", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + }, + }, + deps: map[string]binfotypes.BuildInfo{ + "base": { + Frontend: "dockerfile.v0", + Sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine:latest", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + }, + { + Type: "docker-image", + Ref: 
"docker.io/library/busybox:1.35.0", + Pin: "sha256:20246233b52de844fa516f8c51234f1441e55e71ecdd1a1d91ebb252e1fd4603", }, }, }, @@ -213,44 +262,38 @@ func TestDedupSources(t *testing.T) { }, { Type: "http", - Ref: "https://raw.githubusercontent.com/moby/moby/master/README.md", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", }, }, }, { name: "regular", - bi: binfotypes.BuildInfo{ - Frontend: "dockerfile.v0", - Attrs: map[string]*string{ - "context:base": stringPtr("input:base"), + sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", }, - Sources: []binfotypes.Source{ - { - Type: "docker-image", - Ref: "docker.io/library/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - }, - { - Type: "docker-image", - Ref: "docker.io/library/busybox:latest", - Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", - }, - { - Type: "docker-image", - Ref: "docker.io/library/busybox:latest", - Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", - }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", }, - Deps: map[string]binfotypes.BuildInfo{ - "base": { - Frontend: "dockerfile.v0", - Sources: []binfotypes.Source{ - { - Type: "docker-image", - Ref: "docker.io/library/alpine:latest", - Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", - }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + }, + deps: map[string]binfotypes.BuildInfo{ + "base": { + Frontend: "dockerfile.v0", + Sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine:latest", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", }, }, }, @@ -267,7 +310,7 @@ func TestDedupSources(t *testing.T) { for _, tt := range cases { tt := tt t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.want, dedupSources(tt.bi, allDepsSources(tt.bi, nil))) + assert.Equal(t, tt.want, dedupSources(tt.sources, allDepsSources(tt.deps, nil))) }) } } @@ -329,16 +372,42 @@ func TestFormat(t *testing.T) { Frontend: "dockerfile.v0", Attrs: map[string]*string{ "build-arg:foo": stringPtr("bar"), - "context": stringPtr("https://github.com/crazy-max/buildkit-buildsources-test.git#master"), + "context:base": stringPtr("input:base"), "filename": stringPtr("Dockerfile"), "source": stringPtr("crazymax/dockerfile:master"), }, Sources: []binfotypes.Source{ { - Type: binfotypes.SourceTypeDockerImage, - Ref: "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - Alias: "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - Pin: "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + { + Type: "http", + Ref: 
"https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", + }, + }, + Deps: map[string]binfotypes.BuildInfo{ + "base": { + Frontend: "dockerfile.v0", + Attrs: map[string]*string{ + "build-arg:foo": stringPtr("bar"), + "filename": stringPtr("Dockerfile2"), + "source": stringPtr("crazymax/dockerfile:master"), + }, + Sources: []binfotypes.Source{ + { + Type: "docker-image", + Ref: "docker.io/library/alpine:latest", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:1.35.0", + Pin: "sha256:20246233b52de844fa516f8c51234f1441e55e71ecdd1a1d91ebb252e1fd4603", + }, + }, }, }, } @@ -360,10 +429,24 @@ func TestFormat(t *testing.T) { Frontend: "dockerfile.v0", Sources: []binfotypes.Source{ { - Type: binfotypes.SourceTypeDockerImage, - Ref: "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - Alias: "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", - Pin: "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0", + Type: "docker-image", + Ref: "docker.io/library/alpine:latest", + Pin: "sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3", + }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:1.35.0", + Pin: "sha256:20246233b52de844fa516f8c51234f1441e55e71ecdd1a1d91ebb252e1fd4603", + }, + { + Type: "docker-image", + Ref: "docker.io/library/busybox:latest", + Pin: "sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578", + }, + { + Type: "http", + Ref: "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", + Pin: "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", }, }, }, diff --git a/util/buildinfo/types/types.go b/util/buildinfo/types/types.go index 93abcd1b4f12..06cf09681e74 100644 --- a/util/buildinfo/types/types.go +++ b/util/buildinfo/types/types.go @@ -1,3 +1,6 @@ +// Package binfotypes implements types for build information. +// +// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md package binfotypes import ( diff --git a/util/compression/compression.go b/util/compression/compression.go index ba44a9270b4e..cfc26b90780a 100644 --- a/util/compression/compression.go +++ b/util/compression/compression.go @@ -5,33 +5,53 @@ import ( "context" "io" + cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Type represents compression type for blob data. -type Type int +type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) +type Decompressor func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) +type Finalizer func(context.Context, content.Store) (map[string]string, error) + +// Type represents compression type for blob data, which needs +// to be implemented for each compression type. 
+type Type interface { + Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) + Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) + NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) + NeedsComputeDiffBySelf() bool + OnlySupportOCITypes() bool + NeedsForceCompression() bool + MediaType() string + String() string +} -const ( +type ( + uncompressedType struct{} + gzipType struct{} + estargzType struct{} + zstdType struct{} +) + +var ( // Uncompressed indicates no compression. - Uncompressed Type = iota + Uncompressed = uncompressedType{} // Gzip is used for blob data. - Gzip + Gzip = gzipType{} // EStargz is used for estargz data. - EStargz + EStargz = estargzType{} // Zstd is used for Zstandard data. - Zstd - - // UnknownCompression means not supported yet. - UnknownCompression Type = -1 + Zstd = zstdType{} ) type Config struct { @@ -61,70 +81,42 @@ const ( mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790 ) -var Default = Gzip +var Default gzipType = Gzip -func Parse(t string) Type { +func parse(t string) (Type, error) { switch t { - case "uncompressed": - return Uncompressed - case "gzip": - return Gzip - case "estargz": - return EStargz - case "zstd": - return Zstd - default: - return UnknownCompression - } -} - -func (ct Type) String() string { - switch ct { - case Uncompressed: - return "uncompressed" - case Gzip: - return "gzip" - case EStargz: - return "estargz" - case Zstd: - return "zstd" + case Uncompressed.String(): + return Uncompressed, nil + case Gzip.String(): + return Gzip, nil + case EStargz.String(): + return EStargz, nil + case Zstd.String(): + return Zstd, nil default: - return "unknown" + return nil, errors.Errorf("unsupported compression type %s", t) } } -func (ct Type) DefaultMediaType() string { - switch ct { - case Uncompressed: - return ocispecs.MediaTypeImageLayer - case Gzip, EStargz: - return ocispecs.MediaTypeImageLayerGzip - case Zstd: - return mediaTypeImageLayerZstd +func fromMediaType(mediaType string) (Type, error) { + switch toOCILayerType[mediaType] { + case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: + return Uncompressed, nil + case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: + return Gzip, nil + case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: + return Zstd, nil default: - return ocispecs.MediaTypeImageLayer + "+unknown" + return nil, errors.Errorf("unsupported media type %s", mediaType) } } -func (ct Type) IsMediaType(mt string) bool { +func IsMediaType(ct Type, mt string) bool { mt, ok := toOCILayerType[mt] if !ok { return false } - return mt == ct.DefaultMediaType() -} - -func FromMediaType(mediaType string) Type { - switch toOCILayerType[mediaType] { - case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: - return Uncompressed - case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: - return Gzip - case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: - return Zstd - default: - return UnknownCompression - } + return mt == ct.MediaType() } // DetectLayerMediaType returns media type from existing blob data. @@ -170,7 +162,7 @@ func detectCompressionType(cr *io.SectionReader) (Type, error) { // means just create an empty layer. 
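With the old integer enum replaced by this interface, callers select a `Type` once and then drive compression entirely through its methods (the detectCompressionType hunk continues after this aside). A minimal sketch using the exported `Parse` wrapper added later in this diff (util/compression/parse.go); the empty `Config` relies on defaults and is illustrative:

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/moby/buildkit/util/compression"
)

func main() {
	ctx := context.Background()

	ct, err := compression.Parse("zstd") // returns the zstdType singleton
	if err != nil {
		panic(err)
	}

	compress, finalize := ct.Compress(ctx, compression.Config{})

	var buf bytes.Buffer
	w, err := compress(&buf, ct.MediaType())
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("layer data")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// finalize is nil for plain gzip/zstd; estargz and nydus use it to
	// return extra layer annotations after the blob has been written.
	_ = finalize

	fmt.Println(ct.String(), ct.MediaType(), buf.Len())
}
```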
// // See issue docker/docker#18170 - return UnknownCompression, err + return nil, err } if _, _, err := estargz.OpenFooter(cr); err == nil { @@ -241,3 +233,25 @@ func ConvertAllLayerMediaTypes(oci bool, descs ...ocispecs.Descriptor) []ocispec } return converted } + +func decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (r io.ReadCloser, err error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + esgz, err := EStargz.Is(ctx, cs, desc.Digest) + if err != nil { + return nil, err + } else if esgz { + r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } else { + r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } + return &iohelper.ReadCloser{ReadCloser: r, CloseFunc: ra.Close}, nil +} diff --git a/cache/estargz.go b/util/compression/estargz.go similarity index 76% rename from cache/estargz.go rename to util/compression/estargz.go index f67d14925d49..9d44d940486c 100644 --- a/cache/estargz.go +++ b/util/compression/estargz.go @@ -1,4 +1,4 @@ -package cache +package compression import ( "archive/tar" @@ -11,24 +11,30 @@ import ( cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" - "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -var eStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} +var EStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} -// compressEStargz writes the passed blobs stream as an eStargz-compressed blob. -// finalize function finalizes the written blob metadata and returns all eStargz annotations. 
-func compressEStargz(comp compression.Config) (compressorFunc compressor, finalize func(context.Context, content.Store) (map[string]string, error)) { +const containerdUncompressed = "containerd.io/uncompressed" +const estargzLabel = "buildkit.io/compression/estargz" + +func (c estargzType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { var cInfo *compressionInfo var writeErr error var mu sync.Mutex return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { - if compression.FromMediaType(requiredMediaType) != compression.Gzip { - return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) + ct, err := FromMediaType(requiredMediaType) + if err != nil { + return nil, err + } + if ct != Gzip { + return nil, errors.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) } done := make(chan struct{}) pr, pw := io.Pipe() @@ -76,7 +82,7 @@ func compressEStargz(comp compression.Config) (compressorFunc compressor, finali pr.Close() return nil }() - return &writeCloser{pw, func() error { + return &iohelper.WriteCloser{WriteCloser: pw, CloseFunc: func() error { <-done // wait until the write completes return nil }}, nil @@ -113,11 +119,44 @@ func compressEStargz(comp compression.Config) (compressorFunc compressor, finali } } -const estargzLabel = "buildkit.io/compression/estargz" +func (c estargzType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c estargzType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + esgz, err := c.Is(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if !images.IsLayerType(desc.MediaType) || esgz { + return false, nil + } + return true, nil +} + +func (c estargzType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c estargzType) OnlySupportOCITypes() bool { + return true +} + +func (c estargzType) NeedsForceCompression() bool { + return false +} + +func (c estargzType) MediaType() string { + return ocispecs.MediaTypeImageLayerGzip +} + +func (c estargzType) String() string { + return "estargz" +} // isEStargz returns true when the specified digest of content exists in // the content store and it's eStargz. 
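This file's move into util/compression turns `compressEStargz` into the `Compress` method shown above (the removed original continues below). Unlike gzip or zstd, the estargz compressor also needs a finalizer step to report layer annotations once the blob is fully written. A hedged sketch of that calling convention, with the surrounding store handling simplified:

```go
package example

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/util/compression"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// compressLayer pipes an uncompressed layer tar through the estargz
// compressor, then asks the finalizer for the layer annotations
// (e.g. the TOC digest) once the blob is complete.
func compressLayer(ctx context.Context, cs content.Store, tarStream io.Reader, dest io.Writer) (map[string]string, error) {
	compress, finalize := compression.EStargz.Compress(ctx, compression.Config{})
	w, err := compress(dest, ocispecs.MediaTypeImageLayerGzip) // estargz only accepts gzip media types
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(w, tarStream); err != nil {
		w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil { // waits for the estargz write to finish
		return nil, err
	}
	if finalize == nil { // gzip/zstd/uncompressed return a nil finalizer
		return nil, nil
	}
	return finalize(ctx, cs)
}
```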
-func isEStargz(ctx context.Context, cs content.Store, dgst digest.Digest) (bool, error) { +func (c estargzType) Is(ctx context.Context, cs content.Store, dgst digest.Digest) (bool, error) { info, err := cs.Info(ctx, dgst) if err != nil { return false, nil @@ -178,39 +217,6 @@ func decompressEStargz(r *io.SectionReader) (io.ReadCloser, error) { return estargz.Unpack(r, new(estargz.GzipDecompressor)) } -type writeCloser struct { - io.WriteCloser - closeFunc func() error -} - -func (wc *writeCloser) Close() error { - err1 := wc.WriteCloser.Close() - err2 := wc.closeFunc() - if err1 != nil { - return errors.Wrapf(err1, "failed to close: %v", err2) - } - return err2 -} - -type counter struct { - n int64 - mu sync.Mutex -} - -func (c *counter) Write(p []byte) (n int, err error) { - c.mu.Lock() - c.n += int64(len(p)) - c.mu.Unlock() - return len(p), nil -} - -func (c *counter) size() (n int64) { - c.mu.Lock() - n = c.n - c.mu.Unlock() - return -} - type compressionInfo struct { blobInfo tocDigest digest.Digest @@ -227,7 +233,7 @@ func calculateBlobInfo() (io.WriteCloser, chan blobInfo) { pr, pw := io.Pipe() go func() { defer pr.Close() - c := new(counter) + c := new(iohelper.Counter) dgstr := digest.Canonical.Digester() diffID := digest.Canonical.Digester() decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash())) @@ -244,7 +250,7 @@ func calculateBlobInfo() (io.WriteCloser, chan blobInfo) { pr.CloseWithError(err) return } - res <- blobInfo{dgstr.Digest(), diffID.Digest(), c.size()} + res <- blobInfo{dgstr.Digest(), diffID.Digest(), c.Size()} }() return pw, res } diff --git a/util/compression/gzip.go b/util/compression/gzip.go new file mode 100644 index 000000000000..7120ba35e38d --- /dev/null +++ b/util/compression/gzip.go @@ -0,0 +1,69 @@ +package compression + +import ( + "compress/gzip" + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c gzipType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, _ string) (io.WriteCloser, error) { + return gzipWriter(comp)(dest) + }, nil +} + +func (c gzipType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c gzipType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + esgz, err := EStargz.Is(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Gzip && !esgz { + return false, nil + } + return true, nil +} + +func (c gzipType) NeedsComputeDiffBySelf() bool { + return false +} + +func (c gzipType) OnlySupportOCITypes() bool { + return false +} + +func (c gzipType) NeedsForceCompression() bool { + return false +} + +func (c gzipType) MediaType() string { + return ocispecs.MediaTypeImageLayerGzip +} + +func (c gzipType) String() string { + return "gzip" +} + +func gzipWriter(comp Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := gzip.DefaultCompression + if comp.Level != nil { + level = *comp.Level + } + return gzip.NewWriterLevel(dest, level) + } +} diff --git a/util/compression/nydus.go b/util/compression/nydus.go new file mode 100644 index 
000000000000..4e04be70b7ab --- /dev/null +++ b/util/compression/nydus.go @@ -0,0 +1,141 @@ +//go:build nydus +// +build nydus + +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +type nydusType struct{} + +var Nydus = nydusType{} + +func init() { + toDockerLayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob + toOCILayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob +} + +func Parse(t string) (Type, error) { + ct, err := parse(t) + if err != nil && t == Nydus.String() { + return Nydus, nil + } + return ct, err +} + +func FromMediaType(mediaType string) (Type, error) { + ct, err := fromMediaType(mediaType) + if err != nil && mediaType == nydusify.MediaTypeNydusBlob { + return Nydus, nil + } + return ct, err +} + +func (c nydusType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + digester := digest.Canonical.Digester() + return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { + writer := io.MultiWriter(dest, digester.Hash()) + return nydusify.Pack(ctx, writer, nydusify.PackOption{}) + }, func(ctx context.Context, cs content.Store) (map[string]string, error) { + // Fill necessary labels + uncompressedDgst := digester.Digest().String() + info, err := cs.Info(ctx, digester.Digest()) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[containerdUncompressed] = uncompressedDgst + if _, err := cs.Update(ctx, info, "labels."+containerdUncompressed); err != nil { + return nil, errors.Wrap(err, "update info to content store") + } + + // Fill annotations + annotations := map[string]string{ + containerdUncompressed: uncompressedDgst, + // Use this annotation to identify nydus blob layer. + nydusify.LayerAnnotationNydusBlob: "true", + } + return annotations, nil + } +} + +func (c nydusType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + + return pr, nil +} + +func (c nydusType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + + if isNydusBlob, err := c.Is(ctx, cs, desc); err != nil { + return true, nil + } else if isNydusBlob { + return false, nil + } + + return true, nil +} + +func (c nydusType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c nydusType) OnlySupportOCITypes() bool { + return true +} + +func (c nydusType) NeedsForceCompression() bool { + return true +} + +func (c nydusType) MediaType() string { + return nydusify.MediaTypeNydusBlob +} + +func (c nydusType) String() string { + return "nydus" +} + +// Is returns true when the specified digest of content exists in +// the content store and it's nydus format. 
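(The `Is` implementation that the comment above introduces continues right after this aside.) Because nydus support is gated behind a build tag, the exported `Parse` defined in this file shadows the default one in parse.go below. A sketch of what the tag changes, assuming a standard `go build -tags nydus`:

```go
//go:build nydus
// +build nydus

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/compression"
)

func main() {
	// Only succeeds when built with -tags nydus; the untagged parse.go
	// variant rejects "nydus" as an unsupported compression type.
	ct, err := compression.Parse("nydus")
	if err != nil {
		panic(err)
	}
	// Per the methods above, nydus forces compression and is OCI-only.
	fmt.Println(ct.String(), ct.NeedsForceCompression(), ct.OnlySupportOCITypes())
}
```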
+func (c nydusType) Is(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if desc.Annotations == nil { + return false, nil + } + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + + _, err := cs.Info(ctx, desc.Digest) + if err != nil { + return false, err + } + + return hasMediaType && hasAnno, nil +} diff --git a/util/compression/parse.go b/util/compression/parse.go new file mode 100644 index 000000000000..6567da4e877d --- /dev/null +++ b/util/compression/parse.go @@ -0,0 +1,12 @@ +//go:build !nydus +// +build !nydus + +package compression + +func Parse(t string) (Type, error) { + return parse(t) +} + +func FromMediaType(mediaType string) (Type, error) { + return fromMediaType(mediaType) +} diff --git a/util/compression/uncompressed.go b/util/compression/uncompressed.go new file mode 100644 index 000000000000..5fc5b8e92a19 --- /dev/null +++ b/util/compression/uncompressed.go @@ -0,0 +1,61 @@ +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/docker/docker/pkg/ioutils" + "github.com/moby/buildkit/util/iohelper" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c uncompressedType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, mediaType string) (io.WriteCloser, error) { + return &iohelper.NopWriteCloser{Writer: dest}, nil + }, nil +} + +func (c uncompressedType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + rdr := io.NewSectionReader(ra, 0, ra.Size()) + return ioutils.NewReadCloserWrapper(rdr, ra.Close), nil +} + +func (c uncompressedType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Uncompressed { + return false, nil + } + return true, nil +} + +func (c uncompressedType) NeedsComputeDiffBySelf() bool { + return false +} + +func (c uncompressedType) OnlySupportOCITypes() bool { + return false +} + +func (c uncompressedType) NeedsForceCompression() bool { + return false +} + +func (c uncompressedType) MediaType() string { + return ocispecs.MediaTypeImageLayer +} + +func (c uncompressedType) String() string { + return "uncompressed" +} diff --git a/util/compression/zstd.go b/util/compression/zstd.go new file mode 100644 index 000000000000..f18872199f64 --- /dev/null +++ b/util/compression/zstd.go @@ -0,0 +1,80 @@ +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/klauspost/compress/zstd" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c zstdType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, _ string) (io.WriteCloser, error) { + return zstdWriter(comp)(dest) + }, nil +} + +func (c zstdType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c zstdType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, 
error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Zstd { + return false, nil + } + return true, nil +} + +func (c zstdType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c zstdType) OnlySupportOCITypes() bool { + return false +} + +func (c zstdType) NeedsForceCompression() bool { + return false +} + +func (c zstdType) MediaType() string { + return mediaTypeImageLayerZstd +} + +func (c zstdType) String() string { + return "zstd" +} + +func zstdWriter(comp Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := zstd.SpeedDefault + if comp.Level != nil { + level = toZstdEncoderLevel(*comp.Level) + } + return zstd.NewWriter(dest, zstd.WithEncoderLevel(level)) + } +} + +func toZstdEncoderLevel(level int) zstd.EncoderLevel { + // map zstd compression levels to go-zstd levels + // once we also have c based implementation move this to helper pkg + if level < 0 { + return zstd.SpeedDefault + } else if level < 3 { + return zstd.SpeedFastest + } else if level < 7 { + return zstd.SpeedDefault + } else if level < 9 { + return zstd.SpeedBetterCompression + } + return zstd.SpeedBestCompression +} diff --git a/util/contentutil/buffer.go b/util/contentutil/buffer.go index 31d2be6867ff..9230b20731e1 100644 --- a/util/contentutil/buffer.go +++ b/util/contentutil/buffer.go @@ -3,7 +3,8 @@ package contentutil import ( "bytes" "context" - "io/ioutil" + "io" + "strings" "sync" "time" @@ -18,12 +19,14 @@ import ( type Buffer interface { content.Provider content.Ingester + content.Manager } // NewBuffer returns a new buffer func NewBuffer() Buffer { return &buffer{ buffers: map[digest.Digest][]byte{}, + infos: map[digest.Digest]content.Info{}, refs: map[string]struct{}{}, } } @@ -31,9 +34,59 @@ func NewBuffer() Buffer { type buffer struct { mu sync.Mutex buffers map[digest.Digest][]byte + infos map[digest.Digest]content.Info refs map[string]struct{} } +func (b *buffer) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + b.mu.Lock() + v, ok := b.infos[dgst] + b.mu.Unlock() + if !ok { + return content.Info{}, errdefs.ErrNotFound + } + return v, nil +} + +func (b *buffer) Update(ctx context.Context, new content.Info, fieldpaths ...string) (content.Info, error) { + b.mu.Lock() + defer b.mu.Unlock() + + updated, ok := b.infos[new.Digest] + if !ok { + return content.Info{}, errdefs.ErrNotFound + } + + if len(fieldpaths) == 0 { + fieldpaths = []string{"labels"} + } + + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = new.Labels[key] + continue + } + if path == "labels" { + updated.Labels = new.Labels + } + } + + b.infos[new.Digest] = updated + return updated, nil +} + +func (b *buffer) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + return nil // not implemented +} + +func (b *buffer) Delete(ctx context.Context, dgst digest.Digest) error { + return nil // not implemented +} + func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { var wOpts content.WriterOpts for _, opt := range opts { @@ -64,7 +117,7 @@ func (b *buffer) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (conten if err != nil { return nil, err } - return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), 
size: int64(r.Len())}, nil + return &readerAt{Reader: r, Closer: io.NopCloser(r), size: int64(r.Len())}, nil } func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) { @@ -82,6 +135,7 @@ func (b *buffer) addValue(k digest.Digest, dt []byte) { b.mu.Lock() defer b.mu.Unlock() b.buffers[k] = dt + b.infos[k] = content.Info{Digest: k, Size: int64(len(dt))} } type bufferedWriter struct { diff --git a/util/contentutil/buffer_test.go b/util/contentutil/buffer_test.go index e8c1fd39ff75..22254b57abca 100644 --- a/util/contentutil/buffer_test.go +++ b/util/contentutil/buffer_test.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes/docker" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -71,3 +72,49 @@ func TestReaderAt(t *testing.T) { require.Equal(t, err, io.EOF) require.Equal(t, "bar", string(buf[:n])) } + +func TestLabels(t *testing.T) { + t.Parallel() + ctx := context.TODO() + + b := NewBuffer() + + err := content.WriteBlob(ctx, b, "foo", bytes.NewBuffer([]byte("foobar")), ocispecs.Descriptor{Size: -1}) + require.NoError(t, err) + + _, err = b.Info(ctx, digest.FromBytes([]byte("abc"))) + require.Error(t, err) + + info, err := b.Info(ctx, digest.FromBytes([]byte("foobar"))) + require.NoError(t, err) + + require.Equal(t, info.Digest, digest.FromBytes([]byte("foobar"))) + + hf, err := docker.AppendDistributionSourceLabel(b, "docker.io/library/busybox:latest") + require.NoError(t, err) + _, err = hf.Handle(ctx, ocispecs.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) + require.NoError(t, err) + + info, err = b.Info(ctx, digest.FromBytes([]byte("foobar"))) + require.NoError(t, err) + require.Equal(t, info.Digest, digest.FromBytes([]byte("foobar"))) + + require.Equal(t, "library/busybox", info.Labels["containerd.io/distribution.source.docker.io"]) + + hf, err = docker.AppendDistributionSourceLabel(b, "docker.io/library/alpine:3.15") + require.NoError(t, err) + _, err = hf.Handle(ctx, ocispecs.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) + require.NoError(t, err) + + hf, err = docker.AppendDistributionSourceLabel(b, "ghcr.io/repos/alpine:3.11") + require.NoError(t, err) + _, err = hf.Handle(ctx, ocispecs.Descriptor{Digest: digest.FromBytes([]byte("foobar"))}) + require.NoError(t, err) + + info, err = b.Info(ctx, digest.FromBytes([]byte("foobar"))) + require.NoError(t, err) + require.Equal(t, info.Digest, digest.FromBytes([]byte("foobar"))) + + require.Equal(t, "library/alpine,library/busybox", info.Labels["containerd.io/distribution.source.docker.io"]) + require.Equal(t, "repos/alpine", info.Labels["containerd.io/distribution.source.ghcr.io"]) +} diff --git a/util/contentutil/copy.go b/util/contentutil/copy.go index 2509ce1a3be0..5039bd0c207a 100644 --- a/util/contentutil/copy.go +++ b/util/contentutil/copy.go @@ -3,6 +3,7 @@ package contentutil import ( "context" "io" + "strings" "sync" "github.com/containerd/containerd/content" @@ -75,7 +76,7 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. 
} }) handlers := []images.Handler{ - images.ChildrenHandler(provider), + annotateDistributionSourceHandler(images.ChildrenHandler(provider), desc.Annotations), filterHandler, retryhandler.New(limited.FetchHandler(ingester, &localFetcher{provider}, ""), func(_ []byte) {}), } @@ -92,3 +93,45 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. return nil } + +func annotateDistributionSourceHandler(f images.HandlerFunc, basis map[string]string) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + for k, v := range basis { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + if child.Annotations != nil { + if _, ok := child.Annotations[k]; ok { + // don't override if already present + continue + } + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + + return children, nil + } +} diff --git a/util/contentutil/multiprovider.go b/util/contentutil/multiprovider.go index 469096d3408b..aba096d7c350 100644 --- a/util/contentutil/multiprovider.go +++ b/util/contentutil/multiprovider.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/moby/buildkit/session" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -90,3 +91,23 @@ func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) { defer mp.mu.Unlock() mp.sub[dgst] = p } + +func (mp *MultiProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { + type unlazySession interface { + UnlazySession(ocispecs.Descriptor) session.Group + } + + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + if cd, ok := p.(unlazySession); ok { + return cd.UnlazySession(desc) + } + } else { + mp.mu.RUnlock() + } + if cd, ok := mp.base.(unlazySession); ok { + return cd.UnlazySession(desc) + } + return nil +} diff --git a/util/contentutil/source.go b/util/contentutil/source.go new file mode 100644 index 000000000000..b544ed0aa337 --- /dev/null +++ b/util/contentutil/source.go @@ -0,0 +1,34 @@ +package contentutil + +import ( + "net/url" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/reference" +) + +func HasSource(info content.Info, refspec reference.Spec) (bool, error) { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return false, err + } + + if info.Labels == nil { + return false, nil + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := info.Labels["containerd.io/distribution.source."+source] + if !ok || repoLabel == "" { + return false, nil + } + + for _, repo := range strings.Split(repoLabel, ",") { + // one of the recorded source repositories matches the target repo + if repo == target { + return true, nil + } + } + return false, nil +} diff --git a/util/contentutil/source_test.go b/util/contentutil/source_test.go new file mode 100644 index 000000000000..9100e5c426d6 --- 
/dev/null +++ b/util/contentutil/source_test.go @@ -0,0 +1,57 @@ +package contentutil + +import ( + "testing" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/reference" + "github.com/stretchr/testify/require" +) + +func TestHasSource(t *testing.T) { + info := content.Info{ + Labels: map[string]string{ + "containerd.io/distribution.source.docker.io": "library/alpine", + }, + } + ref, err := reference.Parse("docker.io/library/alpine:latest") + require.NoError(t, err) + b, err := HasSource(info, ref) + require.NoError(t, err) + require.True(t, b) + + info = content.Info{ + Labels: map[string]string{ + "containerd.io/distribution.source.docker.io": "library/alpine,library/ubuntu", + }, + } + b, err = HasSource(info, ref) + require.NoError(t, err) + require.True(t, b) + + info = content.Info{} + b, err = HasSource(info, ref) + require.NoError(t, err) + require.False(t, b) + + info = content.Info{Labels: map[string]string{}} + b, err = HasSource(info, ref) + require.NoError(t, err) + require.False(t, b) + + info = content.Info{ + Labels: map[string]string{ + "containerd.io/distribution.source.docker.io": "library/ubuntu", + }, + } + b, err = HasSource(info, ref) + require.NoError(t, err) + require.False(t, b) + + info = content.Info{Labels: map[string]string{ + "containerd.io/distribution.source.ghcr.io": "library/alpine", + }} + b, err = HasSource(info, ref) + require.NoError(t, err) + require.False(t, b) +} diff --git a/util/entitlements/security/security_linux.go b/util/entitlements/security/security_linux.go index 6e0557961c5b..c53a24b865ab 100644 --- a/util/entitlements/security/security_linux.go +++ b/util/entitlements/security/security_linux.go @@ -145,7 +145,7 @@ func getCurrentCaps() ([]string, error) { func getAllCaps() ([]string, error) { availableCaps, err := getCurrentCaps() if err != nil { - return nil, fmt.Errorf("error getting current capabilities: %s", err) + return nil, errors.Errorf("error getting current capabilities: %s", err) } // see if any of the base linux35Caps are not available to be granted diff --git a/util/gitutil/git_ref.go b/util/gitutil/git_ref.go new file mode 100644 index 000000000000..da15b8aaf3a9 --- /dev/null +++ b/util/gitutil/git_ref.go @@ -0,0 +1,85 @@ +package gitutil + +import ( + "regexp" + "strings" + + "github.com/containerd/containerd/errdefs" +) + +// GitRef represents a git ref. +// +// Examples: +// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into: +// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}. +type GitRef struct { + // Remote is the remote repository path. + Remote string + + // ShortName is the directory name of the repo. + // e.g., "bar" for "https://github.com/foo/bar.git" + ShortName string + + // Commit is a commit hash, a tag, or branch name. + // Commit is optional. + Commit string + + // SubDir is a directory path inside the repo. + // SubDir is optional. + SubDir string + + // IndistinguishableFromLocal is true for a ref that is indistinguishable from a local file path, + // e.g., "github.com/foo/bar". + // + // Deprecated. + // Instead, use a distinguishable form such as "https://github.com/foo/bar.git". + // + // The dockerfile frontend still accepts this form only for build contexts. + IndistinguishableFromLocal bool + + // UnencryptedTCP is true for a ref that needs an unencrypted TCP connection, + // e.g., "git://..." and "http://..." . + // + // Discouraged, although not deprecated. 
+ // Instead, consider using an encrypted TCP connection such as "git@github.com:foo/bar.git" or "https://github.com/foo/bar.git". + UnencryptedTCP bool +} + +var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) + +// ParseGitRef parses a git ref. +func ParseGitRef(ref string) (*GitRef, error) { + res := &GitRef{} + + if strings.HasPrefix(ref, "github.com/") { + res.IndistinguishableFromLocal = true // Deprecated + } else { + _, proto := ParseProtocol(ref) + switch proto { + case UnknownProtocol: + return nil, errdefs.ErrInvalidArgument + } + switch proto { + case HTTPProtocol, GitProtocol: + res.UnencryptedTCP = true // Discouraged, but not deprecated + } + switch proto { + // An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix. + case HTTPProtocol, HTTPSProtocol: + if !gitURLPathWithFragmentSuffix.MatchString(ref) { + return nil, errdefs.ErrInvalidArgument + } + } + } + + var fragment string + res.Remote, fragment, _ = strings.Cut(ref, "#") + if len(res.Remote) == 0 { + return res, errdefs.ErrInvalidArgument + } + res.Commit, res.SubDir, _ = strings.Cut(fragment, ":") + repoSplitBySlash := strings.Split(res.Remote, "/") + res.ShortName = strings.TrimSuffix(repoSplitBySlash[len(repoSplitBySlash)-1], ".git") + return res, nil +} diff --git a/util/gitutil/git_ref_test.go b/util/gitutil/git_ref_test.go new file mode 100644 index 000000000000..fff60b30f562 --- /dev/null +++ b/util/gitutil/git_ref_test.go @@ -0,0 +1,140 @@ +package gitutil + +import ( + "reflect" + "testing" +) + +func TestParseGitRef(t *testing.T) { + cases := []struct { + ref string + expected *GitRef + }{ + { + ref: "https://example.com/", + expected: nil, + }, + { + ref: "https://example.com/foo", + expected: nil, + }, + { + ref: "https://example.com/foo.git", + expected: &GitRef{ + Remote: "https://example.com/foo.git", + ShortName: "foo", + }, + }, + { + ref: "https://example.com/foo.git#deadbeef", + expected: &GitRef{ + Remote: "https://example.com/foo.git", + ShortName: "foo", + Commit: "deadbeef", + }, + }, + { + ref: "https://example.com/foo.git#release/1.2", + expected: &GitRef{ + Remote: "https://example.com/foo.git", + ShortName: "foo", + Commit: "release/1.2", + }, + }, + { + ref: "https://example.com/foo.git/", + expected: nil, + }, + { + ref: "https://example.com/foo.git.bar", + expected: nil, + }, + { + ref: "git://example.com/foo", + expected: &GitRef{ + Remote: "git://example.com/foo", + ShortName: "foo", + UnencryptedTCP: true, + }, + }, + { + ref: "github.com/moby/buildkit", + expected: &GitRef{ + Remote: "github.com/moby/buildkit", + ShortName: "buildkit", + IndistinguishableFromLocal: true, + }, + }, + { + ref: "https://github.com/moby/buildkit", + expected: nil, + }, + { + ref: "https://github.com/moby/buildkit.git", + expected: &GitRef{ + Remote: "https://github.com/moby/buildkit.git", + ShortName: "buildkit", + }, + }, + { + ref: "git@github.com:moby/buildkit", + expected: &GitRef{ + Remote: "git@github.com:moby/buildkit", + ShortName: "buildkit", + }, + }, + { + ref: "git@github.com:moby/buildkit.git", + expected: &GitRef{ + Remote: "git@github.com:moby/buildkit.git", + ShortName: "buildkit", + }, + }, + { + ref: "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + expected: &GitRef{ + Remote: "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + ShortName: "atlassian-docker", + }, + }, + { + ref: "https://github.com/foo/bar.git#baz/qux:quux/quuz", + 
expected: &GitRef{ + Remote: "https://github.com/foo/bar.git", + ShortName: "bar", + Commit: "baz/qux", + SubDir: "quux/quuz", + }, + }, + { + ref: "http://github.com/docker/docker.git:#branch", + expected: nil, + }, + { + ref: "https://github.com/docker/docker.git#:myfolder", + expected: &GitRef{ + Remote: "https://github.com/docker/docker.git", + ShortName: "docker", + SubDir: "myfolder", + }, + }, + } + for _, tt := range cases { + tt := tt + t.Run(tt.ref, func(t *testing.T) { + got, err := ParseGitRef(tt.ref) + if tt.expected == nil { + if err == nil { + t.Errorf("expected an error for ParseGitRef(%q)", tt.ref) + } + } else { + if err != nil { + t.Errorf("got an unexpected error: ParseGitRef(%q): %v", tt.ref, err) + } + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("expected ParseGitRef(%q) to return %#v, got %#v", tt.ref, tt.expected, got) + } + } + }) + } +} diff --git a/util/grpcerrors/grpcerrors.go b/util/grpcerrors/grpcerrors.go index f52f18673ee6..6cd9fae98ece 100644 --- a/util/grpcerrors/grpcerrors.go +++ b/util/grpcerrors/grpcerrors.go @@ -5,8 +5,9 @@ import ( "errors" "github.com/containerd/typeurl" + rpc "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" - "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/util/stack" "github.com/sirupsen/logrus" @@ -42,6 +43,14 @@ func ToGRPC(err error) error { st = status.FromProto(pb) } + // If the original error was wrapped with more context than the GRPCStatus error, + // copy the original message to the GRPCStatus error + if err.Error() != st.Message() { + pb := st.Proto() + pb.Message = err.Error() + st = status.FromProto(pb) + } + var details []proto.Message for _, st := range stack.Traces(err) { @@ -173,7 +182,7 @@ func FromGRPC(err error) error { for _, s := range stacks { if s != nil { - err = stack.Wrap(err, *s) + err = stack.Wrap(err, s) } } @@ -188,6 +197,20 @@ func FromGRPC(err error) error { return stack.Enable(err) } +func ToRPCStatus(st *spb.Status) *rpc.Status { + details := make([]*gogotypes.Any, len(st.Details)) + + for i, d := range st.Details { + details[i] = gogoAny(d) + } + + return &rpc.Status{ + Code: int32(st.Code), + Message: st.Message, + Details: details, + } +} + type grpcStatusError struct { st *status.Status } diff --git a/util/imageutil/buildinfo.go b/util/imageutil/buildinfo.go index 2ef1e75cfc0b..7196453c33c0 100644 --- a/util/imageutil/buildinfo.go +++ b/util/imageutil/buildinfo.go @@ -9,6 +9,8 @@ import ( ) // BuildInfo returns build info from image config. 
+// +// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md func BuildInfo(dt []byte) (*binfotypes.BuildInfo, error) { if len(dt) == 0 { return nil, nil diff --git a/util/imageutil/config.go b/util/imageutil/config.go index cfb9d417b38b..76e0a5da350c 100644 --- a/util/imageutil/config.go +++ b/util/imageutil/config.go @@ -13,6 +13,8 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/attestation" + "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/resolver/limited" "github.com/moby/buildkit/util/resolver/retryhandler" @@ -24,6 +26,7 @@ import ( type ContentCache interface { content.Ingester content.Provider + content.Manager } var leasesMu sync.Mutex @@ -75,10 +78,15 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co if desc.Digest != "" { ra, err := cache.ReaderAt(ctx, desc) if err == nil { - desc.Size = ra.Size() - mt, err := DetectManifestMediaType(ra) + info, err := cache.Info(ctx, desc.Digest) if err == nil { - desc.MediaType = mt + if ok, err := contentutil.HasSource(info, ref); err == nil && ok { + desc.Size = ra.Size() + mt, err := DetectManifestMediaType(ra) + if err == nil { + desc.MediaType = mt + } + } } } } @@ -101,8 +109,14 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co children := childrenConfigHandler(cache, platform) + dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String()) + if err != nil { + return "", nil, err + } + handlers := []images.Handler{ retryhandler.New(limited.FetchHandler(cache, fetcher, str), func(_ []byte) {}), + dslHandler, children, } if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { @@ -159,7 +173,8 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo } else { descs = append(descs, index.Manifests...) } - case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType: + case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. 
return nil, nil default: diff --git a/util/imageutil/schema1.go b/util/imageutil/schema1.go index 10838bf50d9a..cd66d9123ea7 100644 --- a/util/imageutil/schema1.go +++ b/util/imageutil/schema1.go @@ -3,11 +3,12 @@ package imageutil import ( "context" "encoding/json" - "io/ioutil" + "io" "strings" "time" "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/exporter/containerimage/image" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -19,7 +20,7 @@ func readSchema1Config(ctx context.Context, ref string, desc ocispecs.Descriptor return "", nil, err } defer rc.Close() - dt, err := ioutil.ReadAll(rc) + dt, err := io.ReadAll(rc) if err != nil { return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest") } @@ -44,7 +45,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) { return nil, errors.Errorf("invalid schema1 manifest") } - var img ocispecs.Image + var img image.Image if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") } @@ -68,7 +69,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) { } } - dt, err := json.MarshalIndent(img, "", " ") + dt, err := json.MarshalIndent(img, "", " ") if err != nil { return nil, errors.Wrap(err, "failed to marshal schema1 config") } diff --git a/util/imageutil/schema1_test.go b/util/imageutil/schema1_test.go new file mode 100644 index 000000000000..17756e2e1a03 --- /dev/null +++ b/util/imageutil/schema1_test.go @@ -0,0 +1,35 @@ +package imageutil + +import ( + "bytes" + "testing" +) + +func TestConvertSchema1ConfigMeta(t *testing.T) { + dt := []byte(`{ + "schemaVersion": 1, + "name": "base-global/common", + "tag": "1.0.9.zb.standard", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:id1" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"id2\",\"parent\":\"id3\",\"created\":\"2018-07-26T11:56:23.157525618Z\",\"config\":{\"Hostname\":\"4f3d4451\",\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Volumes\":{},\"OnBuild\":[\"ARG APP_NAME\",\"COPY ${APP_NAME}.tgz /home/admin/${APP_NAME}/target/${APP_NAME}.tgz\"],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}" + } + ] +}`) + result, err := convertSchema1ConfigMeta(dt) + if err != nil { + t.Errorf("convertSchema1ConfigMeta error %v", err) + return + } + if !bytes.Contains(result, []byte("OnBuild")) { + t.Errorf("convertSchema1ConfigMeta lost onbuild") + } else if !bytes.Contains(result, []byte("COPY ${APP_NAME}.tgz /home/admin/${APP_NAME}/target/${APP_NAME}.tg")) { + t.Errorf("convertSchema1ConfigMeta lost onbuild content") + } +} diff --git a/util/iohelper/helper.go b/util/iohelper/helper.go new file mode 100644 index 000000000000..e0ebaf9bb584 --- /dev/null +++ b/util/iohelper/helper.go @@ -0,0 +1,63 @@ +package iohelper + +import ( + "io" + "sync" + + "github.com/pkg/errors" +) + +type NopWriteCloser struct { + io.Writer +} + +func (w *NopWriteCloser) Close() error { + return nil +} + +type ReadCloser struct { + io.ReadCloser + CloseFunc func() error +} + +func (rc *ReadCloser) Close() error { + err1 := rc.ReadCloser.Close() + err2 := rc.CloseFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type WriteCloser struct { + io.WriteCloser + CloseFunc func() error +} + +func (wc *WriteCloser) Close() error { + err1 := wc.WriteCloser.Close() + 
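+ // run CloseFunc even when the wrapped Close fails, so neither cleanup is skipped; both errors are surfaced below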
err2 := wc.CloseFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type Counter struct { + n int64 + mu sync.Mutex +} + +func (c *Counter) Write(p []byte) (n int, err error) { + c.mu.Lock() + c.n += int64(len(p)) + c.mu.Unlock() + return len(p), nil +} + +func (c *Counter) Size() (n int64) { + c.mu.Lock() + n = c.n + c.mu.Unlock() + return +} diff --git a/util/network/cniprovider/cni.go b/util/network/cniprovider/cni.go index 8ff4cad601ce..2bebfd638ee0 100644 --- a/util/network/cniprovider/cni.go +++ b/util/network/cniprovider/cni.go @@ -4,19 +4,27 @@ import ( "context" "os" "runtime" + "strings" + "sync" + "time" cni "github.com/containerd/go-cni" "github.com/gofrs/flock" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/network" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "go.opentelemetry.io/otel/trace" ) +const aboveTargetGracePeriod = 5 * time.Minute + type Opt struct { Root string ConfigPath string BinaryDir string + PoolSize int } func New(opt Opt) (network.Provider, error) { @@ -35,23 +43,35 @@ func New(opt Opt) (network.Provider, error) { cniOptions = append(cniOptions, cni.WithLoNetwork) } - cniOptions = append(cniOptions, cni.WithConfFile(opt.ConfigPath)) + if strings.HasSuffix(opt.ConfigPath, ".conflist") { + cniOptions = append(cniOptions, cni.WithConfListFile(opt.ConfigPath)) + } else { + cniOptions = append(cniOptions, cni.WithConfFile(opt.ConfigPath)) + } cniHandle, err := cni.New(cniOptions...) if err != nil { return nil, err } - cp := &cniProvider{CNI: cniHandle, root: opt.Root} + cp := &cniProvider{ + CNI: cniHandle, + root: opt.Root, + } + cleanOldNamespaces(cp) + + cp.nsPool = &cniPool{targetSize: opt.PoolSize, provider: cp} if err := cp.initNetwork(); err != nil { return nil, err } + go cp.nsPool.fillPool(context.TODO()) return cp, nil } type cniProvider struct { cni.CNI - root string + root string + nsPool *cniPool } func (c *cniProvider) initNetwork() error { @@ -62,32 +82,188 @@ func (c *cniProvider) initNetwork() error { } defer l.Unlock() } - ns, err := c.New() + ns, err := c.New(context.TODO(), "") if err != nil { return err } return ns.Close() } -func (c *cniProvider) New() (network.Namespace, error) { +func (c *cniProvider) Close() error { + c.nsPool.close() + return nil +} + +type cniPool struct { + provider *cniProvider + mu sync.Mutex + targetSize int + actualSize int + // LIFO: Ordered least recently used to most recently used + available []*cniNS + closed bool +} + +func (pool *cniPool) close() { + bklog.L.Debugf("cleaning up cni pool") + + pool.mu.Lock() + pool.closed = true + defer pool.mu.Unlock() + for len(pool.available) > 0 { + _ = pool.available[0].release() + pool.available = pool.available[1:] + pool.actualSize-- + } +} + +func (pool *cniPool) fillPool(ctx context.Context) { + for { + pool.mu.Lock() + if pool.closed { + pool.mu.Unlock() + return + } + actualSize := pool.actualSize + pool.mu.Unlock() + if actualSize >= pool.targetSize { + return + } + ns, err := pool.getNew(ctx) + if err != nil { + bklog.G(ctx).Errorf("failed to create new network namespace while prefilling pool: %s", err) + return + } + pool.put(ns) + } +} + +func (pool *cniPool) get(ctx context.Context) (*cniNS, error) { + pool.mu.Lock() + if len(pool.available) > 0 { + ns := pool.available[len(pool.available)-1] + pool.available = pool.available[:len(pool.available)-1] + pool.mu.Unlock() + 
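+ // the available slice is ordered least to most recently used, so this pops the most recently used namespace and leaves idle entries to age out via cleanupToTargetSize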
trace.SpanFromContext(ctx).AddEvent("returning network namespace from pool") + bklog.G(ctx).Debugf("returning network namespace %s from pool", ns.id) + return ns, nil + } + pool.mu.Unlock() + + return pool.getNew(ctx) +} + +func (pool *cniPool) getNew(ctx context.Context) (*cniNS, error) { + ns, err := pool.provider.newNS(ctx, "") + if err != nil { + return nil, err + } + ns.pool = pool + + pool.mu.Lock() + defer pool.mu.Unlock() + if pool.closed { + return nil, errors.New("cni pool is closed") + } + pool.actualSize++ + return ns, nil +} + +func (pool *cniPool) put(ns *cniNS) { + putTime := time.Now() + ns.lastUsed = putTime + + pool.mu.Lock() + defer pool.mu.Unlock() + if pool.closed { + _ = ns.release() + return + } + pool.available = append(pool.available, ns) + actualSize := pool.actualSize + + if actualSize > pool.targetSize { + // We have more network namespaces than our target number, so + // schedule a shrinking pass. + time.AfterFunc(aboveTargetGracePeriod, pool.cleanupToTargetSize) + } +} + +func (pool *cniPool) cleanupToTargetSize() { + var toRelease []*cniNS + defer func() { + for _, poolNS := range toRelease { + _ = poolNS.release() + } + }() + + pool.mu.Lock() + defer pool.mu.Unlock() + for pool.actualSize > pool.targetSize && + len(pool.available) > 0 && + time.Since(pool.available[0].lastUsed) >= aboveTargetGracePeriod { + bklog.L.Debugf("releasing network namespace %s since it was last used at %s", pool.available[0].id, pool.available[0].lastUsed) + toRelease = append(toRelease, pool.available[0]) + pool.available = pool.available[1:] + pool.actualSize-- + } +} + +func (c *cniProvider) New(ctx context.Context, hostname string) (network.Namespace, error) { + // We can't use the pool for namespaces that need a custom hostname. + // On Windows we also always use the pool (ignoring any custom hostname) + // because we don't have a cleanup mechanism for extra namespaces there yet. + if hostname == "" || runtime.GOOS == "windows" { + return c.nsPool.get(ctx) + } + return c.newNS(ctx, hostname) +} + +func (c *cniProvider) newNS(ctx context.Context, hostname string) (*cniNS, error) { id := identity.NewID() + trace.SpanFromContext(ctx).AddEvent("creating new network namespace") + bklog.G(ctx).Debugf("creating new network namespace %s", id) nativeID, err := createNetNS(c, id) if err != nil { return nil, err } + trace.SpanFromContext(ctx).AddEvent("finished creating network namespace") + bklog.G(ctx).Debugf("finished creating network namespace %s", id) + + nsOpts := []cni.NamespaceOpts{} + + if hostname != "" { + nsOpts = append(nsOpts, + // NB: K8S_POD_NAME is a semi-well-known arg set by k8s and podman and + // leveraged by the dnsname CNI plugin. A more generic name would be nice. 
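+ // (illustrative assumption: with the dnsname plugin configured, a namespace created with hostname "web" becomes resolvable as "web" by other workloads on the same CNI network)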
+ cni.WithArgs("K8S_POD_NAME", hostname), + + // must be set for plugins that don't understand K8S_POD_NAME + cni.WithArgs("IgnoreUnknown", "1")) + } - if _, err := c.CNI.Setup(context.TODO(), id, nativeID); err != nil { + if _, err := c.CNI.Setup(context.TODO(), id, nativeID, nsOpts...); err != nil { deleteNetNS(nativeID) return nil, errors.Wrap(err, "CNI setup error") } + trace.SpanFromContext(ctx).AddEvent("finished setting up network namespace") + bklog.G(ctx).Debugf("finished setting up network namespace %s", id) - return &cniNS{nativeID: nativeID, id: id, handle: c.CNI}, nil + return &cniNS{ + nativeID: nativeID, + id: id, + handle: c.CNI, + opts: nsOpts, + }, nil } type cniNS struct { + pool *cniPool handle cni.CNI id string nativeID string + opts []cni.NamespaceOpts + lastUsed time.Time } func (ns *cniNS) Set(s *specs.Spec) error { @@ -95,7 +271,16 @@ func (ns *cniNS) Set(s *specs.Spec) error { } func (ns *cniNS) Close() error { - err := ns.handle.Remove(context.TODO(), ns.id, ns.nativeID) + if ns.pool == nil { + return ns.release() + } + ns.pool.put(ns) + return nil +} + +func (ns *cniNS) release() error { + bklog.L.Debugf("releasing cni network namespace %s", ns.id) + err := ns.handle.Remove(context.TODO(), ns.id, ns.nativeID, ns.opts...) if err1 := unmountNetNS(ns.nativeID); err1 != nil && err == nil { err = err1 } diff --git a/util/network/cniprovider/createns_linux.go b/util/network/cniprovider/createns_linux.go index f1138a9fd5b3..a05bc0a44132 100644 --- a/util/network/cniprovider/createns_linux.go +++ b/util/network/cniprovider/createns_linux.go @@ -10,11 +10,34 @@ import ( "unsafe" "github.com/containerd/containerd/oci" + "github.com/moby/buildkit/util/bklog" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "golang.org/x/sys/unix" ) +func cleanOldNamespaces(c *cniProvider) { + nsDir := filepath.Join(c.root, "net/cni") + dirEntries, err := os.ReadDir(nsDir) + if err != nil { + bklog.L.Debugf("could not read %q for cleanup: %s", nsDir, err) + return + } + go func() { + for _, d := range dirEntries { + id := d.Name() + ns := cniNS{ + id: id, + nativeID: filepath.Join(c.root, "net/cni", id), + handle: c.CNI, + } + if err := ns.release(); err != nil { + bklog.L.Warningf("failed to release network namespace %q left over from previous run: %s", id, err) + } + } + }() +} + func createNetNS(c *cniProvider, id string) (string, error) { nsPath := filepath.Join(c.root, "net/cni", id) if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { diff --git a/util/network/cniprovider/createns_unix.go b/util/network/cniprovider/createns_unix.go index 6aa4e00c56e2..656aaa49be46 100644 --- a/util/network/cniprovider/createns_unix.go +++ b/util/network/cniprovider/createns_unix.go @@ -23,3 +23,6 @@ func unmountNetNS(nativeID string) error { func deleteNetNS(nativeID string) error { return errors.New("deleting netns for cni not supported") } + +func cleanOldNamespaces(_ *cniProvider) { +} diff --git a/util/network/cniprovider/createns_windows.go b/util/network/cniprovider/createns_windows.go index 7a0cc2d272ab..f294d64d49ac 100644 --- a/util/network/cniprovider/createns_windows.go +++ b/util/network/cniprovider/createns_windows.go @@ -47,3 +47,7 @@ func deleteNetNS(nativeID string) error { return ns.Delete() } + +func cleanOldNamespaces(_ *cniProvider) { + // not implemented on Windows +} diff --git a/util/network/host.go b/util/network/host.go index c50268d45fc7..fbd6747d002f 100644 --- a/util/network/host.go +++ b/util/network/host.go @@ -4,6 +4,8 @@ package 
network import ( + "context" + "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -15,10 +17,14 @@ func NewHostProvider() Provider { type host struct { } -func (h *host) New() (Namespace, error) { +func (h *host) New(_ context.Context, hostname string) (Namespace, error) { return &hostNS{}, nil } +func (h *host) Close() error { + return nil +} + type hostNS struct { } diff --git a/util/network/network.go b/util/network/network.go index befeef0c7518..c48f1984f030 100644 --- a/util/network/network.go +++ b/util/network/network.go @@ -1,6 +1,7 @@ package network import ( + "context" "io" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -8,7 +9,8 @@ import ( // Provider interface for Network type Provider interface { - New() (Namespace, error) + io.Closer + New(ctx context.Context, hostname string) (Namespace, error) } // Namespace of network for workers diff --git a/util/network/none.go b/util/network/none.go index 336ff68b91b8..e2b9d122d64c 100644 --- a/util/network/none.go +++ b/util/network/none.go @@ -1,6 +1,8 @@ package network import ( + "context" + specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -11,10 +13,14 @@ func NewNoneProvider() Provider { type none struct { } -func (h *none) New() (Namespace, error) { +func (h *none) New(_ context.Context, hostname string) (Namespace, error) { return &noneNS{}, nil } +func (h *none) Close() error { + return nil +} + type noneNS struct { } diff --git a/util/overlay/overlay_linux.go b/util/overlay/overlay_linux.go index 12f153f0b641..f2f69bba06c9 100644 --- a/util/overlay/overlay_linux.go +++ b/util/overlay/overlay_linux.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -114,7 +113,7 @@ func GetOverlayLayers(m mount.Mount) ([]string, error) { // WriteUpperdir writes a layer tar archive into the specified writer, based on // the diff information stored in the upperdir. func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mount.Mount) error { - emptyLower, err := ioutil.TempDir("", "buildkit") // empty directory used for the lower of diff view + emptyLower, err := os.MkdirTemp("", "buildkit") // empty directory used for the lower of diff view if err != nil { return errors.Wrapf(err, "failed to create temp dir") } @@ -183,7 +182,7 @@ func Changes(ctx context.Context, changeFn fs.ChangeFunc, upperdir, upperdirView } else if redirect { // Return error when redirect_dir is enabled which can result to a wrong diff. 
// TODO: support redirect_dir - return fmt.Errorf("redirect_dir is used but it's not supported in overlayfs differ") + return errors.New("redirect_dir is used but it's not supported in overlayfs differ") } // Check if this is a deleted entry diff --git a/util/overlay/overlay_linux_test.go b/util/overlay/overlay_linux_test.go index 013263bf4d26..b6fd6d118367 100644 --- a/util/overlay/overlay_linux_test.go +++ b/util/overlay/overlay_linux_test.go @@ -6,7 +6,6 @@ package overlay import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -50,7 +49,7 @@ func TestSimpleDiff(t *testing.T) { Add("/root/.bashrc"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -75,7 +74,7 @@ func TestRenameDiff(t *testing.T) { Add("/dir2/f1"), } - if err := testDiffWithBase(l1, l2, diff, "redirect_dir=off"); err != nil { + if err := testDiffWithBase(t, l1, l2, diff, "redirect_dir=off"); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -110,7 +109,7 @@ func TestEmptyFileDiff(t *testing.T) { l2 := fstest.Apply() diff := []TestChange{} - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -134,7 +133,7 @@ func TestNestedDeletion(t *testing.T) { Delete("/d1"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -159,7 +158,7 @@ func TestDirectoryReplace(t *testing.T) { Modify("/dir1/f2"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -180,7 +179,7 @@ func TestRemoveDirectoryTree(t *testing.T) { Delete("/dir1"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -203,7 +202,7 @@ func TestRemoveDirectoryTreeWithDash(t *testing.T) { Delete("/dir1"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -226,7 +225,7 @@ func TestFileReplace(t *testing.T) { Add("/dir1/dir2/f1"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -255,7 +254,7 @@ func TestParentDirectoryPermission(t *testing.T) { Add("/dir3/f"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -307,7 +306,7 @@ func TestUpdateWithSameTime(t *testing.T) { Modify("/file-truncated-time-3"), } - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } @@ -330,34 +329,21 @@ func TestLchtimes(t *testing.T) { ) l2 := fstest.Apply() // empty diff := []TestChange{} - if err := testDiffWithBase(l1, l2, diff); err != nil { + if err := testDiffWithBase(t, l1, l2, diff); err != nil { t.Fatalf("Failed diff with base: %+v", err) } } } -func testDiffWithBase(base, diff fstest.Applier, expected []TestChange, opts ...string) error { - t1, err := ioutil.TempDir("", "diff-with-base-lower-") - if err != nil { - 
return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(t1) +func testDiffWithBase(t *testing.T, base, diff fstest.Applier, expected []TestChange, opts ...string) error { + t1 := t.TempDir() if err := base.Apply(t1); err != nil { return errors.Wrap(err, "failed to apply base filesystem") } - tupper, err := ioutil.TempDir("", "diff-with-base-upperdir-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(tupper) - - workdir, err := ioutil.TempDir("", "diff-with-base-workdir-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir") - } - defer os.RemoveAll(workdir) + tupper := t.TempDir() + workdir := t.TempDir() return mount.WithTempMount(context.Background(), []mount.Mount{ { @@ -369,7 +355,7 @@ func testDiffWithBase(base, diff fstest.Applier, expected []TestChange, opts ... if err := diff.Apply(overlayRoot); err != nil { return errors.Wrapf(err, "failed to apply diff to overlayRoot") } - if err := collectAndCheckChanges(t1, tupper, expected); err != nil { + if err := collectAndCheckChanges(t, t1, tupper, expected); err != nil { return errors.Wrap(err, "failed to collect changes") } return nil @@ -416,15 +402,11 @@ type TestChange struct { Source string } -func collectAndCheckChanges(base, upperdir string, expected []TestChange) error { +func collectAndCheckChanges(t *testing.T, base, upperdir string, expected []TestChange) error { ctx := context.Background() changes := []TestChange{} - emptyLower, err := ioutil.TempDir("", "buildkit-test-emptylower") // empty directory used for the lower of diff view - if err != nil { - return errors.Wrapf(err, "failed to create temp dir") - } - defer os.Remove(emptyLower) + emptyLower := t.TempDir() // empty directory used for the lower of diff view upperView := []mount.Mount{ { Type: "overlay", diff --git a/util/progress/multireader.go b/util/progress/multireader.go index 8d8bbf54c505..b0d92dde8f25 100644 --- a/util/progress/multireader.go +++ b/util/progress/multireader.go @@ -12,6 +12,7 @@ type MultiReader struct { initialized bool done chan struct{} writers map[*progressWriter]func() + sent []*Progress } func NewMultiReader(pr Reader) *MultiReader { @@ -31,9 +32,61 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader { pw, _, ctx := NewFromContext(ctx) w := pw.(*progressWriter) - mr.writers[w] = closeWriter + + isBehind := len(mr.sent) > 0 + + select { + case <-mr.done: + isBehind = true + default: + if !isBehind { + mr.writers[w] = closeWriter + } + } go func() { + if isBehind { + close := func() { + w.Close() + closeWriter() + } + i := 0 + for { + mr.mu.Lock() + sent := mr.sent + count := len(sent) - i + if count == 0 { + select { + case <-ctx.Done(): + close() + mr.mu.Unlock() + return + case <-mr.done: + close() + mr.mu.Unlock() + return + default: + } + mr.writers[w] = closeWriter + mr.mu.Unlock() + break + } + mr.mu.Unlock() + for i, p := range sent[i:] { + w.writeRawProgress(p) + if i%100 == 0 { + select { + case <-ctx.Done(): + close() + return + default: + } + } + } + i += count + } + } + select { case <-ctx.Done(): case <-mr.done: @@ -61,6 +114,7 @@ func (mr *MultiReader) handle() error { w.Close() c() } + close(mr.done) mr.mu.Unlock() return nil } @@ -72,6 +126,7 @@ func (mr *MultiReader) handle() error { w.writeRawProgress(p) } } + mr.sent = append(mr.sent, p...) 
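+ // mr.sent now holds the full history, which readers that attach late replay (see the isBehind path in Reader) before switching to live updates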
mr.mu.Unlock() } } diff --git a/util/progress/multiwriter.go b/util/progress/multiwriter.go index 1ce37ea210b3..a856db8caa09 100644 --- a/util/progress/multiwriter.go +++ b/util/progress/multiwriter.go @@ -36,9 +36,7 @@ func (ps *MultiWriter) Add(pw Writer) { } ps.mu.Lock() plist := make([]*Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } + plist = append(plist, ps.items...) sort.Slice(plist, func(i, j int) bool { return plist[i].Timestamp.Before(plist[j].Timestamp) }) diff --git a/util/progress/progress.go b/util/progress/progress.go index 83ca6672a890..fbbb22de071e 100644 --- a/util/progress/progress.go +++ b/util/progress/progress.go @@ -118,12 +118,22 @@ func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { done := make(chan struct{}) defer close(done) go func() { - select { - case <-done: - case <-ctx.Done(): - pr.mu.Lock() - pr.cond.Broadcast() - pr.mu.Unlock() + prdone := pr.ctx.Done() + for { + select { + case <-done: + return + case <-ctx.Done(): + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + return + case <-prdone: + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + prdone = nil + } } }() pr.mu.Lock() @@ -274,3 +284,20 @@ func (pw *noOpWriter) Write(_ string, _ interface{}) error { func (pw *noOpWriter) Close() error { return nil } + +func OneOff(ctx context.Context, id string) func(err error) error { + pw, _, _ := NewFromContext(ctx) + now := time.Now() + st := Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/util/progress/progressui/colors.go b/util/progress/progressui/colors.go new file mode 100644 index 000000000000..f6d3174769dc --- /dev/null +++ b/util/progress/progressui/colors.go @@ -0,0 +1,133 @@ +package progressui + +import ( + "encoding/csv" + "errors" + "strconv" + "strings" + + "github.com/morikuni/aec" + "github.com/sirupsen/logrus" +) + +var termColorMap = map[string]aec.ANSI{ + "default": aec.DefaultF, + + "black": aec.BlackF, + "blue": aec.BlueF, + "cyan": aec.CyanF, + "green": aec.GreenF, + "magenta": aec.MagentaF, + "red": aec.RedF, + "white": aec.WhiteF, + "yellow": aec.YellowF, + + "light-black": aec.LightBlackF, + "light-blue": aec.LightBlueF, + "light-cyan": aec.LightCyanF, + "light-green": aec.LightGreenF, + "light-magenta": aec.LightMagentaF, + "light-red": aec.LightRedF, + "light-white": aec.LightWhiteF, + "light-yellow": aec.LightYellowF, +} + +func setUserDefinedTermColors(colorsEnv string) { + fields := readBuildkitColorsEnv(colorsEnv) + if fields == nil { + return + } + for _, field := range fields { + k, v, ok := strings.Cut(field, "=") + if !ok || strings.Contains(v, "=") { + err := errors.New("A valid entry must have exactly two fields") + logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) + continue + } + k = strings.ToLower(k) + if c, ok := termColorMap[strings.ToLower(v)]; ok { + parseKeys(k, c) + } else if strings.Contains(v, ",") { + if c := readRGB(v); c != nil { + parseKeys(k, c) + } + } else { + err := errors.New("Colors must be a name from the pre-defined list or a valid 3-part RGB value") + logrus.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) + } + } +} + +func readBuildkitColorsEnv(colorsEnv string) []string { + csvReader := csv.NewReader(strings.NewReader(colorsEnv)) + csvReader.Comma = ':' + fields, err := csvReader.Read() + 
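+ // the env value is a single ':'-separated record, e.g. "run=green:warning=yellow:error=123,20,245"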
if err != nil { + logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. Falling back to defaults.") + return nil + } + return fields +} + +func readRGB(v string) aec.ANSI { + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + logrus.WithError(err).Warnf("Could not parse value %s as valid comma-separated RGB color. Ignoring.", v) + return nil + } + if len(fields) != 3 { + err = errors.New("A valid RGB color must have three fields") + logrus.WithError(err).Warnf("Could not parse value %s as valid RGB color. Ignoring.", v) + return nil + } + ok := isValidRGB(fields) + if ok { + p1, _ := strconv.Atoi(fields[0]) + p2, _ := strconv.Atoi(fields[1]) + p3, _ := strconv.Atoi(fields[2]) + c := aec.Color8BitF(aec.NewRGB8Bit(uint8(p1), uint8(p2), uint8(p3))) + return c + } + return nil +} + +func parseKeys(k string, c aec.ANSI) { + switch strings.ToLower(k) { + case "run": + colorRun = c + case "cancel": + colorCancel = c + case "error": + colorError = c + case "warning": + colorWarning = c + default: + logrus.Warnf("Unknown key found in BUILDKIT_COLORS (expected: run, cancel, error, or warning): %s", k) + } +} + +func isValidRGB(s []string) bool { + for _, n := range s { + num, err := strconv.Atoi(n) + if err != nil { + logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not an integer: %s", strings.Join(s, ",")) + return false + } + ok := isValidRGBValue(num) + if ok { + continue + } else { + logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not within the valid range of 0-255: %s", strings.Join(s, ",")) + return false + } + } + return true +} + +func isValidRGBValue(i int) bool { + if (i >= 0) && (i <= 255) { + return true + } + return false +} diff --git a/util/progress/progressui/display.go b/util/progress/progressui/display.go index 2d4ccd153e2d..edbdaaa75e59 100644 --- a/util/progress/progressui/display.go +++ b/util/progress/progressui/display.go @@ -108,6 +108,7 @@ type job struct { name string status string hasError bool + hasWarning bool // This is currently unused, but it's here for future use. isCanceled bool vertex *vertex showTerm bool @@ -829,8 +830,13 @@ func (disp *display) print(d displayInfo, width, height int, all bool) { color = colorCancel } else if j.hasError { color = colorError + } else if j.hasWarning { + // This is currently unused, but it's here for future use. + color = colorWarning + } + if color != nil { + out = aec.Apply(out, color) } - out = aec.Apply(out, color) } fmt.Fprint(disp.c, out) lineCount++ diff --git a/util/progress/progressui/init.go b/util/progress/progressui/init.go new file mode 100644 index 000000000000..75f0cb83d1de --- /dev/null +++ b/util/progress/progressui/init.go @@ -0,0 +1,37 @@ +package progressui + +import ( + "os" + "runtime" + + "github.com/morikuni/aec" +) + +var colorRun aec.ANSI +var colorCancel aec.ANSI +var colorWarning aec.ANSI +var colorError aec.ANSI + +func init() { + // As recommended on https://no-color.org/ + if v := os.Getenv("NO_COLOR"); v != "" { + // nil values will result in no ANSI color codes being emitted. 
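+ // e.g., NO_COLOR=1 (any non-empty value) disables colored progress output entirely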
+ return + } else if runtime.GOOS == "windows" { + colorRun = termColorMap["cyan"] + colorCancel = termColorMap["yellow"] + colorWarning = termColorMap["yellow"] + colorError = termColorMap["red"] + } else { + colorRun = termColorMap["blue"] + colorCancel = termColorMap["yellow"] + colorWarning = termColorMap["yellow"] + colorError = termColorMap["red"] + } + + // Loosely based on the standard set by Linux LS_COLORS. + if _, ok := os.LookupEnv("BUILDKIT_COLORS"); ok { + envColorString := os.Getenv("BUILDKIT_COLORS") + setUserDefinedTermColors(envColorString) + } +} diff --git a/util/progress/progressui/printer.go b/util/progress/progressui/printer.go index 550b8c0b351e..cc8e45be290d 100644 --- a/util/progress/progressui/printer.go +++ b/util/progress/progressui/printer.go @@ -170,10 +170,10 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { p.current = "" v.count = 0 + if v.logsPartial { + fmt.Fprintln(p.w, "") + } if v.Error != "" { - if v.logsPartial { - fmt.Fprintln(p.w, "") - } if strings.HasSuffix(v.Error, context.Canceled.Error()) { fmt.Fprintf(p.w, "#%d CANCELED\n", v.index) } else { diff --git a/util/progress/progressui/term.go b/util/progress/progressui/term.go deleted file mode 100644 index 08f1b8e4d17a..000000000000 --- a/util/progress/progressui/term.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows -// +build !windows - -package progressui - -import "github.com/morikuni/aec" - -var ( - colorRun = aec.BlueF - colorCancel = aec.YellowF - colorError = aec.RedF -) diff --git a/util/progress/progressui/term_windows.go b/util/progress/progressui/term_windows.go deleted file mode 100644 index c8ce914a0765..000000000000 --- a/util/progress/progressui/term_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build windows -// +build windows - -package progressui - -import "github.com/morikuni/aec" - -var ( - colorRun = aec.CyanF - colorCancel = aec.YellowF - colorError = aec.RedF -) diff --git a/util/pull/pull.go b/util/pull/pull.go index 003824027bd2..c66c4e784a6f 100644 --- a/util/pull/pull.go +++ b/util/pull/pull.go @@ -17,7 +17,6 @@ import ( "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/progress/logs" "github.com/moby/buildkit/util/pull/pullprogress" - "github.com/moby/buildkit/util/resolver" "github.com/moby/buildkit/util/resolver/limited" "github.com/moby/buildkit/util/resolver/retryhandler" digest "github.com/opencontainers/go-digest" @@ -25,9 +24,11 @@ import ( "github.com/pkg/errors" ) +type SessionResolver func(g session.Group) remotes.Resolver + type Puller struct { ContentStore content.Store - Resolver *resolver.Resolver + Resolver remotes.Resolver Src reference.Spec Platform ocispecs.Platform @@ -90,6 +91,11 @@ func (p *Puller) tryLocalResolve(ctx context.Context) error { if err != nil { return err } + + if ok, err := contentutil.HasSource(info, p.Src); err != nil || !ok { + return errors.Errorf("no matching source") + } + desc.Size = info.Size p.ref = p.Src.String() ra, err := p.ContentStore.ReaderAt(ctx, desc) @@ -105,7 +111,7 @@ func (p *Puller) tryLocalResolve(ctx context.Context) error { return nil } -func (p *Puller) PullManifests(ctx context.Context) (*PulledManifests, error) { +func (p *Puller) PullManifests(ctx context.Context, getResolver SessionResolver) (*PulledManifests, error) { err := p.resolve(ctx, p.Resolver) if err != nil { return nil, err @@ -196,7 +202,7 @@ func (p *Puller) PullManifests(ctx context.Context) (*PulledManifests, error) { Nonlayers: p.nonlayers, Descriptors: p.layers, Provider: func(g session.Group) 
content.Provider { - return &provider{puller: p, resolver: p.Resolver.WithSession(g)} + return &provider{puller: p, resolver: getResolver(g)} }, }, nil } diff --git a/util/pull/pullprogress/progress.go b/util/pull/pullprogress/progress.go index b743706ed955..93c50106f79d 100644 --- a/util/pull/pullprogress/progress.go +++ b/util/pull/pullprogress/progress.go @@ -106,6 +106,8 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa select { case <-ctx.Done(): onFinalStatus = true + // we need a context for the manager.Status() calls to pass once. after that this function will exit + ctx = context.TODO() case <-ticker.C: } diff --git a/util/purl/image.go b/util/purl/image.go new file mode 100644 index 000000000000..b3364ba4cecb --- /dev/null +++ b/util/purl/image.go @@ -0,0 +1,117 @@ +package purl + +import ( + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + packageurl "github.com/package-url/packageurl-go" + "github.com/pkg/errors" +) + +// RefToPURL converts an image reference with optional platform constraint to a package URL. +// Image references are defined in https://github.com/distribution/distribution/blob/v2.8.1/reference/reference.go#L1 +// Package URLs are defined in https://github.com/package-url/purl-spec +func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return "", errors.Wrapf(err, "failed to parse ref %q", ref) + } + var qualifiers []packageurl.Qualifier + + if canonical, ok := named.(reference.Canonical); ok { + qualifiers = append(qualifiers, packageurl.Qualifier{ + Key: "digest", + Value: canonical.Digest().String(), + }) + } else { + named = reference.TagNameOnly(named) + } + + version := "" + if tagged, ok := named.(reference.Tagged); ok { + version = tagged.Tag() + } + + name := reference.FamiliarName(named) + + ns := "" + parts := strings.Split(name, "/") + if len(parts) > 1 { + ns = strings.Join(parts[:len(parts)-1], "/") + } + name = parts[len(parts)-1] + + if platform != nil { + p := platforms.Normalize(*platform) + qualifiers = append(qualifiers, packageurl.Qualifier{ + Key: "platform", + Value: platforms.Format(p), + }) + } + + p := packageurl.NewPackageURL("docker", ns, name, version, qualifiers, "") + return p.ToString(), nil +} + +// PURLToRef converts a package URL to an image reference and platform. 
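+//
+// For example, "pkg:docker/alpine@3.15?platform=linux%2Famd64" maps back to
+// "docker.io/library/alpine:3.15" with platform "linux/amd64".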
+func PURLToRef(purl string) (string, *ocispecs.Platform, error) {
+	p, err := packageurl.FromString(purl)
+	if err != nil {
+		return "", nil, err
+	}
+	if p.Type != "docker" {
+		return "", nil, errors.Errorf("invalid package type %q, expecting docker", p.Type)
+	}
+	ref := p.Name
+	if p.Namespace != "" {
+		ref = p.Namespace + "/" + ref
+	}
+	dgstVersion := ""
+	if p.Version != "" {
+		dgst, err := digest.Parse(p.Version)
+		if err == nil {
+			ref = ref + "@" + dgst.String()
+			dgstVersion = dgst.String()
+		} else {
+			ref += ":" + p.Version
+		}
+	}
+	var platform *ocispecs.Platform
+	for _, q := range p.Qualifiers {
+		if q.Key == "platform" {
+			p, err := platforms.Parse(q.Value)
+			if err != nil {
+				return "", nil, err
+			}
+			platform = &p
+		}
+		if q.Key == "digest" {
+			if dgstVersion != "" {
+				if dgstVersion != q.Value {
+					return "", nil, errors.Errorf("digest %q does not match version %q", q.Value, dgstVersion)
+				}
+				continue
+			}
+			dgst, err := digest.Parse(q.Value)
+			if err != nil {
+				return "", nil, err
+			}
+			ref = ref + "@" + dgst.String()
+			dgstVersion = dgst.String()
+		}
+	}
+
+	if dgstVersion == "" && p.Version == "" {
+		ref += ":latest"
+	}
+
+	named, err := reference.ParseNormalizedNamed(ref)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "invalid image url %q", purl)
+	}
+
+	return named.String(), platform, nil
+}
diff --git a/util/purl/image_test.go b/util/purl/image_test.go
new file mode 100644
index 000000000000..621de37bff16
--- /dev/null
+++ b/util/purl/image_test.go
@@ -0,0 +1,158 @@
+package purl
+
+import (
+	"net/url"
+	"testing"
+
+	"github.com/containerd/containerd/platforms"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRefToPURL(t *testing.T) {
+	testDgst := digest.FromBytes([]byte("test")).String()
+	p := platforms.DefaultSpec()
+	testPlatform := &p
+
+	expPlatform := url.QueryEscape(platforms.Format(platforms.Normalize(p)))
+
+	tcases := []struct {
+		ref      string
+		platform *ocispecs.Platform
+		expected string
+		err      bool
+	}{
+		{
+			ref:      "alpine",
+			expected: "pkg:docker/alpine@latest",
+		},
+		{
+			ref:      "library/alpine:3.15",
+			expected: "pkg:docker/alpine@3.15",
+		},
+		{
+			ref:      "docker.io/library/alpine:latest",
+			expected: "pkg:docker/alpine@latest",
+		},
+		{
+			ref:      "docker.io/library/alpine:latest@" + testDgst,
+			expected: "pkg:docker/alpine@latest?digest=" + testDgst,
+		},
+		{
+			ref:      "docker.io/library/alpine@" + testDgst,
+			expected: "pkg:docker/alpine?digest=" + testDgst,
+		},
+		{
+			ref:      "user/test:v2",
+			expected: "pkg:docker/user/test@v2",
+		},
+		{
+			ref:      "ghcr.io/foo/bar:v2",
+			expected: "pkg:docker/ghcr.io/foo/bar@v2",
+		},
+		{
+			ref:      "ghcr.io/foo/bar",
+			expected: "pkg:docker/ghcr.io/foo/bar@latest",
+		},
+		{
+			ref:      "busybox",
+			platform: testPlatform,
+			expected: "pkg:docker/busybox@latest?platform=" + expPlatform,
+		},
+		{
+			ref:      "busybox@" + testDgst,
+			platform: testPlatform,
+			expected: "pkg:docker/busybox?digest=" + testDgst + "&platform=" + expPlatform,
+		},
+		{
+			ref: "inv:al:id",
+			err: true,
+		},
+	}
+
+	for _, tc := range tcases {
+		tc := tc
+		t.Run(tc.ref, func(t *testing.T) {
+			purl, err := RefToPURL(tc.ref, tc.platform)
+			if tc.err {
+				require.Error(t, err)
+				return
+			}
+			require.NoError(t, err)
+			require.Equal(t, tc.expected, purl)
+		})
+	}
+}
+
+func TestPURLToRef(t *testing.T) {
+	testDgst := digest.FromBytes([]byte("test")).String()
+	p := platforms.Normalize(platforms.DefaultSpec())
+	p.OSVersion = "" // OSVersion is not supported in PURL
+	testPlatform := &p
+
+	encPlatform := url.QueryEscape(platforms.Format(platforms.Normalize(p)))
+
+	tcases := []struct {
+		purl     string
+		err      bool
+		expected string
+		platform *ocispecs.Platform
+	}{
+		{
+			purl:     "pkg:docker/alpine@latest",
+			expected: "docker.io/library/alpine:latest",
+		},
+		{
+			purl:     "pkg:docker/alpine",
+			expected: "docker.io/library/alpine:latest",
+		},
+		{
+			purl:     "pkg:docker/alpine?digest=" + testDgst,
+			expected: "docker.io/library/alpine@" + testDgst,
+		},
+		{
+			purl:     "pkg:docker/library/alpine@3.15?digest=" + testDgst,
+			expected: "docker.io/library/alpine:3.15@" + testDgst,
+		},
+		{
+			purl:     "pkg:docker/ghcr.io/foo/bar@v2",
+			expected: "ghcr.io/foo/bar:v2",
+		},
+		{
+			purl:     "pkg:docker/busybox@latest?platform=" + encPlatform,
+			expected: "docker.io/library/busybox:latest",
+			platform: testPlatform,
+		},
+		{
+			purl: "pkg:busybox@latest",
+			err:  true,
+		},
+	}
+
+	for _, tc := range tcases {
+		tc := tc
+		t.Run(tc.purl, func(t *testing.T) {
+			ref, platform, err := PURLToRef(tc.purl)
+			if tc.err {
+				require.Error(t, err)
+				return
+			}
+			require.NoError(t, err)
+			require.Equal(t, tc.expected, ref)
+			if platform == nil {
+				require.Nil(t, tc.platform)
+			} else {
+				require.Equal(t, *tc.platform, *platform)
+			}
+		})
+	}
+}
diff --git a/util/push/push.go b/util/push/push.go
index ffa3d35f326a..881b2fd86f16 100644
--- a/util/push/push.go
+++ b/util/push/push.go
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
@@ -16,6 +15,7 @@ import (
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/docker/distribution/reference"
 	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/util/attestation"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/imageutil"
 	"github.com/moby/buildkit/util/progress"
@@ -126,7 +126,7 @@ func Push(ctx context.Context, sm *session.Manager, sid string, provider content
 		return err
 	}
 
-	layersDone := oneOffProgress(ctx, "pushing layers")
+	layersDone := progress.OneOff(ctx, "pushing layers")
 	err = images.Dispatch(ctx, skipNonDistributableBlobs(images.Handlers(handlers...)), nil, ocispecs.Descriptor{
 		Digest: dgst,
 		Size:   ra.Size(),
@@ -136,7 +136,7 @@ func Push(ctx context.Context, sm *session.Manager, sid string, provider content
 		return err
 	}
 
-	mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref))
+	mfstDone := progress.OneOff(ctx, fmt.Sprintf("pushing manifest for %s", ref))
 	for i := len(manifestStack) - 1; i >= 0; i-- {
 		if _, err := pushHandler(ctx, manifestStack[i]); err != nil {
 			return mfstDone(err)
@@ -212,23 +212,6 @@ func annotateDistributionSourceHandler(manager content.Manager, annotations map[
 	}
 }
 
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.NewFromContext(ctx)
-	now := time.Now()
-	st := progress.Status{
-		Started: &now,
-	}
-	pw.Write(id, st)
-	return func(err error) error {
-		// TODO: set error on status
-		now := time.Now()
-		st.Completed = &now
-		pw.Write(id, st)
-		pw.Close()
-		return err
-	}
-}
-
 func childrenHandler(provider content.Provider) images.HandlerFunc {
 	return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) {
 		var descs []ocispecs.Descriptor
@@ -266,7 +249,8 @@ func childrenHandler(provider content.Provider) images.HandlerFunc {
 		}
 	case
images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, - ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: + ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. return nil, nil default: diff --git a/util/resolver/authorizer.go b/util/resolver/authorizer.go index ed8034ccbc54..6a4140d68b1b 100644 --- a/util/resolver/authorizer.go +++ b/util/resolver/authorizer.go @@ -279,7 +279,7 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { username, secret := ah.common.Username, ah.common.Secret if username == "" || secret == "" { - return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + return "", errors.New("failed to handle basic auth because missing username or secret") } auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) diff --git a/util/resolver/resolver.go b/util/resolver/resolver.go index a23f4b15cfed..0639a1b623de 100644 --- a/util/resolver/resolver.go +++ b/util/resolver/resolver.go @@ -3,7 +3,6 @@ package resolver import ( "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/http" "os" @@ -67,7 +66,7 @@ func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHos func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { for _, d := range c.TLSConfigDir { - fs, err := ioutil.ReadDir(d) + fs, err := os.ReadDir(d) if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { return nil, errors.WithStack(err) } @@ -98,7 +97,7 @@ func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { } for _, p := range c.RootCAs { - dt, err := ioutil.ReadFile(p) + dt, err := os.ReadFile(p) if err != nil { return nil, errors.Wrapf(err, "failed to read %s", p) } diff --git a/util/resolver/retryhandler/retry.go b/util/resolver/retryhandler/retry.go index 554076b07b41..1a2f54ed7651 100644 --- a/util/resolver/retryhandler/retry.go +++ b/util/resolver/retryhandler/retry.go @@ -14,6 +14,10 @@ import ( "github.com/pkg/errors" ) +// MaxRetryBackoff is the maximum backoff time before giving up. This is a +// variable so that code which embeds BuildKit can override the default value. 
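+//
+// For example, an embedder that tolerates slow registries could raise the
+// limit before using the handler (an illustrative sketch, not part of this
+// change):
+//
+//	retryhandler.MaxRetryBackoff = 30 * time.Second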
+var MaxRetryBackoff = 8 * time.Second + func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { backoff := time.Second @@ -35,7 +39,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { return descs, nil } // backoff logic - if backoff >= 8*time.Second { + if backoff >= MaxRetryBackoff { return nil, err } if logger != nil { @@ -60,7 +64,7 @@ func retryError(err error) bool { return true } // catches TLS timeout or other network-related temporary errors - if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { + if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { //nolint:staticcheck // ignoring "SA1019: Temporary is deprecated", continue to propagate net.Error through the "temporary" status return true } // https://github.com/containerd/containerd/pull/4724 diff --git a/util/sshutil/keyscan.go b/util/sshutil/keyscan.go index 3c7583ffdd62..163efee80e12 100644 --- a/util/sshutil/keyscan.go +++ b/util/sshutil/keyscan.go @@ -1,6 +1,7 @@ package sshutil import ( + "errors" "fmt" "net" "strconv" @@ -11,7 +12,7 @@ import ( const defaultPort = 22 -var errCallbackDone = fmt.Errorf("callback failed on purpose") +var errCallbackDone = errors.New("callback failed on purpose") // addDefaultPort appends a default port if hostport doesn't contain one func addDefaultPort(hostport string, defaultPort int) string { diff --git a/util/stack/stack.go b/util/stack/stack.go index 3409ac047af0..18d03630b47e 100644 --- a/util/stack/stack.go +++ b/util/stack/stack.go @@ -79,7 +79,7 @@ func Enable(err error) error { return err } -func Wrap(err error, s Stack) error { +func Wrap(err error, s *Stack) error { return &withStack{stack: s, error: err} } @@ -151,7 +151,7 @@ func convertStack(s errors.StackTrace) *Stack { if idx == -1 { continue } - line, err := strconv.Atoi(p[1][idx+1:]) + line, err := strconv.ParseInt(p[1][idx+1:], 10, 32) if err != nil { continue } @@ -169,7 +169,7 @@ func convertStack(s errors.StackTrace) *Stack { } type withStack struct { - stack Stack + stack *Stack error } @@ -178,5 +178,5 @@ func (e *withStack) Unwrap() error { } func (e *withStack) StackTrace() *Stack { - return &e.stack + return e.stack } diff --git a/util/stack/stack.pb.go b/util/stack/stack.pb.go index df55582db48a..c4a73a68f485 100644 --- a/util/stack/stack.pb.go +++ b/util/stack/stack.pb.go @@ -1,172 +1,261 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.11.4 // source: stack.proto package stack import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Stack struct { - Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` - Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` - Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stack) Reset() { *m = Stack{} } -func (m *Stack) String() string { return proto.CompactTextString(m) } -func (*Stack) ProtoMessage() {} -func (*Stack) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{0} -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Stack) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stack.Unmarshal(m, b) + Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` + Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` } -func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stack.Marshal(b, m, deterministic) -} -func (m *Stack) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stack.Merge(m, src) + +func (x *Stack) Reset() { + *x = Stack{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Stack) XXX_Size() int { - return xxx_messageInfo_Stack.Size(m) + +func (x *Stack) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Stack) XXX_DiscardUnknown() { - xxx_messageInfo_Stack.DiscardUnknown(m) + +func (*Stack) ProtoMessage() {} + +func (x *Stack) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Stack proto.InternalMessageInfo +// Deprecated: Use Stack.ProtoReflect.Descriptor instead. 
+func (*Stack) Descriptor() ([]byte, []int) { + return file_stack_proto_rawDescGZIP(), []int{0} +} -func (m *Stack) GetFrames() []*Frame { - if m != nil { - return m.Frames +func (x *Stack) GetFrames() []*Frame { + if x != nil { + return x.Frames } return nil } -func (m *Stack) GetCmdline() []string { - if m != nil { - return m.Cmdline +func (x *Stack) GetCmdline() []string { + if x != nil { + return x.Cmdline } return nil } -func (m *Stack) GetPid() int32 { - if m != nil { - return m.Pid +func (x *Stack) GetPid() int32 { + if x != nil { + return x.Pid } return 0 } -func (m *Stack) GetVersion() string { - if m != nil { - return m.Version +func (x *Stack) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Stack) GetRevision() string { - if m != nil { - return m.Revision +func (x *Stack) GetRevision() string { + if x != nil { + return x.Revision } return "" } type Frame struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` - Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Frame) Reset() { *m = Frame{} } -func (m *Frame) String() string { return proto.CompactTextString(m) } -func (*Frame) ProtoMessage() {} -func (*Frame) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{1} + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` } -func (m *Frame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Frame.Unmarshal(m, b) -} -func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Frame.Marshal(b, m, deterministic) -} -func (m *Frame) XXX_Merge(src proto.Message) { - xxx_messageInfo_Frame.Merge(m, src) +func (x *Frame) Reset() { + *x = Frame{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Frame) XXX_Size() int { - return xxx_messageInfo_Frame.Size(m) + +func (x *Frame) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Frame) XXX_DiscardUnknown() { - xxx_messageInfo_Frame.DiscardUnknown(m) + +func (*Frame) ProtoMessage() {} + +func (x *Frame) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Frame proto.InternalMessageInfo +// Deprecated: Use Frame.ProtoReflect.Descriptor instead. 
+func (*Frame) Descriptor() ([]byte, []int) { + return file_stack_proto_rawDescGZIP(), []int{1} +} -func (m *Frame) GetName() string { - if m != nil { - return m.Name +func (x *Frame) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Frame) GetFile() string { - if m != nil { - return m.File +func (x *Frame) GetFile() string { + if x != nil { + return x.File } return "" } -func (m *Frame) GetLine() int32 { - if m != nil { - return m.Line +func (x *Frame) GetLine() int32 { + if x != nil { + return x.Line } return 0 } -func init() { - proto.RegisterType((*Stack)(nil), "stack.Stack") - proto.RegisterType((*Frame)(nil), "stack.Frame") +var File_stack_proto protoreflect.FileDescriptor + +var file_stack_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x24, + 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_stack_proto_rawDescOnce sync.Once + file_stack_proto_rawDescData = file_stack_proto_rawDesc +) + +func file_stack_proto_rawDescGZIP() []byte { + file_stack_proto_rawDescOnce.Do(func() { + file_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_stack_proto_rawDescData) + }) + return file_stack_proto_rawDescData } -func init() { - proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) +var file_stack_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_stack_proto_goTypes = []interface{}{ + (*Stack)(nil), // 0: stack.Stack + (*Frame)(nil), // 1: stack.Frame +} +var file_stack_proto_depIdxs = []int32{ + 1, // 0: stack.Stack.frames:type_name -> stack.Frame + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -var fileDescriptor_b44c07feb2ca0a5a = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40, - 0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45, - 0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 
0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7, - 0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca, - 0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf, - 0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a, - 0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c, - 0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08, - 0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2, - 0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d, - 0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00, +func init() { file_stack_proto_init() } +func file_stack_proto_init() { + if File_stack_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_stack_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_stack_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Frame); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_stack_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_stack_proto_goTypes, + DependencyIndexes: file_stack_proto_depIdxs, + MessageInfos: file_stack_proto_msgTypes, + }.Build() + File_stack_proto = out.File + file_stack_proto_rawDesc = nil + file_stack_proto_goTypes = nil + file_stack_proto_depIdxs = nil } diff --git a/util/staticfs/merge.go b/util/staticfs/merge.go new file mode 100644 index 000000000000..d680b80cfcce --- /dev/null +++ b/util/staticfs/merge.go @@ -0,0 +1,110 @@ +package staticfs + +import ( + "context" + "io" + "io/fs" + "os" + "path/filepath" + + "github.com/tonistiigi/fsutil" + "golang.org/x/sync/errgroup" +) + +type MergeFS struct { + Lower fsutil.FS + Upper fsutil.FS +} + +var _ fsutil.FS = &MergeFS{} + +func NewMergeFS(lower, upper fsutil.FS) *MergeFS { + return &MergeFS{ + Lower: lower, + Upper: upper, + } +} + +type record struct { + path string + fi fs.FileInfo + err error +} + +func (r *record) key() string { + if r == nil { + return "" + } + return convertPathToKey(r.path) +} + +func (mfs *MergeFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { + ch1 := make(chan *record, 10) + ch2 := make(chan *record, 10) + + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { + defer close(ch1) + return mfs.Lower.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + select { + case ch1 <- &record{path: path, fi: info, err: err}: + case <-ctx.Done(): + } + return ctx.Err() + }) + }) + eg.Go(func() error { + defer close(ch2) + return mfs.Upper.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + select { + case ch2 <- &record{path: path, fi: info, err: err}: + case <-ctx.Done(): + } + return ctx.Err() + }) + }) + + eg.Go(func() error { + next1, ok1 := <-ch1 + key1 := next1.key() + next2, ok2 := <-ch2 + key2 := next2.key() + + for { + 
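+			// Two-way merge of the two sorted walk streams (editorial note added
+			// for clarity): emit whichever record has the lexicographically
+			// smaller key; on a tie the upper entry wins and the lower is skipped.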
if !ok1 && !ok2 { + break + } + if !ok2 || ok1 && key1 < key2 { + if err := fn(next1.path, next1.fi, next1.err); err != nil { + return err + } + next1, ok1 = <-ch1 + key1 = next1.key() + } else if !ok1 || ok2 && key1 >= key2 { + if err := fn(next2.path, next2.fi, next2.err); err != nil { + return err + } + if ok1 && key1 == key2 { + next1, ok1 = <-ch1 + key1 = next1.key() + } + next2, ok2 = <-ch2 + key2 = next2.key() + } + } + return nil + }) + + return eg.Wait() +} + +func (mfs *MergeFS) Open(p string) (io.ReadCloser, error) { + r, err := mfs.Upper.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + return mfs.Lower.Open(p) + } + return r, nil +} diff --git a/util/staticfs/merge_test.go b/util/staticfs/merge_test.go new file mode 100644 index 000000000000..76dc165b0af1 --- /dev/null +++ b/util/staticfs/merge_test.go @@ -0,0 +1,98 @@ +package staticfs + +import ( + "context" + "io" + iofs "io/fs" + "os" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tonistiigi/fsutil/types" +) + +func TestMerge(t *testing.T) { + fs1 := NewFS() + fs1.Add("foo", types.Stat{Mode: 0644}, []byte("foofoo")) + fs1.Add("bar", types.Stat{Mode: 0444}, []byte("barbarbar")) + + fs2 := NewFS() + fs2.Add("abc", types.Stat{Mode: 0400}, []byte("abcabc")) + fs2.Add("foo", types.Stat{Mode: 0440}, []byte("foofoofoofoo")) + + fs := NewMergeFS(fs1, fs2) + + rc, err := fs.Open("foo") + require.NoError(t, err) + + data, err := io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("foofoofoofoo"), data) + require.NoError(t, rc.Close()) + + rc, err = fs.Open("bar") + require.NoError(t, err) + + data, err = io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("barbarbar"), data) + + var files []string + err = fs.Walk(context.TODO(), func(path string, info iofs.FileInfo, err error) error { + require.NoError(t, err) + switch path { + case "foo": + require.Equal(t, int64(12), info.Size()) + require.Equal(t, os.FileMode(0440), info.Mode()) + case "bar": + require.Equal(t, int64(9), info.Size()) + require.Equal(t, os.FileMode(0444), info.Mode()) + case "abc": + require.Equal(t, int64(6), info.Size()) + require.Equal(t, os.FileMode(0400), info.Mode()) + default: + require.Fail(t, "unexpected path", path) + } + files = append(files, path) + return nil + }) + require.NoError(t, err) + + require.Equal(t, []string{"abc", "bar", "foo"}, files) + + // extra level + fs3 := NewFS() + fs3.Add("bax", types.Stat{Mode: 0600}, []byte("bax")) + + fs = NewMergeFS(fs, fs3) + + rc, err = fs.Open("bar") + require.NoError(t, err) + + data, err = io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("barbarbar"), data) + require.NoError(t, rc.Close()) + + rc, err = fs.Open("bax") + require.NoError(t, err) + + data, err = io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("bax"), data) + require.NoError(t, rc.Close()) + + _, err = fs.Open("bay") + require.Error(t, err) + require.True(t, os.IsNotExist(err)) + + files = nil + err = fs.Walk(context.TODO(), func(path string, info iofs.FileInfo, err error) error { + require.NoError(t, err) + files = append(files, path) + return nil + }) + require.NoError(t, err) + + require.Equal(t, []string{"abc", "bar", "bax", "foo"}, files) +} diff --git a/util/staticfs/static.go b/util/staticfs/static.go new file mode 100644 index 000000000000..3b00060688f0 --- /dev/null +++ b/util/staticfs/static.go @@ -0,0 +1,74 @@ +package staticfs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "sort" + "strings" + + 
"github.com/tonistiigi/fsutil" + "github.com/tonistiigi/fsutil/types" +) + +type File struct { + Stat types.Stat + Data []byte +} + +type FS struct { + files map[string]File +} + +var _ fsutil.FS = &FS{} + +func NewFS() *FS { + return &FS{ + files: map[string]File{}, + } +} + +func (fs *FS) Add(p string, stat types.Stat, data []byte) { + stat.Size_ = int64(len(data)) + if stat.Mode == 0 { + stat.Mode = 0644 + } + stat.Path = p + fs.files[p] = File{ + Stat: stat, + Data: data, + } +} + +func (fs *FS) Walk(ctx context.Context, fn filepath.WalkFunc) error { + keys := make([]string, 0, len(fs.files)) + for k := range fs.files { + keys = append(keys, convertPathToKey(k)) + } + sort.Strings(keys) + for _, k := range keys { + p := convertKeyToPath(k) + st := fs.files[p].Stat + if err := fn(p, &fsutil.StatInfo{Stat: &st}, nil); err != nil { + return err + } + } + return nil +} + +func (fs *FS) Open(p string) (io.ReadCloser, error) { + if f, ok := fs.files[p]; ok { + return io.NopCloser(bytes.NewReader(f.Data)), nil + } + return nil, os.ErrNotExist +} + +func convertPathToKey(p string) string { + return strings.Replace(p, "/", "\x00", -1) +} + +func convertKeyToPath(p string) string { + return strings.Replace(p, "\x00", "/", -1) +} diff --git a/util/staticfs/static_test.go b/util/staticfs/static_test.go new file mode 100644 index 000000000000..d8771af092d4 --- /dev/null +++ b/util/staticfs/static_test.go @@ -0,0 +1,68 @@ +package staticfs + +import ( + "context" + "io" + iofs "io/fs" + "os" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tonistiigi/fsutil/types" +) + +func TestStatic(t *testing.T) { + fs := NewFS() + fs.Add("foo", types.Stat{Mode: 0644}, []byte("foofoo")) + fs.Add("bar", types.Stat{Mode: 0444}, []byte("barbarbar")) + + rc, err := fs.Open("bar") + require.NoError(t, err) + require.NoError(t, rc.Close()) + + data, err := io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("barbarbar"), data) + + _, err = fs.Open("abc") + require.Error(t, err) + require.True(t, os.IsNotExist(err)) + + var files []string + err = fs.Walk(context.TODO(), func(path string, info iofs.FileInfo, err error) error { + require.NoError(t, err) + switch path { + case "foo": + require.Equal(t, int64(6), info.Size()) + require.Equal(t, os.FileMode(0644), info.Mode()) + case "bar": + require.Equal(t, int64(9), info.Size()) + require.Equal(t, os.FileMode(0444), info.Mode()) + default: + require.Fail(t, "unexpected path", path) + } + files = append(files, path) + return nil + }) + require.NoError(t, err) + + require.Equal(t, []string{"bar", "foo"}, files) + + fs.Add("abc", types.Stat{Mode: 0444}, []byte("abcabcabc")) + + rc, err = fs.Open("abc") + require.NoError(t, err) + + data, err = io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("abcabcabc"), data) + require.NoError(t, rc.Close()) + + rc, err = fs.Open("foo") + require.NoError(t, err) + + data, err = io.ReadAll(rc) + require.NoError(t, err) + require.Equal(t, []byte("foofoo"), data) + require.NoError(t, rc.Close()) +} diff --git a/util/system/atime_unix.go b/util/system/atime_unix.go new file mode 100644 index 000000000000..9a7af36ffcc6 --- /dev/null +++ b/util/system/atime_unix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package system + +import ( + iofs "io/fs" + "syscall" + "time" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func Atime(st iofs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, 
errors.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return fs.StatATimeAsTime(stSys), nil +} diff --git a/util/system/atime_windows.go b/util/system/atime_windows.go new file mode 100644 index 000000000000..808408b613cf --- /dev/null +++ b/util/system/atime_windows.go @@ -0,0 +1,17 @@ +package system + +import ( + "fmt" + iofs "io/fs" + "syscall" + "time" +) + +func Atime(st iofs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Win32FileAttributeData) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Win32FileAttributeData, got %T", st.Sys()) + } + // ref: https://github.com/golang/go/blob/go1.19.2/src/os/types_windows.go#L230 + return time.Unix(0, stSys.LastAccessTime.Nanoseconds()), nil +} diff --git a/util/system/path_windows.go b/util/system/path_windows.go index 85141668270d..cc7b664d8b86 100644 --- a/util/system/path_windows.go +++ b/util/system/path_windows.go @@ -4,9 +4,10 @@ package system import ( - "fmt" "path/filepath" "strings" + + "github.com/pkg/errors" ) // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. @@ -22,13 +23,13 @@ import ( // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) + return "", errors.Errorf("No relative path specified in %q", path) } if !filepath.IsAbs(path) || len(path) < 2 { return filepath.FromSlash(path), nil } if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") + return "", errors.New("The specified path is not on the system drive (C:)") } return filepath.FromSlash(path[2:]), nil } diff --git a/util/testutil/dockerd/config.go b/util/testutil/dockerd/config.go new file mode 100644 index 000000000000..b3b86feff458 --- /dev/null +++ b/util/testutil/dockerd/config.go @@ -0,0 +1,16 @@ +package dockerd + +type Config struct { + Features map[string]bool `json:"features,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + Builder BuilderConfig `json:"builder,omitempty"` +} + +type BuilderEntitlements struct { + NetworkHost bool `json:"network-host,omitempty"` + SecurityInsecure bool `json:"security-insecure,omitempty"` +} + +type BuilderConfig struct { + Entitlements BuilderEntitlements `json:",omitempty"` +} diff --git a/util/testutil/dockerd/daemon.go b/util/testutil/dockerd/daemon.go new file mode 100644 index 000000000000..1054e1f36946 --- /dev/null +++ b/util/testutil/dockerd/daemon.go @@ -0,0 +1,241 @@ +package dockerd + +import ( + "bytes" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/moby/buildkit/identity" + "github.com/pkg/errors" +) + +type LogT interface { + Logf(string, ...interface{}) +} + +type nopLog struct{} + +func (nopLog) Logf(string, ...interface{}) {} + +const ( + shortLen = 12 + defaultDockerdBinary = "dockerd" +) + +type Option func(*Daemon) + +type Daemon struct { + root string + folder string + Wait chan error + id string + cmd *exec.Cmd + storageDriver string + execRoot string + dockerdBinary string + Log LogT + pidFile string + sockPath string + args []string +} + +var sockRoot = filepath.Join(os.TempDir(), "docker-integration") + +func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { + if err := os.MkdirAll(sockRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to create daemon socket root %q", 
sockRoot) + } + + id := "d" + identity.NewID()[:shortLen] + daemonFolder, err := filepath.Abs(filepath.Join(workingDir, id)) + if err != nil { + return nil, err + } + daemonRoot := filepath.Join(daemonFolder, "root") + if err := os.MkdirAll(daemonRoot, 0755); err != nil { + return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) + } + + d := &Daemon{ + id: id, + folder: daemonFolder, + root: daemonRoot, + storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), + // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) + execRoot: filepath.Join(os.TempDir(), "dxr", id), + dockerdBinary: defaultDockerdBinary, + Log: nopLog{}, + sockPath: filepath.Join(sockRoot, id+".sock"), + } + + for _, op := range ops { + op(d) + } + + return d, nil +} + +func (d *Daemon) Sock() string { + return "unix://" + d.sockPath +} + +func (d *Daemon) StartWithError(daemonLogs map[string]*bytes.Buffer, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(d.dockerdBinary) + if err != nil { + return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) + } + + if d.pidFile == "" { + d.pidFile = filepath.Join(d.folder, "docker.pid") + } + + d.args = []string{ + "--data-root", d.root, + "--exec-root", d.execRoot, + "--pidfile", d.pidFile, + "--containerd-namespace", d.id, + "--containerd-plugins-namespace", d.id + "p", + "--host", d.Sock(), + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.args = append(d.args, "--userns-remap", root) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + var foundLog, foundSd bool + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + d.args = append(d.args, "--debug") + } + if d.storageDriver != "" && !foundSd { + d.args = append(d.args, "--storage-driver", d.storageDriver) + } + + d.args = append(d.args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, d.args...) + d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1", "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1") + + if daemonLogs != nil { + b := new(bytes.Buffer) + daemonLogs["stdout: "+d.cmd.Path] = b + d.cmd.Stdout = &lockingWriter{Writer: b} + b = new(bytes.Buffer) + daemonLogs["stderr: "+d.cmd.Path] = b + d.cmd.Stderr = &lockingWriter{Writer: b} + } + + if err := d.cmd.Start(); err != nil { + return errors.Wrapf(err, "[%s] could not start daemon container", d.id) + } + + wait := make(chan error, 1) + + go func() { + ret := d.cmd.Wait() + d.Log.Logf("[%s] exiting daemon", d.id) + // If we send before logging, we might accidentally log _after_ the test is done. + // As of Go 1.12, this incurs a panic instead of silently being dropped. 
+		wait <- ret
+		close(wait)
+	}()
+
+	d.Wait = wait
+
+	d.Log.Logf("[%s] daemon started\n", d.id)
+	return nil
+}
+
+var errDaemonNotStarted = errors.New("daemon not started")
+
+func (d *Daemon) StopWithError() (err error) {
+	if d.cmd == nil || d.Wait == nil {
+		return errDaemonNotStarted
+	}
+	defer func() {
+		if err != nil {
+			d.Log.Logf("[%s] error while stopping daemon: %v", d.id, err)
+		} else {
+			d.Log.Logf("[%s] daemon stopped", d.id)
+			if d.pidFile != "" {
+				_ = os.Remove(d.pidFile)
+			}
+		}
+		d.cmd = nil
+	}()
+
+	i := 1
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+	tick := ticker.C
+
+	d.Log.Logf("[%s] stopping daemon", d.id)
+
+	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+		if strings.Contains(err.Error(), "os: process already finished") {
+			return errDaemonNotStarted
+		}
+		return errors.Wrapf(err, "[%s] could not send signal", d.id)
+	}
+
+out1:
+	for {
+		select {
+		case err := <-d.Wait:
+			return err
+		case <-time.After(20 * time.Second):
+			// time for stopping jobs and run onShutdown hooks
+			d.Log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
+			break out1
+		}
+	}
+
+out2:
+	for {
+		select {
+		case err := <-d.Wait:
+			return err
+		case <-tick:
+			i++
+			if i > 5 {
+				d.Log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i)
+				break out2
+			}
+			d.Log.Logf("[%s] attempt #%d/5: daemon is still running with pid %d", d.id, i, d.cmd.Process.Pid)
+			if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
+				return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i)
+			}
+		}
+	}
+
+	if err := d.cmd.Process.Kill(); err != nil {
+		d.Log.Logf("[%s] failed to kill daemon: %v", d.id, err)
+		return err
+	}
+
+	return nil
+}
+
+type lockingWriter struct {
+	mu sync.Mutex
+	io.Writer
+}
+
+func (w *lockingWriter) Write(dt []byte) (int, error) {
+	w.mu.Lock()
+	n, err := w.Writer.Write(dt)
+	w.mu.Unlock()
+	return n, err
+}
diff --git a/util/testutil/echoserver/server.go b/util/testutil/echoserver/server.go
index efeac5ca38cf..099138d33c39 100644
--- a/util/testutil/echoserver/server.go
+++ b/util/testutil/echoserver/server.go
@@ -11,7 +11,7 @@ type TestServer interface {
 }
 
 func NewTestServer(response string) (TestServer, error) {
-	ln, err := net.Listen("tcp", ":")
+	ln, err := net.Listen("tcp", ":") //nolint:gosec // server only used in tests
 	if err != nil {
 		return nil, err
 	}
diff --git a/util/testutil/imageinfo.go b/util/testutil/imageinfo.go
new file mode 100644
index 000000000000..11cecc726792
--- /dev/null
+++ b/util/testutil/imageinfo.go
@@ -0,0 +1,117 @@
+package testutil
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ImageInfo struct {
+	Desc         ocispecs.Descriptor
+	Manifest     ocispecs.Manifest
+	Img          ocispecs.Image
+	Layers       []map[string]*TarItem
+	LayersRaw    [][]byte
+	descPlatform string
+}
+
+type ImagesInfo struct {
+	Desc   ocispecs.Descriptor
+	Index  ocispecs.Index
+	Images []*ImageInfo
+}
+
+func (idx ImagesInfo) Find(platform string) *ImageInfo {
+	result := idx.Filter(platform)
+	if len(result.Images) == 0 {
+		return nil
+	}
+	return result.Images[0]
+}
+
+func (idx ImagesInfo) Filter(platform string) *ImagesInfo {
+	result := &ImagesInfo{Desc: idx.Desc}
+	for _, info := range idx.Images {
+		if info.descPlatform == platform {
+			result.Images = append(result.Images, info)
+		}
+	}
+	return result
+}
+
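+// FindAttestation returns the attestation image attached to the image manifest
+// selected by platform: it looks for the manifest whose
+// "vnd.docker.reference.digest" annotation equals the selected image's digest.
+// (Descriptive comment added for clarity; not part of the original change.)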
+func (idx ImagesInfo) FindAttestation(platform string) *ImageInfo { + img := idx.Find(platform) + if img == nil { + return nil + } + for _, info := range idx.Images { + if info.Desc.Annotations["vnd.docker.reference.digest"] == string(img.Desc.Digest) { + return info + } + } + return nil +} + +func ReadImages(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImagesInfo, error) { + idx := &ImagesInfo{Desc: desc} + + dt, err := content.ReadBlob(ctx, p, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &idx.Index); err != nil { + return nil, err + } + + for _, m := range idx.Index.Manifests { + img, err := ReadImage(ctx, p, m) + if err != nil { + return nil, err + } + img.descPlatform = platforms.Format(*m.Platform) + idx.Images = append(idx.Images, img) + } + return idx, nil +} + +func ReadImage(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ImageInfo, error) { + ii := &ImageInfo{Desc: desc} + + dt, err := content.ReadBlob(ctx, p, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &ii.Manifest); err != nil { + return nil, err + } + + dt, err = content.ReadBlob(ctx, p, ii.Manifest.Config) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &ii.Img); err != nil { + return nil, err + } + + ii.Layers = make([]map[string]*TarItem, len(ii.Manifest.Layers)) + ii.LayersRaw = make([][]byte, len(ii.Manifest.Layers)) + for i, l := range ii.Manifest.Layers { + dt, err := content.ReadBlob(ctx, p, l) + if err != nil { + return nil, err + } + ii.LayersRaw[i] = dt + if images.IsLayerType(l.MediaType) { + m, err := ReadTarToMap(dt, true) + if err != nil { + return nil, err + } + ii.Layers[i] = m + } + } + return ii, nil +} diff --git a/util/testutil/integration/containerd.go b/util/testutil/integration/containerd.go index 3fb3aa5e33cb..193176f2b407 100644 --- a/util/testutil/integration/containerd.go +++ b/util/testutil/integration/containerd.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -81,6 +80,10 @@ func (c *containerd) Name() string { return c.name } +func (c *containerd) Rootless() bool { + return c.uid != 0 +} + func (c *containerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) { if err := lookupBinary(c.containerd); err != nil { return nil, nil, err @@ -110,7 +113,7 @@ func (c *containerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl rootless = true } - tmpdir, err := ioutil.TempDir("", "bktest_containerd") + tmpdir, err := os.MkdirTemp("", "bktest_containerd") if err != nil { return nil, nil, err } @@ -158,18 +161,24 @@ disabled_plugins = ["cri"] } configFile := filepath.Join(tmpdir, "config.toml") - if err := ioutil.WriteFile(configFile, []byte(config), 0644); err != nil { + if err := os.WriteFile(configFile, []byte(config), 0644); err != nil { return nil, nil, err } containerdArgs := []string{c.containerd, "--config", configFile} rootlessKitState := filepath.Join(tmpdir, "rootlesskit-containerd") if rootless { - containerdArgs = append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.uid), "-i", "--", "exec", - "rootlesskit", "--copy-up=/run", "--state-dir", rootlessKitState}, containerdArgs...) + containerdArgs = append(append([]string{"sudo", "-u", fmt.Sprintf("#%d", c.uid), "-i", + fmt.Sprintf("CONTAINERD_ROOTLESS_ROOTLESSKIT_STATE_DIR=%s", rootlessKitState), + // Integration test requires the access to localhost of the host network namespace. 
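+			// (Added note: CONTAINERD_ROOTLESS_ROOTLESSKIT_NET=host is assumed to
+			// share the host network namespace, and PORT_DRIVER=none to disable
+			// RootlessKit port forwarding, which is what keeps localhost reachable.)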
+ // TODO: remove these configurations + "CONTAINERD_ROOTLESS_ROOTLESSKIT_NET=host", + "CONTAINERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=none", + "CONTAINERD_ROOTLESS_ROOTLESSKIT_FLAGS=--mtu=0", + }, c.extraEnv...), "containerd-rootless.sh", "-c", configFile) } - cmd := exec.Command(containerdArgs[0], containerdArgs[1:]...) + cmd := exec.Command(containerdArgs[0], containerdArgs[1:]...) //nolint:gosec // test utility cmd.Env = append(os.Environ(), c.extraEnv...) ctdStop, err := startCmd(cmd, cfg.Logs) diff --git a/util/testutil/integration/dockerd.go b/util/testutil/integration/dockerd.go index 1aeb50a30811..b56390ec9598 100644 --- a/util/testutil/integration/dockerd.go +++ b/util/testutil/integration/dockerd.go @@ -3,48 +3,100 @@ package integration import ( "bytes" "context" - "fmt" + "encoding/json" "io" - "io/ioutil" "net" "os" + "path/filepath" "time" + "github.com/docker/docker/client" + "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/util/testutil/dockerd" + "github.com/pkg/errors" "golang.org/x/sync/errgroup" - - "github.com/docker/docker/testutil/daemon" ) -const dockerdBinary = "dockerd" - -type logTAdapter struct { - Name string - Logs map[string]*bytes.Buffer +// InitDockerdWorker registers a dockerd worker with the global registry. +func InitDockerdWorker() { + Register(&moby{ + name: "dockerd", + rootless: false, + unsupported: []string{ + FeatureCacheExport, + FeatureCacheImport, + FeatureDirectPush, + FeatureImageExporter, + FeatureMultiCacheExport, + FeatureMultiPlatform, + FeatureOCIExporter, + FeatureOCILayout, + FeatureProvenance, + FeatureSBOM, + FeatureSecurityMode, + FeatureCNINetwork, + }, + }) + Register(&moby{ + name: "dockerd-containerd", + rootless: false, + unsupported: []string{ + FeatureSecurityMode, + FeatureCNINetwork, + }, + }) } -func (l logTAdapter) Logf(format string, v ...interface{}) { - if buf, ok := l.Logs[l.Name]; !ok || buf == nil { - l.Logs[l.Name] = &bytes.Buffer{} - } - fmt.Fprintf(l.Logs[l.Name], format, v...) +type moby struct { + name string + rootless bool + unsupported []string } -// InitDockerdWorker registers a dockerd worker with the global registry. 
-func InitDockerdWorker() { - Register(&dockerd{}) +func (c moby) Name() string { + return c.name } -type dockerd struct{} - -func (c dockerd) Name() string { - return dockerdBinary +func (c moby) Rootless() bool { + return c.rootless } -func (c dockerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) { +func (c moby) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl func() error, err error) { if err := requireRoot(); err != nil { return nil, nil, err } + bkcfg, err := config.LoadFile(cfg.ConfigFile) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to load buildkit config file %s", cfg.ConfigFile) + } + + dcfg := dockerd.Config{ + Features: map[string]bool{ + "containerd-snapshotter": c.name == "dockerd-containerd", + }, + } + if reg, ok := bkcfg.Registries["docker.io"]; ok && len(reg.Mirrors) > 0 { + for _, m := range reg.Mirrors { + dcfg.Mirrors = append(dcfg.Mirrors, "http://"+m) + } + } + if bkcfg.Entitlements != nil { + for _, e := range bkcfg.Entitlements { + switch e { + case "network.host": + dcfg.Builder.Entitlements.NetworkHost = true + case "security.insecure": + dcfg.Builder.Entitlements.SecurityInsecure = true + } + } + } + + dcfgdt, err := json.Marshal(dcfg) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to marshal dockerd config") + } + deferF := &multiCloser{} cl = deferF.F() @@ -58,47 +110,53 @@ func (c dockerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl fun var proxyGroup errgroup.Group deferF.append(proxyGroup.Wait) - workDir, err := ioutil.TempDir("", "integration") + workDir, err := os.MkdirTemp("", "integration") if err != nil { return nil, nil, err } - cmd, err := daemon.NewDaemon( - workDir, - daemon.WithTestLogger(logTAdapter{ - Name: "creatingDaemon", - Logs: cfg.Logs, - }), - daemon.WithContainerdSocket(""), - ) + d, err := dockerd.NewDaemon(workDir) if err != nil { - return nil, nil, fmt.Errorf("new daemon error: %q, %s", err, formatLogs(cfg.Logs)) + return nil, nil, errors.Errorf("new daemon error: %q, %s", err, formatLogs(cfg.Logs)) + } + + dockerdConfigFile := filepath.Join(workDir, "daemon.json") + if err := os.WriteFile(dockerdConfigFile, dcfgdt, 0644); err != nil { + return nil, nil, err } - err = cmd.StartWithError() + err = d.StartWithError(cfg.Logs, + "--config-file", dockerdConfigFile, + "--userland-proxy=false", + "--bip", "10.66.66.1/24", + "--default-address-pool", "base=10.66.66.0/16,size=24", + "--debug", + ) if err != nil { return nil, nil, err } - deferF.append(cmd.StopWithError) + deferF.append(d.StopWithError) logs := map[string]*bytes.Buffer{} - if err := waitUnix(cmd.Sock(), 5*time.Second); err != nil { - return nil, nil, fmt.Errorf("dockerd did not start up: %q, %s", err, formatLogs(logs)) + if err := waitUnix(d.Sock(), 5*time.Second); err != nil { + return nil, nil, errors.Errorf("dockerd did not start up: %q, %s", err, formatLogs(logs)) } - ctx, cancel := context.WithCancel(context.Background()) - deferF.append(func() error { cancel(); return nil }) - - dockerAPI, err := cmd.NewClient() + dockerAPI, err := client.NewClientWithOpts(client.WithHost(d.Sock())) if err != nil { return nil, nil, err } deferF.append(dockerAPI.Close) + err = waitForAPI(ctx, dockerAPI, 5*time.Second) + if err != nil { + return nil, nil, errors.Wrapf(err, "dockerd client api timed out: %s", formatLogs(cfg.Logs)) + } + // Create a file descriptor to be used as a Unix domain socket. 
// Remove it immediately (the name will still be valid for the socket) so that // we don't leave files all over the users tmp tree. - f, err := ioutil.TempFile("", "buildkit-integration") + f, err := os.CreateTemp("", "buildkit-integration") if err != nil { return } @@ -108,7 +166,7 @@ func (c dockerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl fun listener, err := net.Listen("unix", localPath) if err != nil { - return nil, nil, err + return nil, nil, errors.Wrapf(err, "dockerd listener error: %s", formatLogs(cfg.Logs)) } deferF.append(listener.Close) @@ -142,7 +200,37 @@ func (c dockerd) New(ctx context.Context, cfg *BackendConfig) (b Backend, cl fun }) return backend{ - address: "unix://" + listener.Addr().String(), - rootless: false, + address: "unix://" + listener.Addr().String(), + rootless: c.rootless, + isDockerd: true, + unsupportedFeatures: c.unsupported, }, cl, nil } + +func waitForAPI(ctx context.Context, apiClient *client.Client, d time.Duration) error { + step := 50 * time.Millisecond + i := 0 + for { + if _, err := apiClient.Ping(ctx); err == nil { + break + } + i++ + if time.Duration(i)*step > d { + return errors.New("failed to connect to /_ping endpoint") + } + time.Sleep(step) + } + return nil +} + +func IsTestDockerd() bool { + return os.Getenv("TEST_DOCKERD") == "1" +} + +func IsTestDockerdMoby(sb Sandbox) bool { + b, err := getBackend(sb) + if err != nil { + return false + } + return b.isDockerd && sb.Name() == "dockerd" +} diff --git a/util/testutil/integration/frombinary.go b/util/testutil/integration/frombinary.go index 3f2139ec5d91..4bf68193c548 100644 --- a/util/testutil/integration/frombinary.go +++ b/util/testutil/integration/frombinary.go @@ -3,7 +3,6 @@ package integration import ( "context" "encoding/json" - "io/ioutil" "os" "github.com/containerd/containerd/content" @@ -15,7 +14,7 @@ import ( func providerFromBinary(fn string) (_ ocispecs.Descriptor, _ content.Provider, _ func(), err error) { ctx := context.TODO() - tmpDir, err := ioutil.TempDir("", "buildkit-state") + tmpDir, err := os.MkdirTemp("", "buildkit-state") if err != nil { return ocispecs.Descriptor{}, nil, nil, err } diff --git a/util/testutil/integration/oci.go b/util/testutil/integration/oci.go index ce71643c7041..b4934a937870 100644 --- a/util/testutil/integration/oci.go +++ b/util/testutil/integration/oci.go @@ -46,6 +46,10 @@ func (s *oci) Name() string { return "oci" } +func (s *oci) Rootless() bool { + return s.uid != 0 +} + func (s *oci) New(ctx context.Context, cfg *BackendConfig) (Backend, func() error, error) { if err := lookupBinary("buildkitd"); err != nil { return nil, nil, err diff --git a/util/testutil/integration/pins.go b/util/testutil/integration/pins.go new file mode 100644 index 000000000000..1d7e49a0c71d --- /dev/null +++ b/util/testutil/integration/pins.go @@ -0,0 +1,20 @@ +package integration + +var pins = map[string]map[string]string{ + // busybox is pinned to 1.35. 
Newer versions produce an "illegal instruction" panic in sha256sum on some of GitHub's infrastructure
+	"busybox:latest": {
+		"amd64":   "sha256:0d5a701f0ca53f38723108687add000e1922f812d4187dea7feaee85d2f5a6c5",
+		"arm64v8": "sha256:ffe38d75e44d8ffac4cd6d09777ffc31e94ea0ded6a0164e825a325dc17a3b68",
+		"library": "sha256:f4ed5f2163110c26d42741fdc92bd1710e118aed4edb19212548e8ca4e5fca22",
+	},
+	"alpine:latest": {
+		"amd64":   "sha256:c0d488a800e4127c334ad20d61d7bc21b4097540327217dfab52262adc02380c",
+		"arm64v8": "sha256:af06af3514c44a964d3b905b498cf6493db8f1cde7c10e078213a89c87308ba0",
+		"library": "sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4",
+	},
+	"debian:bullseye-20230109-slim": {
+		"amd64":   "sha256:1acb06a0c31fb467eb8327ad361f1091ab265e0bf26d452dea45dcb0c0ea5e75",
+		"arm64v8": "sha256:7816383f71131e55256c17d42fd77bd80f3c1c98948ebf449fe56eb6580f4c4c",
+		"library": "sha256:98d3b4b0cee264301eb1354e0b549323af2d0633e1c43375d0b25c01826b6790",
+	},
+}
diff --git a/util/testutil/integration/registry.go b/util/testutil/integration/registry.go
index 06d1adf355ef..32ed571faba4 100644
--- a/util/testutil/integration/registry.go
+++ b/util/testutil/integration/registry.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -31,7 +30,7 @@ func NewRegistry(dir string) (url string, cl func() error, err error) {
 	}()
 
 	if dir == "" {
-		tmpdir, err := ioutil.TempDir("", "test-registry")
+		tmpdir, err := os.MkdirTemp("", "test-registry")
 		if err != nil {
 			return "", nil, err
 		}
@@ -52,12 +51,12 @@ http:
     addr: 127.0.0.1:0
 `, filepath.Join(dir, "data"))
 
-		if err := ioutil.WriteFile(filepath.Join(dir, "config.yaml"), []byte(template), 0600); err != nil {
+		if err := os.WriteFile(filepath.Join(dir, "config.yaml"), []byte(template), 0600); err != nil {
 			return "", nil, err
 		}
 	}
 
-	cmd := exec.Command("registry", "serve", filepath.Join(dir, "config.yaml"))
+	cmd := exec.Command("registry", "serve", filepath.Join(dir, "config.yaml")) //nolint:gosec // test utility
 	rc, err := cmd.StderrPipe()
 	if err != nil {
 		return "", nil, err
@@ -84,7 +83,7 @@ func detectPort(ctx context.Context, rc io.ReadCloser) (string, error) {
 	found := make(chan struct{})
 	defer func() {
 		close(found)
-		go io.Copy(ioutil.Discard, rc)
+		go io.Copy(io.Discard, rc)
 	}()
 
 	go func() {
diff --git a/util/testutil/integration/run.go b/util/testutil/integration/run.go
index 50d355e41104..18f6f0b748cd 100644
--- a/util/testutil/integration/run.go
+++ b/util/testutil/integration/run.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"os/exec"
@@ -13,7 +12,6 @@ import (
 	"runtime"
 	"sort"
 	"strings"
-	"sync"
 	"testing"
 	"time"
 
@@ -24,7 +22,6 @@ import (
 	"github.com/moby/buildkit/util/contentutil"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/semaphore"
 )
@@ -49,6 +46,7 @@ type Sandbox interface {
 	Context() context.Context
 	Cmd(...string) *exec.Cmd
 	PrintLogs(*testing.T)
+	ClearLogs()
 	NewRegistry() (string, error)
 	Value(string) interface{} // chosen matrix value
 	Name() string
@@ -63,6 +61,7 @@ type BackendConfig struct {
 type Worker interface {
 	New(context.Context, *BackendConfig) (Backend, func() error, error)
 	Name() string
+	Rootless() bool
 }
 
 type ConfigUpdater interface {
@@ -90,8 +89,14 @@ func (f testFunc) Run(t *testing.T, sb Sandbox) {
 
 func TestFuncs(funcs ...func(t *testing.T, sb Sandbox)) []Test {
 	var tests []Test
+	names := map[string]struct{}{}
 	for _,
f := range funcs { - tests = append(tests, testFunc{name: getFunctionName(f), run: f}) + name := getFunctionName(f) + if _, ok := names[name]; ok { + panic("duplicate test: " + name) + } + names[name] = struct{}{} + tests = append(tests, testFunc{name: name, run: f}) } return tests } @@ -152,27 +157,14 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { mirror, cleanup, err := runMirror(t, tc.mirroredImages) require.NoError(t, err) - var mu sync.Mutex - var count int - cleanOnComplete := func() func() { - count++ - return func() { - mu.Lock() - count-- - if count == 0 { - cleanup() - } - mu.Unlock() - } - } - defer cleanOnComplete()() + t.Cleanup(func() { _ = cleanup() }) matrix := prepareValueMatrix(tc) list := List() if os.Getenv("BUILDKIT_WORKER_RANDOM") == "1" && len(list) > 0 { rand.Seed(time.Now().UnixNano()) - list = []Worker{list[rand.Intn(len(list))]} + list = []Worker{list[rand.Intn(len(list))]} //nolint:gosec // using math/rand is fine in a test utility } for _, br := range list { @@ -182,9 +174,11 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { name := fn + "/worker=" + br.Name() + mv.functionSuffix() func(fn, testName string, br Worker, tc Test, mv matrixValue) { ok := t.Run(testName, func(t *testing.T) { + if strings.Contains(fn, "NoRootless") && br.Rootless() { + // skip sandbox setup + t.Skip("rootless") + } ctx := appcontext.Context() - - defer cleanOnComplete()() if !strings.HasSuffix(fn, "NoParallel") { t.Parallel() } @@ -193,8 +187,8 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { sb, closer, err := newSandbox(ctx, br, mirror, mv) require.NoError(t, err) + t.Cleanup(func() { _ = closer() }) defer func() { - assert.NoError(t, closer()) if t.Failed() { sb.PrintLogs(t) } @@ -211,7 +205,7 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { func getFunctionName(i interface{}) string { fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() dot := strings.LastIndex(fullname, ".") + 1 - return strings.Title(fullname[dot:]) + return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support } var localImageCache map[string]map[string]struct{} @@ -270,12 +264,18 @@ func OfficialImages(names ...string) map[string]string { ns := runtime.GOARCH if ns == "arm64" { ns = "arm64v8" - } else if ns != "amd64" && ns != "armhf" { + } else if ns != "amd64" { ns = "library" } m := map[string]string{} for _, name := range names { - m["library/"+name] = "docker.io/" + ns + "/" + name + ref := "docker.io/" + ns + "/" + name + if pns, ok := pins[name]; ok { + if dgst, ok := pns[ns]; ok { + ref += "@" + dgst + } + } + m["library/"+name] = ref } return m } @@ -295,7 +295,7 @@ mirrors=["%s"] } func writeConfig(updaters []ConfigUpdater) (string, error) { - tmpdir, err := ioutil.TempDir("", "bktest_config") + tmpdir, err := os.MkdirTemp("", "bktest_config") if err != nil { return "", err } @@ -308,7 +308,7 @@ func writeConfig(updaters []ConfigUpdater) (string, error) { s = upt.UpdateConfigFile(s) } - if err := ioutil.WriteFile(filepath.Join(tmpdir, buildkitdConfigFile), []byte(s), 0644); err != nil { + if err := os.WriteFile(filepath.Join(tmpdir, buildkitdConfigFile), []byte(s), 0644); err != nil { return "", err } return tmpdir, nil @@ -428,7 +428,7 @@ func runStargzSnapshotter(cfg *BackendConfig) (address string, cl func() error, } }() - tmpStargzDir, err := ioutil.TempDir("", "bktest_containerd_stargz_grpc") + tmpStargzDir, err := os.MkdirTemp("", 
"bktest_containerd_stargz_grpc") if err != nil { return "", nil, err } diff --git a/util/testutil/integration/sandbox.go b/util/testutil/integration/sandbox.go index 8896ec62d657..8eb90cdc0caa 100644 --- a/util/testutil/integration/sandbox.go +++ b/util/testutil/integration/sandbox.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -22,10 +21,12 @@ import ( const buildkitdConfigFile = "buildkitd.toml" type backend struct { - address string - containerdAddress string - rootless bool - snapshotter string + address string + containerdAddress string + rootless bool + snapshotter string + unsupportedFeatures []string + isDockerd bool } func (b backend) Address() string { @@ -44,6 +45,15 @@ func (b backend) Snapshotter() string { return b.snapshotter } +func (b backend) isUnsupportedFeature(feature string) bool { + for _, unsupportedFeature := range b.unsupportedFeatures { + if feature == unsupportedFeature { + return true + } + } + return false +} + type sandbox struct { Backend @@ -66,6 +76,10 @@ func (sb *sandbox) PrintLogs(t *testing.T) { printLogs(sb.logs, t.Log) } +func (sb *sandbox) ClearLogs() { + sb.logs = make(map[string]*bytes.Buffer) +} + func (sb *sandbox) NewRegistry() (string, error) { url, cl, err := NewRegistry("") if err != nil { @@ -167,7 +181,7 @@ func runBuildkitd(ctx context.Context, conf *BackendConfig, args []string, logs args = append(args, "--config="+conf.ConfigFile) } - tmpdir, err := ioutil.TempDir("", "bktest_buildkitd") + tmpdir, err := os.MkdirTemp("", "bktest_buildkitd") if err != nil { return "", nil, err } @@ -186,7 +200,7 @@ func runBuildkitd(ctx context.Context, conf *BackendConfig, args []string, logs address = getBuildkitdAddr(tmpdir) args = append(args, "--root", tmpdir, "--addr", address, "--debug") - cmd := exec.Command(args[0], args[1:]...) + cmd := exec.Command(args[0], args[1:]...) //nolint:gosec // test utility cmd.Env = append(os.Environ(), "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1", "TMPDIR="+filepath.Join(tmpdir, "tmp")) cmd.Env = append(cmd.Env, extraEnv...) 
cmd.SysProcAttr = getSysProcAttr() @@ -219,8 +233,20 @@ func runBuildkitd(ctx context.Context, conf *BackendConfig, args []string, logs return address, cl, err } +func getBackend(sb Sandbox) (*backend, error) { + sbx, ok := sb.(*sandbox) + if !ok { + return nil, errors.Errorf("invalid sandbox type %T", sb) + } + b, ok := sbx.Backend.(backend) + if !ok { + return nil, errors.Errorf("invalid backend type %T", b) + } + return &b, nil +} + func rootlessSupported(uid int) bool { - cmd := exec.Command("sudo", "-u", fmt.Sprintf("#%d", uid), "-i", "--", "exec", "unshare", "-U", "true") + cmd := exec.Command("sudo", "-u", fmt.Sprintf("#%d", uid), "-i", "--", "exec", "unshare", "-U", "true") //nolint:gosec // test utility b, err := cmd.CombinedOutput() if err != nil { logrus.Warnf("rootless mode is not supported on this host: %v (%s)", err, string(b)) @@ -238,3 +264,69 @@ func printLogs(logs map[string]*bytes.Buffer, f func(args ...interface{})) { } } } + +const ( + FeatureCacheExport = "cache export" + FeatureCacheImport = "cache import" + FeatureDirectPush = "direct push" + FeatureFrontendOutline = "frontend outline" + FeatureFrontendTargets = "frontend targets" + FeatureImageExporter = "image exporter" + FeatureInfo = "info" + FeatureMultiCacheExport = "multi cache export" + FeatureMultiPlatform = "multi-platform" + FeatureOCIExporter = "oci exporter" + FeatureOCILayout = "oci layout" + FeatureProvenance = "provenance" + FeatureSBOM = "sbom" + FeatureSecurityMode = "security mode" + FeatureSourceDateEpoch = "source date epoch" + FeatureCNINetwork = "cni network" +) + +var features = map[string]struct{}{ + FeatureCacheExport: {}, + FeatureCacheImport: {}, + FeatureDirectPush: {}, + FeatureFrontendOutline: {}, + FeatureFrontendTargets: {}, + FeatureImageExporter: {}, + FeatureInfo: {}, + FeatureMultiCacheExport: {}, + FeatureMultiPlatform: {}, + FeatureOCIExporter: {}, + FeatureOCILayout: {}, + FeatureProvenance: {}, + FeatureSBOM: {}, + FeatureSecurityMode: {}, + FeatureSourceDateEpoch: {}, + FeatureCNINetwork: {}, +} + +func CheckFeatureCompat(t *testing.T, sb Sandbox, reason ...string) { + t.Helper() + if len(reason) == 0 { + t.Fatal("no reason provided") + } + b, err := getBackend(sb) + if err != nil { + t.Fatal(err) + } + if len(b.unsupportedFeatures) == 0 { + return + } + var ereasons []string + for _, r := range reason { + if _, ok := features[r]; ok { + if b.isUnsupportedFeature(r) { + ereasons = append(ereasons, r) + } + } else { + sb.ClearLogs() + t.Fatalf("unknown reason %q to skip test", r) + } + } + if len(ereasons) > 0 { + t.Skipf("%s worker can not currently run this test due to missing features (%s)", sb.Name(), strings.Join(ereasons, ", ")) + } +} diff --git a/util/testutil/integration/util.go b/util/testutil/integration/util.go index ee92291e531c..6c7a5a5ec426 100644 --- a/util/testutil/integration/util.go +++ b/util/testutil/integration/util.go @@ -11,9 +11,12 @@ import ( "strings" "sync" "syscall" + "testing" "time" + "github.com/containerd/continuity/fs/fstest" "github.com/pkg/errors" + "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) @@ -76,7 +79,7 @@ func waitUnix(address string, d time.Duration) error { address = strings.TrimPrefix(address, "unix://") addr, err := net.ResolveUnixAddr("unix", address) if err != nil { - return err + return errors.Wrapf(err, "failed resolving unix addr: %s", address) } step := 50 * time.Millisecond @@ -144,3 +147,23 @@ func (w *lockingWriter) Write(dt []byte) (int, error) { w.mu.Unlock() return n, err } + +func Tmpdir(t 
*testing.T, appliers ...fstest.Applier) (string, error) { + // We cannot use t.TempDir() to create a temporary directory here because + // appliers might contain fstest.CreateSocket. If the test name is too long, + // t.TempDir() could return a path that is longer than 108 characters. This + // would result in "bind: invalid argument" when we listen on the socket. + tmpdir, err := os.MkdirTemp("", "buildkit") + if err != nil { + return "", err + } + + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(tmpdir)) + }) + + if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil { + return "", err + } + return tmpdir, nil +} diff --git a/util/testutil/tar.go b/util/testutil/tar.go index e7a9b41fa3b8..a519026b6a45 100644 --- a/util/testutil/tar.go +++ b/util/testutil/tar.go @@ -5,7 +5,6 @@ import ( "bytes" "compress/gzip" "io" - "io/ioutil" "github.com/pkg/errors" ) @@ -41,7 +40,7 @@ func ReadTarToMap(dt []byte, compressed bool) (map[string]*TarItem, error) { var dt []byte if h.Typeflag == tar.TypeReg { - dt, err = ioutil.ReadAll(tr) + dt, err = io.ReadAll(tr) if err != nil { return nil, errors.Wrapf(err, "error reading file") } diff --git a/util/throttle/throttle.go b/util/throttle/throttle.go index dfc4aefa90dc..249b17dd49ce 100644 --- a/util/throttle/throttle.go +++ b/util/throttle/throttle.go @@ -31,7 +31,7 @@ func throttle(d time.Duration, f func(), wait bool) func() { go func() { for { mu.Lock() - if next == false { + if !next { running = false mu.Unlock() return diff --git a/util/tracing/detect/detect.go b/util/tracing/detect/detect.go index 7e42c1c8793c..13e54bdefc01 100644 --- a/util/tracing/detect/detect.go +++ b/util/tracing/detect/detect.go @@ -24,6 +24,7 @@ type detector struct { } var ServiceName string +var Recorder *TraceRecorder var detectors map[string]detector var once sync.Once @@ -72,16 +73,31 @@ func detectExporter() (sdktrace.SpanExporter, error) { return nil, nil } -func detect() error { - tp = trace.NewNoopTracerProvider() - +func getExporter() (sdktrace.SpanExporter, error) { exp, err := detectExporter() if err != nil { - return err + return nil, err } - if exp == nil { - return nil + if exp != nil { + exp = &threadSafeExporterWrapper{ + exporter: exp, + } + } + + if Recorder != nil { + Recorder.SpanExporter = exp + exp = Recorder + } + return exp, nil +} + +func detect() error { + tp = trace.NewNoopTracerProvider() + + exp, err := getExporter() + if err != nil || exp == nil { + return err } // enable log with traceID when valid exporter @@ -98,6 +114,10 @@ func detect() error { sp := sdktrace.NewBatchSpanProcessor(exp) + if Recorder != nil { + Recorder.flush = sp.ForceFlush + } + sdktp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sp), sdktrace.WithResource(res)) closers = append(closers, sdktp.Shutdown) @@ -112,7 +132,7 @@ func TracerProvider() (trace.TracerProvider, error) { err = err1 } }) - b, _ := strconv.ParseBool(os.Getenv("OTEL_INGORE_ERROR")) + b, _ := strconv.ParseBool(os.Getenv("OTEL_IGNORE_ERROR")) if err != nil && !b { return nil, err } diff --git a/util/tracing/detect/otlp.go b/util/tracing/detect/otlp.go index 2bea75c1c663..aa68f876ef20 100644 --- a/util/tracing/detect/otlp.go +++ b/util/tracing/detect/otlp.go @@ -16,7 +16,7 @@ func init() { } func otlpExporter() (sdktrace.SpanExporter, error) { - set := os.Getenv("OTEL_TRACES_EXPORTER") == "otpl" || os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != "" || os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT") != "" + set := os.Getenv("OTEL_TRACES_EXPORTER") == "otlp" || 
os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != "" || os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT") != "" if !set { return nil, nil } diff --git a/util/tracing/detect/recorder.go b/util/tracing/detect/recorder.go new file mode 100644 index 000000000000..8ff7f1dcef38 --- /dev/null +++ b/util/tracing/detect/recorder.go @@ -0,0 +1,115 @@ +package detect + +import ( + "context" + "sync" + "time" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" +) + +type TraceRecorder struct { + sdktrace.SpanExporter + + mu sync.Mutex + m map[trace.TraceID]*stubs + listeners map[trace.TraceID]int + flush func(context.Context) error +} + +type stubs struct { + spans []tracetest.SpanStub + last time.Time +} + +func NewTraceRecorder() *TraceRecorder { + tr := &TraceRecorder{ + m: map[trace.TraceID]*stubs{}, + listeners: map[trace.TraceID]int{}, + } + + go func() { + t := time.NewTimer(60 * time.Second) + for { + <-t.C + tr.gc() + t.Reset(50 * time.Second) + } + }() + + return tr +} + +func (r *TraceRecorder) Record(traceID trace.TraceID) func() []tracetest.SpanStub { + r.mu.Lock() + defer r.mu.Unlock() + + r.listeners[traceID]++ + var once sync.Once + var spans []tracetest.SpanStub + return func() []tracetest.SpanStub { + once.Do(func() { + if r.flush != nil { + r.flush(context.TODO()) + } + + r.mu.Lock() + defer r.mu.Unlock() + + if v, ok := r.m[traceID]; ok { + spans = v.spans + } + r.listeners[traceID]-- + if r.listeners[traceID] == 0 { + delete(r.listeners, traceID) + } + }) + return spans + } +} + +func (r *TraceRecorder) gc() { + r.mu.Lock() + defer r.mu.Unlock() + + now := time.Now() + for k, s := range r.m { + if _, ok := r.listeners[k]; ok { + continue + } + if now.Sub(s.last) > 60*time.Second { + delete(r.m, k) + } + } +} + +func (r *TraceRecorder) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + r.mu.Lock() + + now := time.Now() + for _, s := range spans { + ss := tracetest.SpanStubFromReadOnlySpan(s) + v, ok := r.m[ss.SpanContext.TraceID()] + if !ok { + v = &stubs{} + r.m[s.SpanContext().TraceID()] = v + } + v.last = now + v.spans = append(v.spans, ss) + } + r.mu.Unlock() + + if r.SpanExporter == nil { + return nil + } + return r.SpanExporter.ExportSpans(ctx, spans) +} + +func (r *TraceRecorder) Shutdown(ctx context.Context) error { + if r.SpanExporter == nil { + return nil + } + return r.SpanExporter.Shutdown(ctx) +} diff --git a/util/tracing/detect/threadsafe.go b/util/tracing/detect/threadsafe.go new file mode 100644 index 000000000000..51d14448dfed --- /dev/null +++ b/util/tracing/detect/threadsafe.go @@ -0,0 +1,26 @@ +package detect + +import ( + "context" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// threadSafeExporterWrapper wraps an OpenTelemetry SpanExporter and makes it thread-safe. 
+type threadSafeExporterWrapper struct { + mu sync.Mutex + exporter sdktrace.SpanExporter +} + +func (tse *threadSafeExporterWrapper) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + tse.mu.Lock() + defer tse.mu.Unlock() + return tse.exporter.ExportSpans(ctx, spans) +} + +func (tse *threadSafeExporterWrapper) Shutdown(ctx context.Context) error { + tse.mu.Lock() + defer tse.mu.Unlock() + return tse.exporter.Shutdown(ctx) +} diff --git a/util/tracing/otlptracegrpc/client.go b/util/tracing/otlptracegrpc/client.go index 638b08ce904a..e8d13301f3d5 100644 --- a/util/tracing/otlptracegrpc/client.go +++ b/util/tracing/otlptracegrpc/client.go @@ -16,17 +16,14 @@ package otlptracegrpc import ( "context" - "errors" - "fmt" "sync" "time" + "github.com/pkg/errors" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" - - "google.golang.org/grpc" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "google.golang.org/grpc" ) type client struct { @@ -38,10 +35,6 @@ type client struct { var _ otlptrace.Client = (*client)(nil) -var ( - errNoClient = errors.New("no client") -) - // NewClient creates a new gRPC trace client. func NewClient(cc *grpc.ClientConn) otlptrace.Client { c := &client{} @@ -73,7 +66,7 @@ func (c *client) Stop(ctx context.Context) error { // UploadTraces sends a batch of spans to the collector. func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { if !c.connection.Connected() { - return fmt.Errorf("traces exporter is disconnected from the server: %w", c.connection.LastConnectError()) + return errors.Wrap(c.connection.LastConnectError(), "traces exporter is disconnected from the server") } ctx, cancel := c.connection.ContextWithStop(ctx) diff --git a/util/tracing/otlptracegrpc/connection.go b/util/tracing/otlptracegrpc/connection.go index a244882197d7..dbb0fcd39f47 100644 --- a/util/tracing/otlptracegrpc/connection.go +++ b/util/tracing/otlptracegrpc/connection.go @@ -119,9 +119,7 @@ func (c *Connection) indefiniteBackgroundConnection() { connReattemptPeriod := defaultConnReattemptPeriod - // No strong seeding required, nano time can - // already help with pseudo uniqueness. - rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) + rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) //nolint:gosec // No strong seeding required, nano time can already help with pseudo uniqueness. // maxJitterNanos: 70% of the connectionReattemptPeriod maxJitterNanos := int64(0.7 * float64(connReattemptPeriod)) diff --git a/util/tracing/otlptracegrpc/errors.go b/util/tracing/otlptracegrpc/errors.go new file mode 100644 index 000000000000..b05bd02a2945 --- /dev/null +++ b/util/tracing/otlptracegrpc/errors.go @@ -0,0 +1,7 @@ +package otlptracegrpc + +import "errors" + +var ( + errNoClient = errors.New("no client") +) diff --git a/util/wildcard/wildcard.go b/util/wildcard/wildcard.go new file mode 100644 index 000000000000..ef1176c82e1e --- /dev/null +++ b/util/wildcard/wildcard.go @@ -0,0 +1,87 @@ +package wildcard + +import ( + "regexp" + "strings" + + "github.com/pkg/errors" +) + +// New returns a wildcard object for a string that contains "*" symbols. 
+func New(s string) (*Wildcard, error) { + reStr, err := Wildcard2Regexp(s) + if err != nil { + return nil, errors.Wrapf(err, "failed to translate wildcard %q to regexp", s) + } + re, err := regexp.Compile(reStr) + if err != nil { + return nil, errors.Wrapf(err, "failed to compile regexp %q (translated from wildcard %q)", reStr, s) + } + w := &Wildcard{ + orig: s, + re: re, + } + return w, nil +} + +// Wildcard2Regexp translates a wildcard string to a regexp string. +func Wildcard2Regexp(wildcard string) (string, error) { + s := regexp.QuoteMeta(wildcard) + if strings.Contains(s, "\\*\\*") { + return "", errors.New("invalid wildcard: \"**\"") + } + s = strings.ReplaceAll(s, "\\*", "(.*)") + s = "^" + s + "$" + return s, nil +} + +// Wildcard is a wildcard matcher object. +type Wildcard struct { + orig string + re *regexp.Regexp +} + +// String implements fmt.Stringer. +func (w *Wildcard) String() string { + return w.orig +} + +// Match returns a non-nil Match on match. +func (w *Wildcard) Match(q string) *Match { + submatches := w.re.FindStringSubmatch(q) + if len(submatches) == 0 { + return nil + } + m := &Match{ + w: w, + Submatches: submatches, + // FIXME: avoid executing regexp twice + idx: w.re.FindStringSubmatchIndex(q), + } + return m +} + +// Match is a matched result. +type Match struct { + w *Wildcard + Submatches []string // 0: the entire query, 1: the first submatch, 2: the second submatch, ... + idx []int +} + +// String implements fmt.Stringer. +func (m *Match) String() string { + if len(m.Submatches) == 0 { + return "" + } + return m.Submatches[0] +} + +// Format formats submatch strings like "$1", "$2". +func (m *Match) Format(f string) (string, error) { + if m.w == nil || len(m.Submatches) == 0 || len(m.idx) == 0 { + return "", errors.New("invalid state") + } + var b []byte + b = m.w.re.ExpandString(b, f, m.Submatches[0], m.idx) + return string(b), nil +} diff --git a/util/wildcard/wildcard_test.go b/util/wildcard/wildcard_test.go new file mode 100644 index 000000000000..4cfffaa49bd1 --- /dev/null +++ b/util/wildcard/wildcard_test.go @@ -0,0 +1,50 @@ +package wildcard + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWildcard(t *testing.T) { + wildcardStr := "docker.io/*/alpine:*" + wildcard, err := New(wildcardStr) + assert.NoError(t, err) + t.Run("Match", func(t *testing.T) { + m := wildcard.Match("docker.io/library/alpine:latest") + assert.Equal(t, []string{"docker.io/library/alpine:latest", "library", "latest"}, m.Submatches) + s, err := m.Format("$1-${2}-$3-$$-$0") + assert.NoError(t, err) + // "$3" is replaced with an empty string without producing an error, because Format() internally uses regexp.*Regexp.Expand(): + // https://pkg.go.dev/regexp#Regexp.Expand + assert.Equal(t, "library-latest--$-docker.io/library/alpine:latest", s) + }) + t.Run("NoMatch", func(t *testing.T) { + assert.Nil(t, wildcard.Match("docker.io/library/busybox:latest")) + assert.Nil(t, wildcard.Match("alpine:latest"), "matcher must not be aware of the Docker Hub reference convention") + }) +} + +func TestWildcardInvalid(t *testing.T) { + wildcardStr := "docker.io/library/alpine:**" + _, err := New(wildcardStr) + assert.ErrorContains(t, err, "invalid wildcard: \"**\"") +} + +func TestWildcardEscape(t *testing.T) { + wildcardStr := "docker.io/library/alpine:\\*" + wildcard, err := New(wildcardStr) + assert.NoError(t, err) + t.Run("NoMatch", func(t *testing.T) { + assert.Nil(t, wildcard.Match("docker.io/library/alpine:latest")) + }) +} + +func 
TestWildcardParentheses(t *testing.T) { + wildcardStr := "docker.io/library/alpine:(*)" + wildcard, err := New(wildcardStr) + assert.NoError(t, err) + t.Run("NoMatch", func(t *testing.T) { + assert.Nil(t, wildcard.Match("docker.io/library/alpine:latest")) + }) +} diff --git a/util/winlayers/applier.go b/util/winlayers/applier.go index c9c76b27dfb9..f2b147d674af 100644 --- a/util/winlayers/applier.go +++ b/util/winlayers/applier.go @@ -4,7 +4,6 @@ import ( "archive/tar" "context" "io" - "io/ioutil" "runtime" "strings" "sync" @@ -39,7 +38,7 @@ type winApplier struct { func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { if !hasWindowsLayerMode(ctx) { - return s.a.Apply(ctx, desc, mounts, opts...) + return s.apply(ctx, desc, mounts, opts...) } compressed, err := images.DiffCompression(ctx, desc.MediaType) @@ -87,7 +86,7 @@ func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { + if _, err := io.Copy(io.Discard, rc); err != nil { discard(err) return err } @@ -138,13 +137,15 @@ func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) { return err } if h.Size > 0 { + //nolint:gosec // never read into memory if _, err := io.Copy(tarWriter, tarReader); err != nil { return err } } } else { if h.Size > 0 { - if _, err := io.Copy(ioutil.Discard, tarReader); err != nil { + //nolint:gosec // never read into memory + if _, err := io.Copy(io.Discard, tarReader); err != nil { return err } } diff --git a/util/winlayers/apply.go b/util/winlayers/apply.go new file mode 100644 index 000000000000..20b2faa03818 --- /dev/null +++ b/util/winlayers/apply.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package winlayers + +import ( + "context" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + return s.a.Apply(ctx, desc, mounts, opts...) +} diff --git a/util/winlayers/apply_nydus.go b/util/winlayers/apply_nydus.go new file mode 100644 index 000000000000..1ef61b5bca0a --- /dev/null +++ b/util/winlayers/apply_nydus.go @@ -0,0 +1,73 @@ +//go:build nydus +// +build nydus + +package winlayers + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +func isNydusBlob(ctx context.Context, desc ocispecs.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + return hasMediaType && hasAnno +} + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + if !isNydusBlob(ctx, desc) { + return s.a.Apply(ctx, desc, mounts, opts...) 
+ } + + var ocidesc ocispecs.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.cs.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "get reader from content store") + } + defer ra.Close() + + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + defer pr.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(pr, digester.Hash()), + } + + if _, err := archive.Apply(ctx, root, rc); err != nil { + return errors.Wrap(err, "apply nydus blob") + } + + ocidesc = ocispecs.Descriptor{ + MediaType: ocispecs.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + + return nil + }); err != nil { + return ocispecs.Descriptor{}, err + } + + return ocidesc, nil +} diff --git a/util/winlayers/context.go b/util/winlayers/context.go index c0bd3f8a2f06..e4608892aede 100644 --- a/util/winlayers/context.go +++ b/util/winlayers/context.go @@ -12,8 +12,5 @@ func UseWindowsLayerMode(ctx context.Context) context.Context { func hasWindowsLayerMode(ctx context.Context) bool { v := ctx.Value(contextKey) - if v == nil { - return false - } - return true + return v != nil } diff --git a/util/winlayers/differ.go b/util/winlayers/differ.go index fc8ba7f7e751..fe2b1c216176 100644 --- a/util/winlayers/differ.go +++ b/util/winlayers/differ.go @@ -250,6 +250,7 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { return err } if h.Size > 0 { + //nolint:gosec // never read into memory if _, err := io.Copy(tarWriter, tarReader); err != nil { return err } @@ -262,7 +263,6 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { } pw.CloseWithError(err) done <- err - return }() discard := func(err error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 000000000000..e930a16605db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,461 @@ +# Release History + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. 
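The `PollUntilDone()` signature change above alters existing call sites. A minimal sketch of the new call shape, assuming a hypothetical widget response type (only `runtime.Poller[T]` and `runtime.PollUntilDoneOptions` are from the SDK):

```go
package main

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// WidgetResponse is a hypothetical LRO result type.
type WidgetResponse struct{}

// wait shows the v1.0.0 shape: PollUntilDone takes a *PollUntilDoneOptions
// instead of a bare time.Duration frequency.
func wait(ctx context.Context, poller *runtime.Poller[WidgetResponse]) (WidgetResponse, error) {
	return poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{
		Frequency: 5 * time.Second, // pass nil options to accept the default frequency
	})
}
```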
+ +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. 
(#16789)
+
+## 0.21.0 (2022-01-11)
+
+### Features Added
+* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger.
+* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received.
+
+### Breaking Changes
+* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions`
+* Renamed `arm/ClientOptions.Host` to `.Endpoint`
+* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload`
+* Removed `azcore.HTTPResponse` interface type
+* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter
+* `runtime.NewResponseError()` no longer requires an `error` parameter
+
+## 0.20.0 (2021-10-22)
+
+### Breaking Changes
+* Removed `arm.Connection`
+* Removed `azcore.Credential` and `.NewAnonymousCredential()`
+  * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential`
+* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication
+* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions`
+* Contents in the `log` package have been slightly renamed.
+* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions`
+* Changed parameters for `NewBearerTokenPolicy()`
+* Moved policy config options out of `arm/runtime` and into `arm/policy`
+
+### Features Added
+* Updated documentation
+* Added string typedef `arm.Endpoint` to provide a hint toward expected ARM client endpoints
+* `azcore.ClientOptions` contains common pipeline configuration settings
+* Added support for multi-tenant authorization in `arm/runtime`
+* Require one second minimum when calling `PollUntilDone()`
+
+### Bug Fixes
+* Fixed a potential panic when creating the default Transporter.
+* Close LRO initial response body when creating a poller.
+* Fixed a panic when recursively cloning structs that contain time.Time.
+
+## 0.19.0 (2021-08-25)
+
+### Breaking Changes
+* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors).
+  * `azcore` has all core functionality.
+  * `log` contains facilities for configuring in-box logging.
+  * `policy` is used for configuring pipeline options and creating custom pipeline policies.
+  * `runtime` contains various helpers used by SDK authors and generated content.
+  * `streaming` has helpers for streaming IO operations.
+* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed.
+  * As a result, the `Request.Telemetry()` method has been removed.
+* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it.
+* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it.
+* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively.
+
+### Bug Fixes
+* Fixed an issue in the retry policy where the request body could be overwritten after a rewind.
+
+### Other Changes
+* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively.
+  * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy.
+* Poller logic has been consolidated across ARM and core implementations.
+ * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. + +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. 
+* Do not silently change TryTimeout on negative values
+* Make per-try timeout opt-in
+
+
+## 0.13.4 (2020-11-20)
+### Features Added
+* Include telemetry string in User Agent
+
+
+## 0.13.3 (2020-11-20)
+### Features Added
+* Updating response body handling on `azcore.Response`
+
+
+## 0.13.2 (2020-11-13)
+### Features Added
+* Remove implementation of stateless policies as first-class functions.
+
+
+## 0.13.1 (2020-11-05)
+### Features Added
+* Add `Telemetry()` method to `azcore.Request()`
+
+
+## 0.13.0 (2020-10-14)
+### Features Added
+* Rename `log` to `logger` to avoid name collision with the log package.
+* Documentation improvements
+* Simplified `DefaultHTTPClientTransport()` implementation
+
+
+## 0.12.1 (2020-10-13)
+### Features Added
+* Update `internal` module dependence to `v0.5.0`
+
+
+## 0.12.0 (2020-10-08)
+### Features Added
+* Removed storage specific content
+* Removed internal content to prevent API clutter
+* Refactored various policy options to conform with our options pattern
+
+
+## 0.11.0 (2020-09-22)
+### Features Added
+
+* Removed `LogError` and `LogSlowResponse`.
+* Renamed `options` in `RequestLogOptions`.
+* Updated `NewRequestLogPolicy()` to follow standard pattern for options.
+* Refactored `requestLogPolicy.Do()` per above changes.
+* Cleaned up/added logging in retry policy.
+* Export `NewResponseError()`
+* Fix `RequestLogOptions` comment
+
+
+## 0.10.1 (2020-09-17)
+### Features Added
+* Add default console logger
+* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'.
+* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks.
+* Add `LogLongRunningOperation`
+
+
+## 0.10.0 (2020-09-10)
+### Features Added
+* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library.
+* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation; it also requires a context parameter.
+* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`.
+* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request.
+* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this.
+* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type.
+* Moved path concatenation into `JoinPaths()` func
+
+
+## 0.9.6 (2020-08-18)
+### Features Added
+* Improvements to body download policy
+* Always download the response body for error responses, i.e. HTTP status codes >= 400.
+* Simplify variable declarations
+
+
+## 0.9.5 (2020-08-11)
+### Features Added
+* Set the Content-Length header in `Request.SetBody`
+
+
+## 0.9.4 (2020-08-03)
+### Features Added
+* Fix cancellation of per try timeout
+* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinate amount of time.
+* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion.
+* Do not drain response body if there are no more retries
+* Do not retry non-idempotent operations when body download fails
+
+
+## 0.9.3 (2020-07-28)
+### Features Added
+* Add support for custom HTTP request headers
+* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request.
+* Use `azcore.WithHTTPHeader` to add HTTP headers to a context.
+* Remove method specific to Go 1.14
+
+
+## 0.9.2 (2020-07-28)
+### Features Added
+* Omit read-only content from request payloads
+* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation.
+* Verify no fields were dropped
+* Handle embedded struct types
+* Added test for cloning by value
+* Add messages to failures
+
+
+## 0.9.1 (2020-07-22)
+### Features Added
+* Updated dependency on internal module to fix race condition.
+
+
+## 0.9.0 (2020-07-09)
+### Features Added
+* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure.
+* Updated `sdk/internal` dependency to latest version.
+* Rename package alias
+
+
+## 0.8.2 (2020-06-29)
+### Features Added
+* Added missing documentation comments
+
+### Bugs Fixed
+* Fixed a bug in body download policy.
+
+
+## 0.8.1 (2020-06-26)
+### Features Added
+* Miscellaneous clean-up reported by linters
+
+
+## 0.8.0 (2020-06-01)
+### Features Added
+* Differentiate between standard and URL encoding.
+
+
+## 0.7.1 (2020-05-27)
+### Features Added
+* Add support for base64 encoding and decoding of payloads.
+
+
+## 0.7.0 (2020-05-12)
+### Features Added
+* Change `RetryAfter()` to a function.
+
+
+## 0.6.0 (2020-04-29)
+### Features Added
+* Updating `RetryAfter` to only return the duration in the RetryAfter header
+
+
+## 0.5.0 (2020-03-23)
+### Features Added
+* Export `TransportFunc`
+
+### Breaking Changes
+* Removed `IterationDone`
+
+
+## 0.4.1 (2020-02-25)
+### Features Added
+* Ensure per-try timeout is properly cancelled
+* Explicitly cancel the per-try timeout when the response body has been read/closed by the body download policy.
+* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed.
+* `Logger.Should()` will return false if no listener is set.
+
+
+## 0.4.0 (2020-02-18)
+### Features Added
+* Enable custom `RetryOptions` to be specified per API call
+* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call.
+* Remove 429 from the list of default HTTP status codes for retry.
+* Change StatusCodesForRetry to a slice so consumers can append to it.
+* Added support for retry-after in HTTP-date format.
+* Cleaned up some comments specific to storage.
+* Remove `Request.SetQueryParam()`
+* Renamed `MaxTries` to `MaxRetries`
+
+## 0.3.0 (2020-01-16)
+### Features Added
+* Added `DefaultRetryOptions` to create initialized default options.
+
+### Breaking Changes
+* Removed `Response.CheckStatusCode()`
+
+
+## 0.2.0 (2020-01-15)
+### Features Added
+* Add support for marshalling and unmarshalling JSON
+* Removed `Response.Payload` field
+* Exit early when unmarshalling if there is no payload
+
+
+## 0.1.0 (2020-01-10)
+### Features Added
+* Initial release
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
new file mode 100644
index 000000000000..48ea6616b5b8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
new file mode 100644
index 000000000000..35a74e18d09a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
@@ -0,0 +1,39 @@
+# Azure Core Client Module for Go
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore)
+[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main)
+[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)
+
+The `azcore` module provides a set of common interfaces and types for Go SDK client modules.
+These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html).
+
+## Getting started
+
+This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
+
+Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency.
+To add the latest version to your `go.mod` file, execute the following command.
+
+```bash
+go get github.com/Azure/azure-sdk-for-go/sdk/azcore
+```
+
+General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
+
+## Contributing
+This project welcomes contributions and suggestions.
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 000000000000..aab9218538da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 000000000000..9d077a3e1260 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. 
+ Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 000000000000..985b1bde2f2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 000000000000..f9fb23422dfd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "context" + "reflect" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error) +} + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
+func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 000000000000..7119699f9c6f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + // Do implements the Policy interface on policyFunc. 
+
+	func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
+		return pf(req)
+	}
+
+	func NewMyStatelessPolicy() policy.Policy {
+		return policyFunc(func(req *policy.Request) (*http.Response, error) {
+			// TODO: mutate/process Request here
+
+			// forward Request to next Policy & get Response/error
+			resp, err := req.Next()
+
+			// TODO: mutate/process Response/error here
+
+			// return Response/error to previous Policy
+			return resp, err
+		})
+	}
+
+Template for implementing a stateful Policy:
+
+	type MyStatefulPolicy struct {
+		// TODO: add configuration/setting fields here
+	}
+
+	// TODO: add initialization args to NewMyStatefulPolicy()
+	func NewMyStatefulPolicy() policy.Policy {
+		return &MyStatefulPolicy{
+			// TODO: initialize configuration/setting fields here
+		}
+	}
+
+	func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
+		// TODO: mutate/process Request here
+
+		// forward Request to next Policy & get Response/error
+		resp, err = req.Next()
+
+		// TODO: mutate/process Response/error here
+
+		// return Response/error to previous Policy
+		return resp, err
+	}
+
+Implementing the Transporter Interface
+
+The Transporter interface is responsible for sending the HTTP request and returning the corresponding
+HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
+implementation uses a shared http.Client from the standard library.
+
+The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
+
+Using Policy and Transporter Instances Via a Pipeline
+
+To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
+
+	func NewPipeline(transport Transporter, policies ...Policy) Pipeline
+
+The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
+followed by the Transporter.
+
+Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
+
+	func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
+
+	func (p Pipeline) Do(req *Request) (*http.Response, error)
+
+The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
+instances. The response/error is then sent through the same chain of Policy instances in reverse
+order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
+TransportA.
+
+	pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
+
+The flow of Request and Response looks like the following:
+
+	policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
+	                                                                   |
+	                                                  HTTP(S) endpoint
+	                                                                   |
+	caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
+
+Creating a Request Instance
+
+The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
+contains some internal state and provides various convenience methods. You create a Request instance
+by calling the runtime.NewRequest function:
+
+	func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
+
+If the Request should contain a body, call the SetBody method.
+
+	func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
+
+A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
+back to the beginning before retrying the network request and re-uploading the body.
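A minimal sketch tying NewRequest and SetBody together; the endpoint and payload are placeholders, and only the `runtime`, `policy`, and `streaming` packages are from the SDK:

```go
package main

import (
	"context"
	"net/http"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

// newPutRequest builds a Request whose body is seekable, so the retry policy
// can rewind and re-upload it on subsequent attempts.
func newPutRequest(ctx context.Context) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, "https://example.com/widgets/1")
	if err != nil {
		return nil, err
	}
	// strings.Reader is an io.ReadSeeker; streaming.NopCloser adapts it to
	// the io.ReadSeekCloser that SetBody expects.
	body := streaming.NopCloser(strings.NewReader(`{"name":"blue"}`))
	if err := req.SetBody(body, "application/json"); err != nil {
		return nil, err
	}
	return req, nil
}
```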
+
+Sending an Explicit Null
+
+Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
+
+	{
+		"delete-me": null
+	}
+
+This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
+a means to resolve the ambiguity between a field to be excluded and its zero-value.
+
+	type Widget struct {
+		Name  *string `json:",omitempty"`
+		Count *int    `json:",omitempty"`
+	}
+
+In the above example, Name and Count are defined as pointer-to-type to disambiguate between
+a missing value (nil) and a zero-value (0) which might have semantic differences.
+
+In a PATCH operation, any fields left as nil are to have their values preserved. When updating
+a Widget's count, one simply specifies the new value for Count, leaving Name nil.
+
+To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
+
+	w := Widget{
+		Count: azcore.NullValue[*int](),
+	}
+
+This sends an explicit "null" for Count, indicating that any current value for Count should be deleted.
+
+Processing the Response
+
+When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
+can inspect/mutate the *http.Response.
+
+Built-in Logging
+
+To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
+
+By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
+a callback that writes to the desired location. Any custom logging implementation MUST provide its
+own synchronization to handle concurrent invocations.
+
+See the docs for the log package for further details.
+
+Pageable Operations
+
+Pageable operations return potentially large data sets spread over multiple GET requests. The result of
+each GET is a "page" of data consisting of a slice of items.
+
+Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
+
+	func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
+
+The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
+and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
+
+	pager := widgetClient.NewListWidgetsPager(nil)
+	for pager.More() {
+		page, err := pager.NextPage(context.TODO())
+		// handle err
+		for _, widget := range page.Values {
+			// process widget
+		}
+	}
+
+Long-Running Operations
+
+Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
+by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
+of the following values.
+
+	* Succeeded - the LRO completed successfully
+	* Failed - the LRO failed to complete
+	* Canceled - the LRO was canceled
+
+LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
+
+	func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
+
+When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
+It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
+
+The *runtime.Poller[T] provides APIs for determining the state of the LRO.
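+
+For callers that need full control over the polling cadence, the Done, Poll, and Result methods can
+drive the poller manually; a sketch, reusing the hypothetical widgetClient from above:
+
+	poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
+	// handle err
+	for !poller.Done() {
+		_, err = poller.Poll(context.TODO())
+		// handle err; apply a custom delay between polls
+	}
+	result, err := poller.Result(context.TODO())
+	// handle err, use result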
+To wait for the LRO to complete, call the PollUntilDone() method.
+
+	poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
+	// handle err
+	result, err := poller.PollUntilDone(context.TODO(), nil)
+	// handle err
+	// use result
+
+The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
+context is canceled/timed out.
+
+Note that LROs can take anywhere from several seconds to several minutes; the duration is operation-dependent.
+Because of this variability, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate
+cancellation mechanism as required.
+
+Resume Tokens
+
+Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
+recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
+
+	token, err := poller.ResumeToken()
+	// handle error
+
+Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
+to poller.Poll() might change the poller's state. In this case, a new token should be created.
+
+After the token has been obtained, it can be used to recreate an instance of the originating poller.
+
+	poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
+		ResumeToken: token,
+	})
+
+When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.
+
+Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
+BeginA() will result in an error.
+*/
+package azcore
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
new file mode 100644
index 000000000000..17bd50c67320
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
@@ -0,0 +1,14 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azcore
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+
+// ResponseError is returned when a request is made to a service and
+// the service returns a non-success HTTP status code.
+// Use errors.As() to access this type in the error chain.
+type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
new file mode 100644
index 000000000000..23ea7e7c8eac
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
@@ -0,0 +1,48 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azcore
+
+import (
+	"strings"
+)
+
+// ETag is a property used for optimistic concurrency during updates.
+// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2
+// An ETag can be empty ("").
+type ETag string
+
+// ETagAny is an ETag that represents everything, the value is "*".
+const ETagAny ETag = "*"
+
+// Equals does a strong comparison of two ETags. Equals returns true when both
+// ETags are not weak and the values of the underlying strings are equal.
+func (e ETag) Equals(other ETag) bool {
+	return !e.IsWeak() && !other.IsWeak() && e == other
+}
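+
+// For illustration: given strong := ETag(`"abc"`) and weak := ETag(`W/"abc"`),
+// strong.Equals(weak) is false because a weak ETag never compares equal strongly,
+// while strong.WeakEquals(weak) is true because the opaque-tags match.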
+
+// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
+// character-by-character, regardless of either or both being tagged as "weak".
+func (e ETag) WeakEquals(other ETag) bool {
+	getStart := func(e1 ETag) int {
+		if e1.IsWeak() {
+			return 2
+		}
+		return 0
+	}
+	aStart := getStart(e)
+	bStart := getStart(other)
+
+	aVal := e[aStart:]
+	bVal := other[bStart:]
+
+	return aVal == bVal
+}
+
+// IsWeak specifies whether the ETag is strong or weak.
+func (e ETag) IsWeak() bool {
+	return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
new file mode 100644
index 000000000000..8fca32a7d4c9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -0,0 +1,61 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"io"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+type nopCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+// Exported as streaming.NopCloser().
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+	return nopCloser{rs}
+}
+
+// HasStatusCode returns true if the Response's status code is one of the specified values.
+// Exported as runtime.HasStatusCode().
+func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
+	if resp == nil {
+		return false
+	}
+	for _, sc := range statusCodes {
+		if resp.StatusCode == sc {
+			return true
+		}
+	}
+	return false
+}
+
+// Payload reads and returns the response body or an error.
+// On a successful read, the response body is cached.
+// Subsequent reads will access the cached value.
+// Exported as runtime.Payload().
+func Payload(resp *http.Response) ([]byte, error) {
+	// resp.Body won't be a *shared.NopClosingBytesReader if downloading was skipped
+	if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
+		return buf.Bytes(), nil
+	}
+	bytesBody, err := io.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return nil, err
+	}
+	resp.Body = shared.NewNopClosingBytesReader(bytesBody)
+	return bytesBody, nil
+}
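+
+// For illustration, a typical consumer pairs these helpers as follows, where
+// resp is an *http.Response that came back through a pipeline (sketch only):
+//
+//	if !HasStatusCode(resp, http.StatusOK) {
+//		// handle unexpected status code
+//	}
+//	body, err := Payload(resp) // cached; safe to read more than once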
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
new file mode 100644
index 000000000000..c44efd6eff57
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
@@ -0,0 +1,97 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/net/http/httpguts"
+)
+
+// Policy represents an extensibility point for the Pipeline that can mutate the specified
+// Request and react to the received Response.
+// Exported as policy.Policy.
+type Policy interface {
+	// Do applies the policy to the specified Request. When implementing a Policy, mutate the
+	// request before calling req.Next() to move on to the next policy, and respond to the result
+	// before returning to the caller.
+	Do(req *Request) (*http.Response, error)
+}
+
+// Pipeline represents a primitive for sending HTTP requests and receiving responses.
+// Its behavior can be extended by specifying policies during construction.
+// Exported as runtime.Pipeline.
+type Pipeline struct {
+	policies []Policy
+}
+
+// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
+// Exported as policy.Transporter.
+type Transporter interface {
+	// Do sends the HTTP request and returns the HTTP response or error.
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// used to adapt a Transporter to a Policy
+type transportPolicy struct {
+	trans Transporter
+}
+
+func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
+	if tp.trans == nil {
+		return nil, errors.New("missing transporter")
+	}
+	resp, err := tp.trans.Do(req.Raw())
+	if err != nil {
+		return nil, err
+	} else if resp == nil {
+		// there was no response and no error (rare but can happen)
+		// this ensures the retry policy will retry the request
+		return nil, errors.New("received nil response")
+	}
+	return resp, nil
+}
+
+// NewPipeline creates a new Pipeline object from the specified Policies.
+// Not directly exported, but used as part of runtime.NewPipeline().
+func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
+	// transport policy must always be the last in the slice
+	policies = append(policies, transportPolicy{trans: transport})
+	return Pipeline{
+		policies: policies,
+	}
+}
+
+// Do is called for each and every HTTP request. It passes the request through all
+// the Policy objects (which can transform the Request's URL/query parameters/headers)
+// and ultimately sends the transformed HTTP request over the network.
+func (p Pipeline) Do(req *Request) (*http.Response, error) {
+	if req == nil {
+		return nil, errors.New("request cannot be nil")
+	}
+	// check copied from Transport.roundTrip()
+	for k, vv := range req.Raw().Header {
+		if !httpguts.ValidHeaderFieldName(k) {
+			if req.Raw().Body != nil {
+				req.Raw().Body.Close()
+			}
+			return nil, fmt.Errorf("invalid header field name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				if req.Raw().Body != nil {
+					req.Raw().Body.Close()
+				}
+				return nil, fmt.Errorf("invalid header field value %q for key %v", v, k)
+			}
+		}
+	}
+	req.policies = p.policies
+	return req.Next()
+}
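+
+// For illustration, constructing and invoking a pipeline directly, where
+// myTransport is a placeholder Transporter implementation (sketch only):
+//
+//	pl := NewPipeline(myTransport)
+//	req, err := NewRequest(context.Background(), http.MethodGet, "https://example.com")
+//	// handle err
+//	resp, err := pl.Do(req)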
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
new file mode 100644
index 000000000000..4aeec158937b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -0,0 +1,156 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"strconv"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
+// Don't use this type directly, use NewRequest() instead.
+// Exported as policy.Request.
+type Request struct {
+	req      *http.Request
+	body     io.ReadSeekCloser
+	policies []Policy
+	values   opValues
+}
+
+type opValues map[reflect.Type]interface{}
+
+// set adds/changes a value
+func (ov opValues) set(value interface{}) {
+	ov[reflect.TypeOf(value)] = value
+}
+
+// get looks for a value previously stored via set(), copying it into the pointed-to destination
+func (ov opValues) get(value interface{}) bool {
+	v, ok := ov[reflect.ValueOf(value).Elem().Type()]
+	if ok {
+		reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v))
+	}
+	return ok
+}
+
+// NewRequest creates a new Request with the specified input.
+// Exported as runtime.NewRequest().
+func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
+	req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
+	if err != nil {
+		return nil, err
+	}
+	if req.URL.Host == "" {
+		return nil, errors.New("no Host in request URL")
+	}
+	if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
+		return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
+	}
+	return &Request{req: req}, nil
+}
+
+// Body returns the original body specified when the Request was created.
+func (req *Request) Body() io.ReadSeekCloser {
+	return req.body
+}
+
+// Raw returns the underlying HTTP request.
+func (req *Request) Raw() *http.Request {
+	return req.req
+}
+
+// Next calls the next policy in the pipeline.
+// If there are no more policies, nil and an error are returned.
+// This method is intended to be called from pipeline policies.
+// To send a request through a pipeline call Pipeline.Do().
+func (req *Request) Next() (*http.Response, error) {
+	if len(req.policies) == 0 {
+		return nil, errors.New("no more policies")
+	}
+	nextPolicy := req.policies[0]
+	nextReq := *req
+	nextReq.policies = nextReq.policies[1:]
+	return nextPolicy.Do(&nextReq)
+}
+
+// SetOperationValue adds/changes a mutable key/value associated with a single operation.
+func (req *Request) SetOperationValue(value interface{}) {
+	if req.values == nil {
+		req.values = opValues{}
+	}
+	req.values.set(value)
+}
+
+// OperationValue looks for a value set by SetOperationValue().
+func (req *Request) OperationValue(value interface{}) bool {
+	if req.values == nil {
+		return false
+	}
+	return req.values.get(value)
+}
+
+// SetBody sets the specified ReadSeekCloser as the HTTP request body.
+func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
+	// Set the body and content length.
+	size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
+	if err != nil {
+		return err
+	}
+	if size == 0 {
+		body.Close()
+		return nil
+	}
+	_, err = body.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	req.Raw().GetBody = func() (io.ReadCloser, error) {
+		_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
+		return body, err
+	}
+	// keep a copy of the original body. this is to handle cases
+	// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
+	req.body = body
+	req.req.Body = body
+	req.req.ContentLength = size
+	req.req.Header.Set(shared.HeaderContentType, contentType)
+	req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
+	return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
+func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 000000000000..3db6acc83258 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,142 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := Payload(resp) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. 
+ ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. +func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := Payload(e.RawResponse) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 000000000000..0684cb317390 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 000000000000..0194b8b01143 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !pollers.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: pollers.StatusInProgress, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
+		state, err := pollers.GetStatus(resp)
+		if err != nil {
+			return "", err
+		} else if state == "" {
+			return "", errors.New("the response did not contain a status")
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	if p.resp.StatusCode == http.StatusNoContent {
+		return nil
+	} else if pollers.Failed(p.CurState) {
+		return exported.NewResponseError(p.resp)
+	}
+	var req *exported.Request
+	var err error
+	if p.Method == http.MethodPatch || p.Method == http.MethodPut {
+		// for PATCH and PUT, the final GET is on the original resource URL
+		req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+	} else if p.Method == http.MethodPost {
+		if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
+			// no final GET required
+		} else if p.FinalState == pollers.FinalStateViaOriginalURI {
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+		} else if p.LocURL != "" {
+			// ideally FinalState would be set to "location" but it isn't always.
+			// must check last due to more permissive condition.
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// if a final GET request has been created, execute it
+	if req != nil {
+		resp, err := p.pl.Do(req)
+		if err != nil {
+			return err
+		}
+		p.resp = resp
+	}
+
+	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
new file mode 100644
index 000000000000..99e9f2f8d0a3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -0,0 +1,130 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package body
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+)
+
+// kind is the identifier of this type in a resume token.
+const kind = "body"
+
+// Applicable returns true if the LRO is using no headers, just provisioning state.
+// This is only applicable to PATCH and PUT methods and assumes no polling headers.
+func Applicable(resp *http.Response) bool {
+	// we can't check for absence of headers due to some misbehaving services
+	// like redis that return a Location header but don't actually use that protocol
+	return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
+}
+
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+	t, ok := token["type"]
+	if !ok {
+		return false
+	}
+	tt, ok := t.(string)
+	if !ok {
+		return false
+	}
+	return tt == kind
+}
+
+// Poller is an LRO poller that uses the Body pattern.
+type Poller[T any] struct {
+	pl exported.Pipeline
+
+	resp *http.Response
+
+	// The poller's type, used for resume token processing.
+	Type string `json:"type"`
+
+	// The URL for polling.
+	PollURL string `json:"pollURL"`
+
+	// The LRO's current state.
+	CurState string `json:"state"`
+}
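+
+// Note: New below derives the initial state from the first response as follows:
+// a 201 or 200 carrying a provisioning state adopts that state; a 200 without
+// one and a 204 map to Succeeded; anything else keeps the InProgress default.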
+
+// New creates a new Poller from the provided initial response.
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
+	if resp == nil {
+		log.Write(log.EventLRO, "Resuming Body poller.")
+		return &Poller[T]{pl: pl}, nil
+	}
+	log.Write(log.EventLRO, "Using Body poller.")
+	p := &Poller[T]{
+		pl:      pl,
+		resp:    resp,
+		Type:    kind,
+		PollURL: resp.Request.URL.String(),
+	}
+	// default initial state to InProgress. depending on the HTTP
+	// status code and provisioning state, we might change the value.
+	curState := pollers.StatusInProgress
+	provState, err := pollers.GetProvisioningState(resp)
+	if err != nil && !errors.Is(err, pollers.ErrNoBody) {
+		return nil, err
+	}
+	if resp.StatusCode == http.StatusCreated && provState != "" {
+		// absence of provisioning state is ok for a 201, means the operation is in progress
+		curState = provState
+	} else if resp.StatusCode == http.StatusOK {
+		if provState != "" {
+			curState = provState
+		} else if provState == "" {
+			// for a 200, absence of provisioning state indicates success
+			curState = pollers.StatusSucceeded
+		}
+	} else if resp.StatusCode == http.StatusNoContent {
+		curState = pollers.StatusSucceeded
+	}
+	p.CurState = curState
+	return p, nil
+}
+
+func (p *Poller[T]) Done() bool {
+	return pollers.IsTerminalState(p.CurState)
+}
+
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
+		if resp.StatusCode == http.StatusNoContent {
+			p.resp = resp
+			p.CurState = pollers.StatusSucceeded
+			return p.CurState, nil
+		}
+		state, err := pollers.GetProvisioningState(resp)
+		if errors.Is(err, pollers.ErrNoBody) {
+			// a missing response body in non-204 case is an error
+			return "", err
+		} else if err != nil {
+			return "", err
+		} else if state == "" {
+			// a response body without provisioning state is considered terminal success
+			state = pollers.StatusSucceeded
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
new file mode 100644
index 000000000000..56c2b9029297
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -0,0 +1,111 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package loc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+// kind is the identifier of this type in a resume token.
+const kind = "loc"
+
+// Applicable returns true if the LRO is using Location.
+func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: pollers.StatusInProgress, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. + provState, _ := pollers.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = pollers.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = pollers.StatusSucceeded + } else { + p.CurState = pollers.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 000000000000..dd714e768c5e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,140 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Applicable returns true if the LRO is using Operation-Location. 
+func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !pollers.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. + curState := pollers.StatusInProgress + status, err := pollers.GetStatus(resp) + if err != nil && !errors.Is(err, pollers.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + state, err := pollers.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a 
final GET request has been created, execute it
+	if req != nil {
+		resp, err := p.pl.Do(req)
+		if err != nil {
+			return err
+		}
+		p.resp = resp
+	}
+
+	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
new file mode 100644
index 000000000000..37ed647f4e0d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
@@ -0,0 +1,24 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package pollers
+
+// FinalStateVia is the enumerated type for the possible final-state-via values.
+type FinalStateVia string
+
+const (
+	// FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
+	FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation"
+
+	// FinalStateViaLocation indicates the final payload comes from the Location URL.
+	FinalStateViaLocation FinalStateVia = "location"
+
+	// FinalStateViaOriginalURI indicates the final payload comes from the original URL.
+	FinalStateViaOriginalURI FinalStateVia = "original-uri"
+
+	// FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
+	FinalStateViaOpLocation FinalStateVia = "operation-location"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
new file mode 100644
index 000000000000..17ab7dadc3fa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
@@ -0,0 +1,317 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package pollers
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+// the well-known set of LRO status/provisioning state values.
+const (
+	StatusSucceeded  = "Succeeded"
+	StatusCanceled   = "Canceled"
+	StatusFailed     = "Failed"
+	StatusInProgress = "InProgress"
+)
+
+// IsTerminalState returns true if the LRO's state is terminal.
+func IsTerminalState(s string) bool {
+	return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
+}
+
+// Failed returns true if the LRO's state is terminal failure.
+func Failed(s string) bool {
+	return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
+}
+
+// Succeeded returns true if the LRO's state is terminal success.
+func Succeeded(s string) bool {
+	return strings.EqualFold(s, StatusSucceeded)
+}
+
+// StatusCodeValid returns true if the LRO response contains a valid HTTP status code.
+func StatusCodeValid(resp *http.Response) bool {
+	return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
+}
+
+// IsValidURL verifies that the URL is valid and absolute.
+func IsValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
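+
+// For illustration, a resume-token round trip looks like the following, where
+// FinalResult and somePoller are placeholders (sketch only):
+//
+//	tk, err := NewResumeToken[FinalResult](somePoller) // serialize poller state
+//	// handle err
+//	err = IsTokenValid[FinalResult](tk) // non-nil if tk was minted for another type
+//	raw, err := ExtractToken(tk)        // poller-specific JSON used for rehydration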
+
+// getTokenTypeName creates a type name from the type parameter T.
+func getTokenTypeName[T any]() (string, error) {
+	tt := shared.TypeOfT[T]()
+	var n string
+	if tt.Kind() == reflect.Pointer {
+		n = "*"
+		tt = tt.Elem()
+	}
+	n += tt.Name()
+	if n == "" {
+		return "", errors.New("nameless types are not allowed")
+	}
+	return n, nil
+}
+
+type resumeTokenWrapper[T any] struct {
+	Type  string `json:"type"`
+	Token T      `json:"token"`
+}
+
+// NewResumeToken creates a resume token from the specified type.
+// An error is returned if the generic type has no name (e.g. struct{}).
+func NewResumeToken[TResult, TSource any](from TSource) (string, error) {
+	n, err := getTokenTypeName[TResult]()
+	if err != nil {
+		return "", err
+	}
+	b, err := json.Marshal(resumeTokenWrapper[TSource]{
+		Type:  n,
+		Token: from,
+	})
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+// ExtractToken returns the poller-specific token information from the provided token value.
+func ExtractToken(token string) ([]byte, error) {
+	raw := map[string]json.RawMessage{}
+	if err := json.Unmarshal([]byte(token), &raw); err != nil {
+		return nil, err
+	}
+	// this is dependent on the type resumeTokenWrapper[T]
+	tk, ok := raw["token"]
+	if !ok {
+		return nil, errors.New("missing token value")
+	}
+	return tk, nil
+}
+
+// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
+func IsTokenValid[T any](token string) error {
+	raw := map[string]interface{}{}
+	if err := json.Unmarshal([]byte(token), &raw); err != nil {
+		return err
+	}
+	t, ok := raw["type"]
+	if !ok {
+		return errors.New("missing type value")
+	}
+	tt, ok := t.(string)
+	if !ok {
+		return fmt.Errorf("invalid type format %T", t)
+	}
+	n, err := getTokenTypeName[T]()
+	if err != nil {
+		return err
+	}
+	if tt != n {
+		return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n)
+	}
+	return nil
+}
+
+// ErrNoBody is returned if the response didn't contain a body.
+var ErrNoBody = errors.New("the response did not contain a body")
+
+// GetJSON reads the response body into a raw JSON object.
+// It returns ErrNoBody if there was no content.
+func GetJSON(resp *http.Response) (map[string]interface{}, error) {
+	body, err := exported.Payload(resp)
+	if err != nil {
+		return nil, err
+	}
+	if len(body) == 0 {
+		return nil, ErrNoBody
+	}
+	// unmarshal the body to get the value
+	var jsonBody map[string]interface{}
+	if err = json.Unmarshal(body, &jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+// provisioningState returns the provisioning state from the response or the empty string.
+func provisioningState(jsonBody map[string]interface{}) string {
+	jsonProps, ok := jsonBody["properties"]
+	if !ok {
+		return ""
+	}
+	props, ok := jsonProps.(map[string]interface{})
+	if !ok {
+		return ""
+	}
+	rawPs, ok := props["provisioningState"]
+	if !ok {
+		return ""
+	}
+	ps, ok := rawPs.(string)
+	if !ok {
+		return ""
+	}
+	return ps
+}
+
+// status returns the status from the response or the empty string.
+func status(jsonBody map[string]interface{}) string {
+	rawStatus, ok := jsonBody["status"]
+	if !ok {
+		return ""
+	}
+	status, ok := rawStatus.(string)
+	if !ok {
+		return ""
+	}
+	return status
+}
+
+// GetStatus returns the LRO's status from the response body.
+// Typically used for Azure-AsyncOperation flows.
+// If there is no status in the response body the empty string is returned.
+func GetStatus(resp *http.Response) (string, error) {
+	jsonBody, err := GetJSON(resp)
+	if err != nil {
+		return "", err
+	}
+	return status(jsonBody), nil
+}
+
+// GetProvisioningState returns the LRO's state from the response body.
+// If there is no state in the response body the empty string is returned.
+func GetProvisioningState(resp *http.Response) (string, error) {
+	jsonBody, err := GetJSON(resp)
+	if err != nil {
+		return "", err
+	}
+	return provisioningState(jsonBody), nil
+}
+
+// GetResourceLocation returns the LRO's resourceLocation value from the response body.
+// Typically used for Operation-Location flows.
+// If there is no resourceLocation in the response body the empty string is returned.
+func GetResourceLocation(resp *http.Response) (string, error) {
+	jsonBody, err := GetJSON(resp)
+	if err != nil {
+		return "", err
+	}
+	v, ok := jsonBody["resourceLocation"]
+	if !ok {
+		// it might be ok if the field doesn't exist, the caller must make that determination
+		return "", nil
+	}
+	vv, ok := v.(string)
+	if !ok {
+		return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
+	}
+	return vv, nil
+}
+
+// NopPoller is used if the operation synchronously completed.
+type NopPoller[T any] struct {
+	resp   *http.Response
+	result T
+}
+
+// NewNopPoller creates a NopPoller from the provided response.
+// It unmarshals the response body into an instance of T.
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
+	np := &NopPoller[T]{resp: resp}
+	if resp.StatusCode == http.StatusNoContent {
+		return np, nil
+	}
+	payload, err := exported.Payload(resp)
+	if err != nil {
+		return nil, err
+	}
+	if len(payload) == 0 {
+		return np, nil
+	}
+	if err = json.Unmarshal(payload, &np.result); err != nil {
+		return nil, err
+	}
+	return np, nil
+}
+
+func (*NopPoller[T]) Done() bool {
+	return true
+}
+
+func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
+	return p.resp, nil
+}
+
+func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
+	*out = p.result
+	return nil
+}
+
+// PollHelper creates and executes the request, calling update() with the response.
+// If the request fails, the update func is not called.
+// The update func returns the state of the operation for logging purposes or an error
+// if it fails to extract the required state from the response.
+func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
+	req, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
+	if err != nil {
+		return err
+	}
+	resp, err := pl.Do(req)
+	if err != nil {
+		return err
+	}
+	state, err := update(resp)
+	if err != nil {
+		return err
+	}
+	log.Writef(log.EventLRO, "State %s", state)
+	return nil
+}
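+
+// For illustration, the pollers in this module drive PollHelper like so,
+// caching the response and state on each iteration (sketch only):
+//
+//	err := PollHelper(ctx, pollURL, pl, func(resp *http.Response) (string, error) {
+//		state, err := GetStatus(resp)
+//		// cache resp and state on the poller
+//		return state, err
+//	})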
+
+// ResultHelper processes the response as success or failure.
+// In the success case, it unmarshals the payload into either a new instance of T or out.
+// In the failure case, it creates an *azcore.ResponseError from the response.
+func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
+	// short-circuit the simple success case with no response body to unmarshal
+	if resp.StatusCode == http.StatusNoContent {
+		return nil
+	}
+
+	defer resp.Body.Close()
+	if !StatusCodeValid(resp) || failed {
+		// the LRO failed. unmarshal the error and update state
+		return exported.NewResponseError(resp)
+	}
+
+	// success case
+	payload, err := exported.Payload(resp)
+	if err != nil {
+		return err
+	}
+	if len(payload) == 0 {
+		return nil
+	}
+
+	if err = json.Unmarshal(payload, out); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
new file mode 100644
index 000000000000..4dd39e68ce86
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -0,0 +1,34 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package shared
+
+const (
+	ContentTypeAppJSON = "application/json"
+	ContentTypeAppXML  = "application/xml"
+)
+
+const (
+	HeaderAuthorization          = "Authorization"
+	HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
+	HeaderAzureAsync             = "Azure-AsyncOperation"
+	HeaderContentLength          = "Content-Length"
+	HeaderContentType            = "Content-Type"
+	HeaderLocation               = "Location"
+	HeaderOperationLocation      = "Operation-Location"
+	HeaderRetryAfter             = "Retry-After"
+	HeaderUserAgent              = "User-Agent"
+)
+
+const BearerTokenPrefix = "Bearer "
+
+const (
+	// Module is the name of the calling module used in telemetry data.
+	Module = "azcore"
+
+	// Version is the semantic version (see http://semver.org) of this module.
+	Version = "v1.1.0"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
new file mode 100644
index 000000000000..96eef2956fff
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
@@ -0,0 +1,135 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package shared
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
+type CtxWithHTTPHeaderKey struct{}
+
+// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
+type CtxWithRetryOptionsKey struct{}
+
+// CtxIncludeResponseKey is used as a context key for retrieving the raw response.
+type CtxIncludeResponseKey struct{}
+
+// Delay waits for the duration to elapse or the context to be cancelled.
+func Delay(ctx context.Context, delay time.Duration) error {
+	select {
+	case <-time.After(delay):
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// RetryAfter returns non-zero if the response contains a Retry-After header value.
+func RetryAfter(resp *http.Response) time.Duration {
+	if resp == nil {
+		return 0
+	}
+	ra := resp.Header.Get(HeaderRetryAfter)
+	if ra == "" {
+		return 0
+	}
+	// retry-after values are expressed in either number of
+	// seconds or an HTTP-date indicating when to try again
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		return time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		return time.Until(t)
+	}
+	return 0
+}
+
+// TypeOfT returns the type of the generic type param.
+func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// BytesSetter abstracts replacing a byte slice on some type. +type BytesSetter interface { + Set(b []byte) +} + +// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice. +func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader { + return &NopClosingBytesReader{s: data} +} + +// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type NopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *NopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*NopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *NopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. +func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} + +var _ BytesSetter = (*NopClosingBytesReader)(nil) + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 000000000000..2f3901bff3c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 000000000000..7bde29d0a462 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. 
+type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are written. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 000000000000..fad2579ed6c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 000000000000..bfc71e9a002f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy.
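+ // For example (editorial sketch), ClientOptions{Retry: RetryOptions{MaxRetries: 2}} caps each request at one initial try plus two retries.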
+ Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields are set to their defaults during use. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default value is 120 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // The default values are 408, 429, 500, 502, 503, and 504. + // Specifying an empty slice will cause retries to happen only for transport errors. + StatusCodes []int +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// BearerTokenOptions configures the bearer token policy's behavior.
+type BearerTokenOptions struct { + // placeholder for future options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 000000000000..c9cfa438cb34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 000000000000..6d03b291ebff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 000000000000..5507665d651d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. + Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. 
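+ // A typical consumption loop, as an editorial sketch (the page contents and error handling are assumptions): + // + //	for pager.More() { + //	    page, err := pager.NextPage(ctx) + //	    if err != nil { + //	        // handle error + //	    } + //	    // consume page + //	}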
+func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO pager; we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 000000000000..ad75ae2ab249 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers. +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + headers = append(headers, plOpts.AllowedHeaders...) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + qp = append(qp, plOpts.AllowedQueryParameters...) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{policyFunc(includeResponsePolicy)} + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...)
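+ // NOTE (editorial): everything appended after NewRetryPolicy above runs once per try, so the log policy added below observes every attempt, while per-call policies run once per request.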
+ policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} + +// policyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type policyFunc func(*policy.Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 000000000000..71e3062be0bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + // the following fields are read-only + cred azcore.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes}) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity. +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
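+ // Usage sketch (editorial; the azidentity credential construction and the ARM scope value are assumptions): + // + //	cred, err := azidentity.NewDefaultAzureCredential(nil) + //	// handle err + //	pol := runtime.NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, nil)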
+func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + return &BearerTokenPolicy{ + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: b, + req: req, + } + tk, err := b.mainResource.Get(as) + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 000000000000..02d621ee89e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. +func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = exported.Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 000000000000..770e0a2b6a64 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// httpHeaderPolicy adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 000000000000..4714baa30cd6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 000000000000..faf175e3fd2a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,251 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +package runtime + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range o.AllowedQueryParams { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If the response and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // redact applicable query params + cpURL := *req.Raw().URL + qp := cpURL.Query() + for k := range qp { + if _, ok := p.allowedQP[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + cpURL.RawQuery = qp.Encode() + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// writeHeader appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value.
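+ // For example, "application/json; charset=utf-8" and "text/plain" bodies are logged, while "application/octet-stream" is skipped (editorial note).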
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := ioutil.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 000000000000..db70955b28bc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that adds the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + const requestIdHeader = "x-ms-client-request-id" + if req.Raw().Header.Get(requestIdHeader) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(requestIdHeader, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 000000000000..9d630e47125e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,242 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 120 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 4 * time.Second + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is a nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String()) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries.
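+ // NOTE (editorial): with the default RetryDelay of 4s, calcDelay above yields roughly 4s, 12s, and 28s for tries 1-3, before jitter and the MaxRetryDelay cap are applied.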
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + resp, err = req.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + return + } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from the inner stream +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success.
+ return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 000000000000..2abcdc576b69 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [<application_id> ]azsdk-go-<mod>/<ver> (<platform_info>). +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 000000000000..14c90fecfe5d --- /dev/null +++
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !pollers.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. 
+ // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. +func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. 
+// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. 
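+// An explicit loop equivalent to PollUntilDone, as an editorial sketch: +// +//	for !poller.Done() { +//	    if _, err := poller.Poll(ctx); err != nil { +//	        // handle error +//	    } +//	    // sleep between polls, honoring Retry-After +//	} +//	res, err := poller.Result(ctx)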
+func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 000000000000..21e5c578d542 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is used to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path. +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + for i := 0; i < len(paths); i++ { + root = strings.TrimRight(root, "/") + paths[i] = strings.TrimLeft(paths[i], "/") + root += "/" + paths[i] + } + + if qps != "" { + if !strings.HasSuffix(root, "/") { + root += "/" + } + return root + "?" + qps + } + return root +} + +// EncodeByteArray will base-64 encode the byte slice v.
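+// For example (editorial note), EncodeByteArray([]byte("hello"), Base64StdFormat) returns "aGVsbG8=", while Base64URLFormat yields the unpadded "aGVsbG8".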
+func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray base-64 encodes the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + v = cloneWithoutReadOnlyFields(v) + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // include the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + // this is the body to upload, the key is its file name + fd, err := writer.CreateFormFile(k, k) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, rsc); err != nil { + return err + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. + if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag.
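+ // NOTE (editorial): a read-only field is one tagged like the following hypothetical model field: + // + //	ID *string `json:"id,omitempty" azure:"ro"`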
+ for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 000000000000..2322f0a201ba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,137 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. +func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. 
+func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // UTF8 + trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf")) + if len(trimmed) < len(payload) { + resp.Body.(shared.BytesSetter).Set(trimmed) + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 000000000000..869bed511842 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package runtime
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+)
+
+var defaultHTTPClient *http.Client
+
+func init() {
+	defaultTransport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		TLSClientConfig: &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		},
+	}
+	defaultHTTPClient = &http.Client{
+		Transport: defaultTransport,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
new file mode 100644
index 000000000000..cadaef3d5842
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
@@ -0,0 +1,9 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package streaming contains helpers for streaming IO operations and progress reporting.
+package streaming
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
new file mode 100644
index 000000000000..8563375af07e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
@@ -0,0 +1,72 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package streaming
+
+import (
+	"io"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+)
+
+type progress struct {
+	rc     io.ReadCloser
+	rsc    io.ReadSeekCloser
+	pr     func(bytesTransferred int64)
+	offset int64
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+	return exported.NopCloser(rs)
+}
+
+// NewRequestProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser {
+	return &progress{
+		rc:     body,
+		rsc:    body,
+		pr:     pr,
+		offset: 0,
+	}
+}
+
+// NewResponseProgress adds progress reporting to an HTTP response's body stream.
+func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser {
+	return &progress{
+		rc:     body,
+		rsc:    nil,
+		pr:     pr,
+		offset: 0,
+	}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (p *progress) Read(b []byte) (n int, err error) {
+	n, err = p.rc.Read(b)
+	if err != nil && err != io.EOF {
+		return
+	}
+	p.offset += int64(n)
+	// Invokes the user's callback method to report progress
+	p.pr(p.offset)
+	return
+}
+
+// Seek expects only an offset of zero from the beginning of the stream.
+func (p *progress) Seek(offset int64, whence int) (int64, error) {
+	// This should only ever be called with offset = 0 and whence = io.SeekStart
+	n, err := p.rsc.Seek(offset, whence)
+	if err == nil {
+		p.offset = int64(n)
+	}
+	return n, err
+}
+
+// Close closes the underlying stream.
+func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 000000000000..faa98c9dc514 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 000000000000..e0e4817b90d1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 000000000000..670839fd4414 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,295 @@ +# Release History + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. +* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. 
Credentials are now configured for sovereign or private
  clouds with the API in `azcore/cloud`, for example:
+  ```go
+  // before
+  opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment}
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+
+  // after
+  import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+  opts := azidentity.ClientSecretCredentialOptions{}
+  opts.Cloud = cloud.AzureGovernment
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+  ```
+
+## 0.13.2 (2022-03-08)
+
+### Bugs Fixed
+* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential`
+  ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144))
+
+### Other Changes
+* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01
+  ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086))
+
+## 0.13.1 (2022-02-08)
+
+### Features Added
+* `EnvironmentCredential` supports certificate SNI authentication when
+  `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true".
+  ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851))
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken()` now returns an error when configured for
+  a user assigned identity in Azure Cloud Shell (which doesn't support such identities)
+  ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946))
+
+### Other Changes
+* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the
+  error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token
+  from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923))
+
+## 0.13.0 (2022-01-11)
+
+### Breaking Changes
+* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name
+* Unexported `CredentialUnavailableError`
+* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`.
+  * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called.
+  * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential.
+  * `DefaultAzureCredential` will also re-use the first successful credential on subsequent calls to `GetToken`.
+  * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential.
+
+### Other Changes
+* `ManagedIdentityCredential` no longer probes IMDS before requesting a token
+  from it. Also, an error response from IMDS no longer disables a credential
+  instance. Following an error, a credential instance will continue to send
+  requests to IMDS as necessary.
+* Adopted MSAL for user and service principal authentication
+* Updated `azcore` requirement to 0.21.0
+
+## 0.12.0 (2021-11-02)
+### Breaking Changes
+* Raised minimum go version to 1.16
+* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's
+  `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy.
+* The `AuthorityHost` field in credential options structs is now a custom type,
+  `AuthorityHost`, with underlying type `string`
+* `NewChainedTokenCredential` has a new signature to accommodate a placeholder
+  options struct:
+  ```go
+  // before
+  cred, err := NewChainedTokenCredential(credA, credB)
+
+  // after
+  cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil)
+  ```
+* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential`
+  from `DefaultAzureCredentialOptions`
+* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of
+  a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases:
+  ```go
+  // before
+  cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil)
+
+  // after
+  certData, err := os.ReadFile("/cert.pem")
+  certs, key, err := ParseCertificates(certData, password)
+  cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+  ```
+* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port`
+* Removed `AADAuthenticationFailedError`
+* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now
+  specified by `ManagedIdentityCredentialOptions.ID`:
+  ```go
+  // before
+  cred, err := NewManagedIdentityCredential("client-id", nil)
+  // or, for a resource ID
+  opts := &ManagedIdentityCredentialOptions{ID: ResourceID}
+  cred, err := NewManagedIdentityCredential("/subscriptions/...", opts)
+
+  // after
+  clientID := ClientID("7cf7db0d-...")
+  opts := &ManagedIdentityCredentialOptions{ID: clientID}
+  // or, for a resource ID
+  resID := ResourceID("/subscriptions/...")
+  opts := &ManagedIdentityCredentialOptions{ID: resID}
+  cred, err := NewManagedIdentityCredential(opts)
+  ```
+* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error` (see the sketch after this list)
+* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization
+  syntax, this change renames `HTTPClient` fields to `Transport`.
+* Renamed `LogCredential` to `EventCredential`
+* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH`
+* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and
+  `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead.
+* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two
+  interfaces having the same names.
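As a quick illustration of the `UserPrompt` change noted above, a minimal sketch (the printing logic is only an example; `context` and `fmt` imports are assumed):

```go
opts := azidentity.DeviceCodeCredentialOptions{
	UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
		// display the sign-in instructions however the application prefers
		fmt.Println(dc.Message)
		return nil
	},
}
cred, err := azidentity.NewDeviceCodeCredential(&opts)
```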
+ +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt new file mode 100644 index 000000000000..48ea6616b5b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 000000000000..4ac53eb7b276 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. + +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. 
The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China:
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina}
+
+cred, err := azidentity.NewClientSecretCredential(
+    tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts},
+)
+handle(err)
+```
+
+## Client secret authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+    *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Client certificate authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "os"
+
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+handle(err)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    *oauthCfg, clientID, certificate, rsaPrivateKey, "https://management.azure.com/",
+)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "os"
+
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certs, key, err := azidentity.ParseCertificates(certData, nil)
+handle(err)
+
+cred, err := azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Managed identity
+
+### `autorest/adal`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewManagedIdentityCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+### User-assigned identities
+
+`autorest/adal`:
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+opts := &adal.ManagedIdentityOptions{ClientID: "..."}
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", opts)
+handle(err)
+```
+
+`azidentity`:
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")}
+cred, err := azidentity.NewManagedIdentityCredential(&opts)
+handle(err)
+```
+
+## Device code authentication
+
+### `autorest/adal`
+
+```go
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthClient := &http.Client{}
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+resource := "https://management.azure.com/"
+deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource)
+handle(err)
+
+// display instructions, wait for the user to authenticate
+fmt.Println(*deviceCode.Message)
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewDeviceCodeCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential).
+
+## Acquire a token
+
+### `autorest/adal`
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+    *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+### `azidentity`
+
+In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method.
+
+```go
+import (
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+tk, err := cred.GetToken(
+    context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}},
+)
+if err == nil {
+    token := tk.Token
+}
+```
+
+Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects.
For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 000000000000..68b35a545c3b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,239 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Azure Active +Directory (AAD). 
It offers a variety of credential types capable of acquiring +an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +2. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +3. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. + +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. 
For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. + +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI +|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. 
Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key (without password protection) + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 000000000000..1e28d181fefa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,192 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. + +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. 
Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
<ul><li>[Enable logging](#enable-and-configure-logging) to get further diagnostic information.</li><li>Consult the troubleshooting guide for underlying credential types for more information:<ul><li>[EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)</li><li>[ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)</li><li>[AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)</li></ul></li></ul>|
+|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|<ul><li>[Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.</li><li>If an unexpected credential is returning a token, check application configuration such as environment variables.</li><li>Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.</li></ul>|
+
+## Troubleshoot EnvironmentCredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|
+
+## Troubleshoot ClientSecretCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+## Troubleshoot ClientCertificateCredential authentication issues
+
+| Error Code | Description | Mitigation |
+|---|---|---|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+## Troubleshoot UsernamePasswordCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
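When investigating any of the credential failures above, it can also help to inspect the Azure AD response programmatically. A minimal sketch, assuming the pointer-valued `AuthenticationFailedError` type and its `RawResponse` field described in this module's changelog (`errors` and `fmt` imports are assumed):

```go
var authErr *azidentity.AuthenticationFailedError
if errors.As(err, &authErr) && authErr.RawResponse != nil {
	// the status code and body of the Azure AD response often contain
	// an AADSTS error code like those listed in the tables above
	fmt.Println(authErr.RawResponse.StatusCode)
}
```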
+ +|Host Environment| | | +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error (502 or 504 status code).|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`.| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First, ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_HEADER` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod.|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).| + +## Troubleshoot AzureCLICredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn't installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
    • Validate the installation location is in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 000000000000..0faee55ef04d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientID = "AZURE_CLIENT_ID" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID returns true if it receives a valid tenantID, false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireTokenSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireTokenByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireTokenSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireTokenByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.InteractiveAuthOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 000000000000..68f46d51a1ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,189 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameAzureCLI = "AzureCLICredential" + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged-in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init applies default values to any unset fields of AzureCLICredentialOptions. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + tokenProvider azureCLITokenProvider + tenantID string +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + return &AzureCLICredential{ + tokenProvider: cp.tokenProvider, + tenantID: cp.TenantID, + }, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. +func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + scope := strings.TrimSuffix(opts.Scopes[0], defaultSuffix) + at, err := c.authenticate(ctx, scope) + if err != nil { + return azcore.AccessToken{}, err + } + logGetTokenSuccess(c, opts) + return at, nil +} + +const timeoutCLIRequest = 10 * time.Second + +func (c *AzureCLICredential) authenticate(ctx context.Context, resource string) (azcore.AccessToken, error) { + output, err := c.tokenProvider(ctx, resource, c.tenantID) + if err != nil { + return azcore.AccessToken{}, err + } + + return c.createAccessToken(output) +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". 
Only alphanumeric characters and ".", ":", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + ctx, cancel := context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + tokenExpirationDate, err := parseExpirationDate(t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: *tokenExpirationDate, + } + return converted, nil +} + +// parseExpirationDate parses either an Azure CLI or CloudShell date into a time object +func parseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr != nil { + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + return &expirationDate, nil + } + return &expirationDate, nil +} + +var _ azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 000000000000..86a89064569a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
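+
+// A usage sketch: a chain that tries managed identity first and falls back
+// to the Azure CLI (the credential variables and the azcore import are
+// assumed to be in the caller's scope):
+//
+//	managed, _ := azidentity.NewManagedIdentityCredential(nil)
+//	cli, _ := azidentity.NewAzureCLICredential(nil)
+//	chain, err := azidentity.NewChainedTokenCredential(
+//		[]azcore.TokenCredential{managed, cli}, nil)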
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. +func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var err error + var errs []error + var token azcore.AccessToken + var successfulCredential azcore.TokenCredential + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + if _, ok := err.(*credentialUnavailableError); !ok { + break + } + } + if c.iterating { + c.cond.L.Lock() + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. 
It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if _, ok := err.(*credentialUnavailableError); ok { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 000000000000..3b443e8eedb2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 000000000000..e50157b104de --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,217 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
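+
+// A construction sketch (the certificate path, tenant ID, and client ID are
+// placeholders, and the PKCS12 file is assumed to be unencrypted):
+//
+//	data, _ := os.ReadFile("/path/to/cert.pfx") // hypothetical path
+//	certs, key, _ := azidentity.ParseCertificates(data, nil)
+//	cred, err := azidentity.NewClientCertificateCredential(
+//		"<tenant-id>", "<client-id>", certs, key, nil)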
+ +package azidentity + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + pk, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("'key' must be an *rsa.PrivateKey") + } + if !validTenantID(tenantID) { + return nil, errors.New(tenantIDValidationErr) + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + authorityHost, err := setAuthorityHost(options.Cloud) + if err != nil { + return nil, err + } + cert, err := newCertContents(certs, pk, options.SendCertificateChain) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromCert(cert.c, key) // TODO: NewCredFromCert should take a slice + if err != nil { + return nil, err + } + o := []confidential.Option{ + confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + } + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + c, err := confidential.New(clientID, cred, o...) + if err != nil { + return nil, err + } + return &ClientCertificateCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
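+// A manual call is rarely needed, but for illustration (ctx and the ARM
+// scope below are assumptions, not requirements):
+//
+//	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
+//		Scopes: []string{"https://management.azure.com/.default"},
+//	})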
+func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameCert + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameCert, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. +func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +type certContents struct { + c *x509.Certificate // the signing cert + fp []byte // the signing cert's fingerprint, a SHA-1 digest + pk *rsa.PrivateKey // the signing key + x5c []string // concatenation of every provided cert, base64 encoded +} + +func newCertContents(certs []*x509.Certificate, key *rsa.PrivateKey, sendCertificateChain bool) (*certContents, error) { + cc := certContents{pk: key} + // need the signing cert's fingerprint: identify that cert by matching its public key to the private key + for _, cert := range certs { + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && key.E == certKey.E && key.N.Cmp(certKey.N) == 0 { + fp := sha1.Sum(cert.Raw) + cc.fp = fp[:] + cc.c = cert + if sendCertificateChain { + // signing cert must be first in x5c + cc.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cc.x5c...) 
+ } + } else if sendCertificateChain { + cc.x5c = append(cc.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if len(cc.fp) == 0 || cc.c == nil { + return nil, errors.New("found no certificate matching 'key'") + } + return &cc, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 000000000000..6ecb8f4db816 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,78 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if !validTenantID(tenantID) { + return nil, errors.New(tenantIDValidationErr) + } + if options == nil { + options = &ClientSecretCredentialOptions{} + } + authorityHost, err := setAuthorityHost(options.Cloud) + if err != nil { + return nil, err + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := confidential.New(clientID, cred, + confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + ) + if err != nil { + return nil, err + } + return &ClientSecretCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
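+
+// A construction sketch (tenant, client, and secret values are placeholders
+// for an application registration's details):
+//
+//	cred, err := azidentity.NewClientSecretCredential(
+//		"<tenant-id>", "<client-id>", "<client-secret>", nil)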
+func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameSecret + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameSecret, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 000000000000..7358558acb57 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,132 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping when one provides a token: +// EnvironmentCredential +// ManagedIdentityCredential +// AzureCLICredential +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
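+// A typical usage sketch, constructing the credential once and requesting a
+// token by hand (ctx and the ARM scope are illustrative assumptions):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
+//		Scopes: []string{"https://management.azure.com/.default"},
+//	})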
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ClientOptions: options.ClientOptions}) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + msiCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, msiCred) + msiCred.client.imdsTimeout = time.Second + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. 
This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 000000000000..d0c72c348548 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. 
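+// A usage sketch with a custom prompt (the logger shown is an assumption;
+// by default the credential prints the instructions to stdout):
+//
+//	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
+//		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
+//			log.Println(dc.Message) // route instructions to a logger instead
+//			return nil
+//		},
+//	})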
+type DeviceCodeCredential struct { + client publicClient + userPrompt func(context.Context, DeviceCodeMessage) error + account public.Account +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. +func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + if !validTenantID(cp.TenantID) { + return nil, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(cp.Cloud) + if err != nil { + return nil, err + } + c, err := public.New(cp.ClientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), + public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), + ) + if err != nil { + return nil, err + } + return &DeviceCodeCredential{userPrompt: cp.UserPrompt, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameDeviceCode + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err) + } + err = c.userPrompt(ctx, DeviceCodeMessage{ + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + Message: dc.Result.Message, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err = dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 000000000000..16c595d1d375 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,122 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. 
It reads configuration from these variables, in the following order: +// +// Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the unencrypted private key. +// +// User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. +func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv("AZURE_TENANT_ID") + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ClientOptions: options.ClientOptions} + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv("AZURE_CLIENT_CERTIFICATE_PATH"); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + certs, key, err := ParseCertificates(certData, nil) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ClientOptions: options.ClientOptions} + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv("AZURE_USERNAME"); username != "" { + if password := os.Getenv("AZURE_PASSWORD"); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ClientOptions: options.ClientOptions} + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. 
Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 000000000000..c60d13d00716 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +func newAuthenticationFailedErrorFromMSALError(credType string, err error) error { + res := getResponseFromError(err) + return newAuthenticationFailedError(credType, err.Error(), res) +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. 
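+// Callers can detect this error type with errors.As, for example:
+//
+//	var authErr *azidentity.AuthenticationFailedError
+//	if errors.As(err, &authErr) && authErr.RawResponse != nil {
+//		// inspect authErr.RawResponse for details of the failed request
+//	}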
+func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt +// authentication because it lacks required data or state. +type credentialUnavailableError struct { + credType string + message string +} + +func newCredentialUnavailableError(credType, message string) error { + return &credentialUnavailableError{credType: credType, message: message} +} + +func (e *credentialUnavailableError) Error() string { + return e.credType + ": " + e.message +} + +// NonRetriable indicates that this error should not be retried. +func (e *credentialUnavailableError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 000000000000..e4aaf45b6dda --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,100 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // RedirectURL will be supported in a future version but presently doesn't work: https://github.com/Azure/azure-sdk-for-go/issues/15632. + // Applications which have "http://localhost" registered as a redirect URL need not set this option. + RedirectURL string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + client publicClient + options InteractiveBrowserCredentialOptions + account public.Account +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
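+// A minimal sketch, accepting every default (the development app client ID
+// and the "organizations" tenant):
+//
+//	cred, err := azidentity.NewInteractiveBrowserCredential(nil)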
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + if !validTenantID(cp.TenantID) { + return nil, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(cp.Cloud) + if err != nil { + return nil, err + } + c, err := public.New(cp.ClientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)), + public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)), + ) + if err != nil { + return nil, err + } + return &InteractiveBrowserCredential{options: cp, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameBrowser + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + o := []public.InteractiveAuthOption{} + if c.options.RedirectURL != "" { + o = append(o, public.WithRedirectURI(c.options.RedirectURL)) + } + ar, err = c.client.AcquireTokenInteractive(ctx, opts.Scopes, o...) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameBrowser, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 000000000000..569453e4622d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" + +func logGetTokenSuccess(cred azcore.TokenCredential, opts policy.TokenRequestOptions) { + if !log.Should(EventAuthentication) { + return + } + scope := strings.Join(opts.Scopes, ", ") + msg := fmt.Sprintf("%T.GetToken() acquired a token for scope %s\n", cred, scope) + log.Write(EventAuthentication, msg) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 000000000000..ce6e1e61474c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,393 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + msiEndpoint = "MSI_ENDPOINT" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + serviceFabricAPIVersion = "2019-07-01-preview" + + qpClientID = "client_id" + qpResID = "mi_res_id" +) + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +// managedIdentityClient provides the base for authenticating in managed identity environments. +// This type includes a runtime.Pipeline and TokenCredentialOptions. +type managedIdentityClient struct { + pipeline runtime.Pipeline + msiType msiType + endpoint string + id ManagedIDKind + imdsTimeout time.Duration +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 429 and all 5xx + // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline.
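+// The hosting environment is detected from environment variables, roughly: +// +//	IDENTITY_ENDPOINT + IDENTITY_HEADER + IDENTITY_SERVER_THUMBPRINT => Service Fabric +//	IDENTITY_ENDPOINT + IDENTITY_HEADER => App Service +//	IDENTITY_ENDPOINT + IMDS_ENDPOINT => Azure Arc +//	MSI_ENDPOINT => Cloud Shell +//	none of the above => IMDS (the default)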
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// authenticate creates an authentication request for a Managed Identity and returns the resulting Access Token if successful. +// ctx: The current context for controlling the request lifetime. +// clientID: The client (application) ID of the service principal. +// scopes: The scopes required for the token. +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + var cancel context.CancelFunc + if c.imdsTimeout > 0 && c.msiType == msiTypeIMDS { + ctx, cancel = context.WithTimeout(ctx, c.imdsTimeout) + defer cancel() + } + + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + if cancel != nil && errors.Is(err, context.DeadlineExceeded) { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "IMDS token request timed out") + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) + } + + // got a response, remove the IMDS timeout so future requests use the transport's configuration + c.imdsTimeout = 0 + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return 
the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, &value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retrieve the secret key challenge provided by the HIMDS service + key, err := c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retrieve secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + 
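+// For illustration, the App Service request built above looks roughly like this, with both +// values coming from the corresponding environment variables: +// +//	GET $IDENTITY_ENDPOINT?api-version=2019-08-01&resource=<scope> +//	X-IDENTITY-HEADER: $IDENTITY_HEADER +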
+func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retrieve the secret key challenge provided by the HIMDS service + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.pipeline.Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. + if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", errors.New("did not receive a value from WWW-Authenticate header") + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + pos := strings.LastIndex(header, "=") + if pos == -1 { + return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + } + key, err := ioutil.ReadFile(header[pos+1:]) + if err != nil { + return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err !=
nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 000000000000..f17ada1c3ed0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,105 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + id ManagedIDKind + client *managedIdentityClient +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
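+// +// A minimal usage sketch (the client ID is a placeholder): +// +//	// system-assigned identity +//	cred, err := NewManagedIdentityCredential(nil) +// +//	// user-assigned identity, selected by client ID +//	cred, err = NewManagedIdentityCredential(&ManagedIdentityCredentialOptions{ID: ClientID("my-client-id")})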
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + client, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + return &ManagedIdentityCredential{id: options.ID, client: client}, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + scopes := []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + tk, err := c.client.authenticate(ctx, c.id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + logGetTokenSuccess(c, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 000000000000..8b02e7b47bab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions +} + +// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + client publicClient + username string + password string + account public.Account +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. 
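+// +// A minimal usage sketch (all argument values are placeholders): +// +//	cred, err := NewUsernamePasswordCredential("my-tenant-id", "my-client-id", "user@example.com", "my-password", nil)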
+func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if !validTenantID(tenantID) { + return nil, errors.New(tenantIDValidationErr) + } + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + authorityHost, err := setAuthorityHost(options.Cloud) + if err != nil { + return nil, err + } + c, err := public.New(clientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)), + ) + if err != nil { + return nil, err + } + return &UsernamePasswordCredential{username: username, password: password, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameUserPassword + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + ar, err = c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameUserPassword, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 000000000000..0fb125ace9e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. + version = "v1.1.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 000000000000..48ea6616b5b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 000000000000..1fdc53615b6a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the function fails, an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the function fails, an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 000000000000..66bf13e5f04b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 000000000000..8c6eacb618a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 000000000000..ade7b348e303 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 000000000000..d7876d297ae9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 000000000000..4f1dcf1b78a6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are written. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing.
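+// +// A usage sketch (the event name is hypothetical): +// +//	SetListener(func(e Event, msg string) { +//		fmt.Fprintln(os.Stderr, e, msg) +//	}) +//	Write(Event("Example"), "something happened")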
+func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 000000000000..b23f3860c5e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. +type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. 
+ const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired, resource := time.Now(), false, false, er.resource + // acquire exclusive lock + er.cond.L.Lock() + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. + er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 000000000000..a3824bee8b5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 000000000000..278ac9cd1c2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. +func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md new file mode 100644 index 000000000000..db095b3a26ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -0,0 +1,54 @@ +# Release History + +## 0.4.1 (2022-05-12) + +### Other Changes +* Updated to latest `azcore` and `internal` modules + +## 0.4.0 (2022-04-19) + +### Breaking Changes +* Fixed Issue #17150 : Renaming/refactoring high level methods. +* Fixed Issue #16972 : Constructors should return clients by reference. +* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same. + +### Bugs Fixed +* Fixed Issue #17515 : SetTags options bag missing leaseID. +* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. +* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. +* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag. +* Fixed Issue #16920 : Fixing error handling example. 
+* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. +* Fixed Issue #16679 : Response parsing issue in List blobs API. + +## 0.3.0 (2022-02-09) + +### Breaking Changes + +* Updated to latest `azcore`. Public surface area is unchanged. +* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is + now `*RetryReaderOptions`. + +### Bugs Fixed + +* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource. +* Fixed Issue #16223 : `HttpRange` does not expose its fields. +* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient` +* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()` +* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment +* Fixed Issue #16679 : Unable to access Metadata when listing blobs +* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission. +* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError` + +## 0.2.0 (2021-11-03) + +### Breaking Changes + +* Clients now have one constructor per authentication method + +## 0.1.0 (2021-09-13) + +### Features Added + +* This is the initial preview release of the `azblob` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md new file mode 100644 index 000000000000..32a10a005c1e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -0,0 +1,397 @@ +# Azure Blob Storage SDK for Go + +## Introduction + +The Microsoft Azure Storage SDK for Go allows you to build applications that take advantage of Azure's scalable cloud +storage. This is the new beta client module for Azure Blob Storage, which follows +our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the +previous beta [azblob package](https://github.com/azure/azure-storage-blob-go).
+ +## Getting Started + +The Azure Blob SDK can access an Azure Storage account. + +### Prerequisites + +* Go version 1.18 or higher +* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use + the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group` + and `mystorageaccount` with your own unique names): + (Optional) If you want a new resource group to hold the Storage Account: + ``` + az group create --name my-resource-group --location westus2 + ``` + Create the storage account: + ``` + az storage account create --resource-group my-resource-group --name mystorageaccount + ``` + + The storage account name can be queried with: + ``` + az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" + ``` + You can set this as an environment variable with: + ```bash + # PowerShell + $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" + # bash + export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" + ``` + + Query your storage account keys: + ``` + az storage account keys list --resource-group my-resource-group -n mystorageaccount + ``` + + Output: + ```json + [ + { + "creationTime": "2022-02-07T17:18:44.088870+00:00", + "keyName": "key1", + "permissions": "FULL", + "value": "..." + }, + { + "creationTime": "2022-02-07T17:18:44.088870+00:00", + "keyName": "key2", + "permissions": "FULL", + "value": "..." + } + ] + ``` + + ```bash + # PowerShell + $ENV:AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>" + # Bash + export AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>" + ``` + > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account. + +#### Create account + +* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account] + , [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account]. + +### Install the package + +* Install the Azure Blob Storage client module for Go with `go get`: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +> Optional: If you are going to use AAD authentication, install the `azidentity` package: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +#### Create the client + +`azblob` allows you to interact with three types of resources: + +* [Azure storage accounts][azure_storage_account]. +* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts. +* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs) + within those containers. + +Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will +need the account's blob service endpoint URL and a credential that allows you to access the account.
The `endpoint` can +be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" +section or by running the following Azure CLI command: + +```bash +# Get the blob service URL for the account +az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" +``` + +Once you have the account URL, it can be used to create the service client: + +```golang +cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", cred, nil) +handle(err) +``` + +For more information about blob service URLs and how to configure custom domain names for Azure Storage, check out +the [official documentation][azure_portal_account_url]. + +#### Types of credentials + +The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any +of the `azidentity` types that implement the `azcore.TokenCredential` interface. + +##### 1. Creating the client from a shared key + +To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can +be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by +running the following Azure CLI command: + +```bash +az storage account keys list -g my-resource-group -n mystorageaccount +``` + +Use Shared Key authentication as the credential parameter to authenticate the client: + +```golang +credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", credential, nil) +handle(err) +``` + +##### 2. Creating the client from a connection string + +You can also use a connection string, instead of providing the account URL and credential separately, for +authentication. To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The +connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access +Keys" section or with the following Azure CLI command: + +```bash +az storage account show-connection-string -g my-resource-group -n mystorageaccount +``` + +```golang +connStr := "DefaultEndpointsProtocol=https;AccountName=<accountName>;AccountKey=<accountKey>;EndpointSuffix=core.windows.net" +serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) +``` + +##### 3. Creating the client from a SAS token + +To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS +token from the Azure Portal +under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use +the `ServiceClient.GetSASToken()` or `ContainerClient.GetSASToken()` methods. + +```golang +credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil) +handle(err) +// Provide the convenience function with relevant info (services, resource types, permissions, and duration) +// The SAS token will be valid from this moment onwards.
+accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true}, +AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour)) +handle(err) +sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS) + +// The sasURL can be used to authenticate a client without the need for a credential +serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil) +handle(err) +``` + +### Clients + +Three different clients are provided to interact with the various components of the Blob Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete containers within the account. + +2. **`ContainerClient`** + * Get and set container access settings, properties, and metadata. + * Create, delete, and query blobs within the container. + * `ContainerLeaseClient` to support container lease management. + +3. **`BlobClient`** + * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` + * Get and set blob properties. + * Perform CRUD operations on a given blob. + * `BlobLeaseClient` to support blob lease management. + +### Example + +```go +// Use your storage account's name and key to create a credential object, used to access your account. +// You can obtain these details from the Azure Portal. +accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +if !ok { + handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found")) +} + +accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") +if !ok { + handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found")) +} +cred, err := NewSharedKeyCredential(accountName, accountKey) +handle(err) + +// Open up a service client. +// You'll need to specify a service URL, which for blob endpoints usually follows the syntax http(s)://<myAccountName>.blob.core.windows.net/ +service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) +handle(err) + +// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout. +ctx := context.Background() // This example has no expiry. + +// This example showcases several common operations to help you get started, such as: + +// ===== 1. Creating a container ===== + +// First, branch off of the service client and create a container client. +container := service.NewContainerClient("mycontainer") + +// Then, fire off a create operation on the container client. +// Note that all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc. +// Specifying nil omits all options. +_, err = container.Create(ctx, nil) +handle(err) + +// ===== 2. Uploading/downloading a block blob ===== +// We'll specify our data up-front, rather than reading a file for simplicity's sake. +data := "Hello world!" + +// Branch off of the container into a block blob client +blockBlob := container.NewBlockBlobClient("HelloWorld.txt") + +// Upload data to the block blob +_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil) +handle(err) + +// Download the blob's contents and ensure that the download worked properly +get, err := blockBlob.Download(ctx, nil) +handle(err) + +// Open a buffer, reader, and then download! +downloadedData := &bytes.Buffer{} +// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
+reader := get.Body(RetryReaderOptions{})
+_, err = downloadedData.ReadFrom(reader)
+handle(err)
+err = reader.Close()
+handle(err)
+if data != downloadedData.String() {
+	handle(errors.New("downloaded data doesn't match uploaded data"))
+}
+
+// ===== 3. List blobs =====
+// ListBlobsFlat returns a pager. Call NextPage to fetch each page of results, read the
+// page via PageResponse, and check Err() once iteration stops; the pager yields either
+// pages or a single error.
+pager := container.ListBlobsFlat(nil)
+
+for pager.NextPage(ctx) {
+	resp := pager.PageResponse()
+
+	for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
+		fmt.Println(*v.Name)
+	}
+}
+
+if err = pager.Err(); err != nil {
+	handle(err)
+}
+
+// Delete the blob we created earlier.
+_, err = blockBlob.Delete(ctx, nil)
+handle(err)
+
+// Delete the container we created earlier.
+_, err = container.Delete(ctx, nil)
+handle(err)
+```
+
+## Troubleshooting
+
+### Error Handling
+
+All I/O operations return an `error` that can be inspected for more information. In
+addition, you can inspect the raw HTTP response behind a failed operation:
+
+```golang
+var storageErr *azblob.StorageError
+_, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil)
+if err != nil && errors.As(err, &storageErr) {
+    // do something with storageErr.Response()
+}
+```
+
+### Logging
+
+This module uses the classification based logging implementation in azcore. To turn on logging,
+set `AZURE_SDK_GO_LOGGING` to `all`.
+
+If you only want to include logs for `azblob`, you must create your own logger and set the log classification
+as `LogCredential`.
+
+To obtain more detailed logging, including request/response bodies and header values, make sure to leave the logger as
+default or enable the `LogRequest` and/or `LogResponse` classifications. A logger that only includes credential logs
+might look like the following:
+
+```golang
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+// Set log to output to the console
+azlog.SetListener(func(cls azlog.Classification, msg string) {
+	fmt.Println(msg) // printing log out to the console
+})
+
+// Includes only requests and responses in credential logs
+azlog.SetClassifications(azlog.Request, azlog.Response)
+```
+
+> CAUTION: logs from credentials contain sensitive information.
+> These logs must be protected to avoid compromising account security.
+
+## License
+
+This project is licensed under the MIT license.
+
+## Provide Feedback
+
+If you encounter bugs or have suggestions, please
+[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label.
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License
+Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For
+details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate
+the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to
+do this once across all repos using our CLA.
+ +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + + +[azure_subscription]:https://azure.microsoft.com/free/ + +[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal + +[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal + +[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell + +[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli + +[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show + +[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1 + +[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints + +[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview + +[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key + +[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore + +[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md + +[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes + +[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/ + +[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/ + +[contact_msft_oss]:mailto:opencode@microsoft.com + +[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md new file mode 100644 index 000000000000..0a391904aac4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md @@ -0,0 +1,171 @@ +# Code Generation - Azure Blob SDK for Golang + + + +```bash +cd swagger +autorest autorest.md +gofmt -w generated/* +``` + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" +credential-scope: "https://storage.azure.com/.default" +output-folder: internal/ +file-prefix: "zz_generated_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +module-version: "0.3.0" +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: false +use: "@autorest/go@4.0.0-preview.36" +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go new file mode 100644 index 000000000000..14c7feda1105 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import ( + "errors" +) + +type bytesWriter []byte + +func newBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go new file mode 100644 index 000000000000..d5ccdfb40766 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go @@ -0,0 +1,231 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" + "io" + "sync" + "sync/atomic" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) + CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var err error + generatedUuid, err := uuid.New() + if err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(generatedUuid), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. + if err != nil && !errors.Is(err, io.EOF) { + return BlockBlobCommitBlockListResponse{}, err + } + + // Close out our upload. 
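+	// (A descriptive note: close() below blocks until every scheduled chunk upload has
+	// finished; its error, if any, is the first failure reported by a chunk writer.)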
+	if err := cp.close(); err != nil {
+		return BlockBlobCommitBlockListResponse{}, err
+	}
+
+	return cp.result, nil
+}
+
+// copier streams a file via chunks in parallel from a reader representing a file.
+// Do not use directly, instead use copyFromReader().
+type copier struct {
+	// ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case,
+	// the copier has the lifetime of a function call, so it's fine.
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// reader is the source to be written to storage.
+	reader io.Reader
+	// to is the location we are writing our chunks to.
+	to blockWriter
+
+	// o contains our options for uploading.
+	o UploadStreamOptions
+
+	// id provides the ids for each chunk.
+	id *id
+
+	// errCh is used to hold the first error from our concurrent writers.
+	errCh chan error
+	// wg provides a count of how many writers we are waiting to finish.
+	wg sync.WaitGroup
+
+	// result holds the final result from blob storage after we have submitted all chunks.
+	result BlockBlobCommitBlockListResponse
+}
+
+type copierChunk struct {
+	buffer []byte
+	id     string
+	length int
+}
+
+// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
+// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier.
+func (c *copier) getErr() error {
+	select {
+	case err := <-c.errCh:
+		return err
+	default:
+	}
+	return c.ctx.Err()
+}
+
+// sendChunk reads data from our internal reader, creates a chunk, and hands it to the TransferManager to be written.
+// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
+func (c *copier) sendChunk() error {
+	if err := c.getErr(); err != nil {
+		return err
+	}
+
+	buffer := c.o.TransferManager.Get()
+	if len(buffer) == 0 {
+		return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager")
+	}
+
+	n, err := io.ReadFull(c.reader, buffer)
+	if n > 0 {
+		// Some data was read, schedule the write.
+		id := c.id.next()
+		c.wg.Add(1)
+		c.o.TransferManager.Run(
+			func() {
+				defer c.wg.Done()
+				c.write(copierChunk{buffer: buffer, id: id, length: n})
+			},
+		)
+	} else {
+		// Return the unused buffer to the manager.
+		c.o.TransferManager.Put(buffer)
+	}
+
+	if err == nil {
+		return nil
+	} else if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return io.EOF
+	}
+
+	if cerr := c.getErr(); cerr != nil {
+		return cerr
+	}
+
+	return err
+}
+
+// write uploads a chunk to blob storage.
+func (c *copier) write(chunk copierChunk) {
+	defer c.o.TransferManager.Put(chunk.buffer)
+
+	if err := c.ctx.Err(); err != nil {
+		return
+	}
+	stageBlockOptions := c.o.getStageBlockOptions()
+	_, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions)
+	if err != nil {
+		// Report the first error only; errCh has capacity 1 and getErr consumes at most one
+		// value, so don't block if another goroutine has already reported a failure.
+		select {
+		case c.errCh <- fmt.Errorf("write error: %w", err):
+		default:
+		}
+		return
+	}
+}
+
+// close commits our blocks to blob storage and closes our writer.
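+// It first waits for every in-flight chunk upload to drain, so a staging error from any
+// worker is returned before the block list is committed.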
+func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + commitBlockListOptions := c.o.getCommitBlockListOptions() + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID(uu uuid.UUID) *id { + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml new file mode 100644 index 000000000000..e0623f50e855 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -0,0 +1,28 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azblob' + RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go new file mode 100644 index 000000000000..c5d501c66101 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +type connection struct { + u string + p runtime.Pipeline +} + +// newConnection creates an instance of the connection type with the specified endpoint. +// Pass nil to accept the default options; this is the same as passing a zero-value options. +func newConnection(endpoint string, options *azcore.ClientOptions) *connection { + cp := azcore.ClientOptions{} + if options != nil { + cp = *options + } + return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)} +} + +// Endpoint returns the connection's endpoint. +func (c *connection) Endpoint() string { + return c.u +} + +// Pipeline returns the connection's pipeline. 
+func (c *connection) Pipeline() runtime.Pipeline {
+	return c.p
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go
new file mode 100644
index 000000000000..c1c336ed4665
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go
@@ -0,0 +1,46 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+var SASVersion = "2019-12-12"
+
+//nolint
+const (
+	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
+	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
+
+	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
+	BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
+
+	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
+	BlockBlobMaxBlocks = 50000
+
+	// PageBlobPageBytes indicates the number of bytes in a page (512).
+	PageBlobPageBytes = 512
+
+	// BlobDefaultDownloadBlockSize is the default download block size.
+	BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+)
+
+const (
+	headerAuthorization     = "Authorization"
+	headerXmsDate           = "x-ms-date"
+	headerContentLength     = "Content-Length"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLanguage   = "Content-Language"
+	headerContentType       = "Content-Type"
+	headerContentMD5        = "Content-MD5"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfMatch           = "If-Match"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerRange             = "Range"
+)
+
+const (
+	tokenScope = "https://storage.azure.com/.default"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
new file mode 100644
index 000000000000..c2426eb7005f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
@@ -0,0 +1,214 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+
+Package azblob provides access to Azure Blob Storage.
+
+The azblob package is capable of:
+    - Creating, deleting, and querying containers in an account
+    - Creating, deleting, and querying blobs in a container
+    - Creating Shared Access Signatures for authentication
+
+Types of Resources
+
+The azblob package allows you to interact with three types of resources:
+
+* Azure storage accounts.
+* Containers within those storage accounts.
+* Blobs (block blobs, page blobs, and append blobs) within those containers.
+
+The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object.
+To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account.
+
+Types of Credentials
+
+The clients support different forms of authentication.
+The azblob library supports any type that implements the `azcore.TokenCredential` interface, authorization via a Connection String,
+or authorization with a Shared Access Signature token.
+
+Using a Shared Key
+
+To use an account shared key (aka account key or access key), provide the key as a string.
+This can be found in your storage account in the Azure Portal under the "Access Keys" section.
+
+Use the key as the credential parameter to authenticate the client:
+
+	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
+	}
+	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
+	}
+
+	serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+
+	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
+	handle(err)
+
+	serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
+	handle(err)
+
+	fmt.Println(serviceClient.URL())
+
+Using a Connection String
+
+Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately.
+To do this, pass the connection string to the `NewServiceClientFromConnectionString` function.
+The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section.
+
+	connStr := "DefaultEndpointsProtocol=https;AccountName=<accountName>;AccountKey=<accountKey>;EndpointSuffix=core.windows.net"
+	serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil)
+
+Using a Shared Access Signature (SAS) Token
+
+To use a shared access signature (SAS) token, provide the token at the end of your service URL.
+You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASURL() function, as shown below.
+
+	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
+	}
+	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
+	}
+	serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+
+	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
+	handle(err)
+	serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
+	handle(err)
+	fmt.Println(serviceClient.URL())
+
+	// Alternatively, you can create the SAS on the fly
+
+	resources := azblob.AccountSASResourceTypes{Service: true}
+	permission := azblob.AccountSASPermissions{Read: true}
+	start := time.Now()
+	expiry := start.AddDate(0, 0, 1)
+	serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry)
+	handle(err)
+
+	serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil)
+	handle(err)
+
+	fmt.Println(serviceClientWithSAS.URL())
+
+Types of Clients
+
+There are three different clients provided to interact with the various components of the Blob Service:
+
+1. **`ServiceClient`**
+   * Get and set account settings.
+   * Query, create, and delete containers within the account.
+
+2. **`ContainerClient`**
+   * Get and set container access settings, properties, and metadata.
+   * Create, delete, and query blobs within the container.
+   * `ContainerLeaseClient` to support container lease management.
+
+3. **`BlobClient`**
+   * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient`
+   * Get and set blob properties.
+   * Perform CRUD operations on a given blob.
+   * `BlobLeaseClient` to support blob lease management.
+
+Examples
+
+	// Your account name and key can be obtained from the Azure Portal.
+	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
+	}
+
+	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
+	if !ok {
+		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
+	}
+	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
+	handle(err)
+
+	// The service URL for blob endpoints is usually in the form: https://<accountName>.blob.core.windows.net/
+	serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
+	handle(err)
+
+	// ===== 1. Create a container =====
+
+	// First, create a container client, and use the Create method to create a new container in your account
+	containerClient, err := serviceClient.NewContainerClient("testcontainer")
+	handle(err)
+
+	// All APIs have an options bag struct as a parameter.
+	// The options bag struct allows you to specify optional parameters such as metadata, public access types, etc.
+	// If you want to use the default options, pass in nil.
+	_, err = containerClient.Create(context.TODO(), nil)
+	handle(err)
+
+	// ===== 2. Upload and Download a block blob =====
+	uploadData := "Hello world!"
+
+	// Create a new blockBlobClient from the containerClient
+	blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt")
+	handle(err)
+
+	// Upload data to the block blob
+	blockBlobUploadOptions := azblob.BlockBlobUploadOptions{
+		Metadata: map[string]string{"Foo": "Bar"},
+		TagsMap:  map[string]string{"Year": "2022"},
+	}
+	_, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions)
+	handle(err)
+
+	// Download the blob's contents and ensure that the download worked properly
+	blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil)
+	handle(err)
+
+	// Use the bytes.Buffer object to read the downloaded data.
+	// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
+	reader := blobDownloadResponse.Body(nil)
+	downloadData, err := ioutil.ReadAll(reader)
+	handle(err)
+	if string(downloadData) != uploadData {
+		handle(errors.New("uploaded data should be the same as downloaded data"))
+	}
+
+	if err = reader.Close(); err != nil {
+		handle(err)
+		return
+	}
+
+	// ===== 3. List blobs =====
+	// List methods return a pager object which can be used to iterate over the results of a paging operation.
+	// To iterate over a page, use NextPage(context.Context) to fetch the next page of results.
+	// PageResponse() can be used to iterate over the results of the specific page.
+	// Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results.
+	pager := containerClient.ListBlobsFlat(nil)
+	for pager.NextPage(context.TODO()) {
+		resp := pager.PageResponse()
+		for _, v := range resp.Segment.BlobItems {
+			fmt.Println(*v.Name)
+		}
+	}
+
+	if err = pager.Err(); err != nil {
+		handle(err)
+	}
+
+	// Delete the blob.
+	_, err = blockBlobClient.Delete(context.TODO(), nil)
+	handle(err)
+
+	// Delete the container.
+ _, err = containerClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go new file mode 100644 index 000000000000..28725003981f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go @@ -0,0 +1,316 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "encoding/base64" + "io" + "net/http" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" + + "bytes" + "errors" + "os" +) + +// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. +func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) { + if o.BlockSize == 0 { + // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error + if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { + return nil, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= BlockBlobMaxUploadBlobBytes { + o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = BlobDefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). + } + } + + if readerSize <= BlockBlobMaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions) + + return resp.RawResponse, err + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := DoBatchTransfer(ctx, BatchTransferOptions{ + OperationName: "uploadReaderAtToBlockBlob", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Parallelism: o.Parallelism, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. 
+			// It is passed this block's offset within the buffer and its count of bytes.
+			// Prepare to read the proper block/section of the buffer.
+			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
+			blockNum := offset / o.BlockSize
+			if o.Progress != nil {
+				blockProgress := int64(0)
+				body = streaming.NewRequestProgress(internal.NopCloser(body),
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - blockProgress
+						blockProgress = bytesTransferred
+						progressLock.Lock() // 1 goroutine at a time gets progress report
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+
+			// Block IDs are unique values to avoid issues if 2+ clients are uploading blocks
+			// at the same time, causing PutBlockList to get a mix of blocks from all the clients.
+			generatedUuid, err := uuid.New()
+			if err != nil {
+				return err
+			}
+			blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
+			stageBlockOptions := o.getStageBlockOptions()
+			_, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions)
+			return err
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	// All put blocks were successful, call Put Block List to finalize the blob
+	commitBlockListOptions := o.getCommitBlockListOptions()
+	resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)
+
+	return resp.RawResponse, err
+}
+
+// UploadBuffer uploads a buffer in blocks to a block blob.
+func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) {
+	return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o)
+}
+
+// UploadFile uploads a file in blocks to a block blob.
+func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) {
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o)
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// UploadStream copies the data held in the io.Reader to the blob at the BlockBlobClient.
+// A Context deadline or cancellation will cause this to error.
+func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
+	if err := o.defaults(); err != nil {
+		return BlockBlobCommitBlockListResponse{}, err
+	}
+
+	// If we used the default manager, we need to close it.
+	if o.transferMangerNotSet {
+		defer o.TransferManager.Close()
+	}
+
+	result, err := copyFromReader(ctx, body, bb, o)
+	if err != nil {
+		return BlockBlobCommitBlockListResponse{}, err
+	}
+
+	return result, nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DownloadToWriterAt downloads an Azure blob to a WriterAt in parallel.
+// Offset and count are optional; pass 0 for both to download the entire blob.
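+//
+// A minimal sketch, assuming an existing BlobClient b, a context ctx, and an open *os.File f:
+//
+//	err := b.DownloadToWriterAt(ctx, 0, CountToEnd, f, DownloadOptions{})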
+func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error {
+	if o.BlockSize == 0 {
+		o.BlockSize = BlobDefaultDownloadBlockSize
+	}
+
+	if count == CountToEnd { // If size not specified, calculate it
+		// If we don't have the length at all, get it
+		downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil)
+		dr, err := b.Download(ctx, downloadBlobOptions)
+		if err != nil {
+			return err
+		}
+		count = *dr.ContentLength - offset
+	}
+
+	if count <= 0 {
+		// The file is empty, there is nothing to download.
+		return nil
+	}
+
+	// Prepare and do parallel download.
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := DoBatchTransfer(ctx, BatchTransferOptions{
+		OperationName: "downloadBlobToWriterAt",
+		TransferSize:  count,
+		ChunkSize:     o.BlockSize,
+		Parallelism:   o.Parallelism,
+		Operation: func(chunkStart int64, count int64, ctx context.Context) error {
+			downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil)
+			dr, err := b.Download(ctx, downloadBlobOptions)
+			if err != nil {
+				return err
+			}
+			body := dr.Body(&o.RetryReaderOptionsPerBlock)
+			if o.Progress != nil {
+				rangeProgress := int64(0)
+				body = streaming.NewResponseProgress(
+					body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - rangeProgress
+						rangeProgress = bytesTransferred
+						progressLock.Lock()
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
+			if err != nil {
+				return err
+			}
+			err = body.Close()
+			return err
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DownloadToBuffer downloads an Azure blob to a buffer in parallel.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error {
+	return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o)
+}
+
+// DownloadToFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error {
+	// 1. Calculate the size of the destination file
+	var size int64
+
+	if count == CountToEnd {
+		// Try to get Azure blob's size
+		getBlobPropertiesOptions := o.getBlobPropertiesOptions()
+		props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
+		if err != nil {
+			return err
+		}
+		size = *props.ContentLength - offset
+	} else {
+		size = count
+	}
+
+	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return err
+		}
+	}
+
+	if size > 0 {
+		return b.DownloadToWriterAt(ctx, offset, size, file, o)
+	} else { // if the blob's size is 0, there is no need to download it
+		return nil
+	}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DoBatchTransfer helps to execute operations in a batch manner.
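+// It can also be called directly to customize batched transfers for scenarios the SDK
+// does not cover out of the box.
+//
+// A minimal usage sketch (the values are illustrative; Operation just reports each chunk):
+//
+//	err := DoBatchTransfer(ctx, BatchTransferOptions{
+//		OperationName: "exampleTransfer",
+//		TransferSize:  10 * 1024 * 1024,
+//		ChunkSize:     1024 * 1024,
+//		Parallelism:   4,
+//		Operation: func(offset int64, count int64, ctx context.Context) error {
+//			fmt.Printf("processing %d bytes at offset %d\n", count, offset)
+//			return nil
+//		},
+//	})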
+func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
+	if o.ChunkSize == 0 {
+		return errors.New("ChunkSize cannot be 0")
+	}
+
+	if o.Parallelism == 0 {
+		o.Parallelism = 5 // default Parallelism
+	}
+
+	// Prepare and do parallel operations.
+	numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
+	operationChannel := make(chan func() error, o.Parallelism) // Create the channel that feeds work to the 'Parallelism' worker goroutines
+	operationResponseChannel := make(chan error, numChunks)    // Holds each response
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Create the goroutines that process each operation (in parallel).
+	for g := uint16(0); g < o.Parallelism; g++ {
+		go func() {
+			for f := range operationChannel {
+				err := f()
+				operationResponseChannel <- err
+			}
+		}()
+	}
+
+	// Add each chunk's operation to the channel.
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		curChunkSize := o.ChunkSize
+
+		if chunkNum == numChunks-1 { // Last chunk
+			curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
+		}
+		offset := int64(chunkNum) * o.ChunkSize
+
+		operationChannel <- func() error {
+			return o.Operation(offset, curChunkSize, ctx)
+		}
+	}
+	close(operationChannel)
+
+	// Wait for the operations to complete.
+	var firstErr error
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		responseError := <-operationResponseChannel
+		// record the first error (the original error which should cause the other chunks to fail with canceled context)
+		if responseError != nil && firstErr == nil {
+			cancel() // As soon as any operation fails, cancel all remaining operation calls
+			firstErr = responseError
+		}
+	}
+	return firstErr
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
new file mode 100644
index 000000000000..cd2ada9b5db4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
@@ -0,0 +1,150 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
+type CtxWithHTTPHeaderKey struct{}
+
+// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
+type CtxWithRetryOptionsKey struct{}
+
+type nopCloser struct {
+	io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+	return nopCloser{rs}
+}
+
+// BodyDownloadPolicyOpValues is the struct containing the per-operation values.
+type BodyDownloadPolicyOpValues struct {
+	Skip bool
+}
+
+// NewResponseError wraps the inner error together with the HTTP response that produced it.
+func NewResponseError(inner error, resp *http.Response) error {
+	return &ResponseError{inner: inner, resp: resp}
+}
+
+type ResponseError struct {
+	inner error
+	resp  *http.Response
+}
+
+// Error implements the error interface for type ResponseError.
+func (e *ResponseError) Error() string {
+	return e.inner.Error()
+}
+
+// Unwrap returns the inner error.
+func (e *ResponseError) Unwrap() error {
+	return e.inner
+}
+
+// RawResponse returns the HTTP response associated with this error.
+func (e *ResponseError) RawResponse() *http.Response {
+	return e.resp
+}
+
+// NonRetriable indicates this error is non-transient.
+func (e *ResponseError) NonRetriable() {
+	// marker method
+}
+
+// Delay waits for the duration to elapse or the context to be cancelled.
+func Delay(ctx context.Context, delay time.Duration) error {
+	select {
+	case <-time.After(delay):
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// ErrNoBody is returned if the response didn't contain a body.
+var ErrNoBody = errors.New("the response did not contain a body")
+
+// GetJSON reads the response body into a raw JSON object.
+// It returns ErrNoBody if there was no content.
+func GetJSON(resp *http.Response) (map[string]interface{}, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	defer resp.Body.Close()
+	if err != nil {
+		return nil, err
+	}
+	if len(body) == 0 {
+		return nil, ErrNoBody
+	}
+	// put the body back so it's available to others
+	resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+	// unmarshal the body to get the value
+	var jsonBody map[string]interface{}
+	if err = json.Unmarshal(body, &jsonBody); err != nil {
+		return nil, err
+	}
+	return jsonBody, nil
+}
+
+const HeaderRetryAfter = "Retry-After"
+
+// RetryAfter returns non-zero if the response contains a Retry-After header value.
+func RetryAfter(resp *http.Response) time.Duration {
+	if resp == nil {
+		return 0
+	}
+	ra := resp.Header.Get(HeaderRetryAfter)
+	if ra == "" {
+		return 0
+	}
+	// retry-after values are expressed as either a number of
+	// seconds or an HTTP-date indicating when to try again
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		return time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		return time.Until(t)
+	}
+	return 0
+}
+
+// HasStatusCode returns true if the Response's status code is one of the specified values.
+func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
+	if resp == nil {
+		return false
+	}
+	for _, sc := range statusCodes {
+		if resp.StatusCode == sc {
+			return true
+		}
+	}
+	return false
+}
+
+const defaultScope = "/.default"
+
+// EndpointToScope converts the provided URL endpoint to its default scope.
+func EndpointToScope(endpoint string) string {
+	if endpoint[len(endpoint)-1] != '/' {
+		endpoint += "/"
+	}
+	return endpoint + defaultScope
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
new file mode 100644
index 000000000000..d2e89f5b2a60
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
@@ -0,0 +1,53 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"errors"
+	"io"
+)
+
+type sectionWriter struct {
+	count    int64
+	offset   int64
+	position int64
+	writerAt io.WriterAt
+}
+
+func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
+	return &sectionWriter{
+		count:    count,
+		offset:   off,
+		writerAt: c,
+	}
+}
+
+func (c *sectionWriter) Write(p []byte) (int, error) {
+	remaining := c.count - c.position
+
+	if remaining <= 0 {
+		return 0, errors.New("end of section reached")
+	}
+
+	slice := p
+
+	if int64(len(slice)) > remaining {
+		slice = slice[:remaining]
+	}
+
+	n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
+	c.position += int64(n)
+	if err != nil {
+		return n, err
+	}
+
+	if len(p) > n {
+		return n, errors.New("not enough space for all bytes")
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
new file mode 100644
index 000000000000..5c40e9bc2ab4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
@@ -0,0 +1,154 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"fmt"
+	"sync"
+)
+
+// TransferManager provides a buffer and thread pool manager for certain transfer options.
+// It is undefined behavior if code outside this package calls any of these methods.
+type TransferManager interface {
+	// Get provides a buffer that will be used to read data into and write out to the stream.
+	// It is guaranteed by this package to not read or write beyond the size of the slice.
+	Get() []byte
+
+	// Put may or may not put the buffer into underlying storage, depending on settings.
+	// The buffer must not be touched after this has been called.
+	Put(b []byte) // nolint
+
+	// Run will use a goroutine pool entry to run a function. This blocks until a pool
+	// goroutine becomes available.
+	Run(func())
+
+	// Close shuts down all internal goroutines. This must be called when the TransferManager
+	// will no longer be used. Not closing it will cause a goroutine leak.
+	Close()
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+type staticBuffer struct {
+	buffers    chan []byte
+	size       int
+	threadpool chan func()
+}
+
+// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
+// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
+// can be shared between calls if you wish to control maximum memory and concurrency with
+// multiple concurrent calls.
+func NewStaticBuffer(size, max int) (TransferManager, error) {
+	if size < 1 || max < 1 {
+		return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+	}
+
+	if size < _1MiB {
+		return nil, fmt.Errorf("cannot have size < 1MiB")
+	}
+
+	threadpool := make(chan func(), max)
+	buffers := make(chan []byte, max)
+	for i := 0; i < max; i++ {
+		go func() {
+			for f := range threadpool {
+				f()
+			}
+		}()
+
+		buffers <- make([]byte, size)
+	}
+	return staticBuffer{
+		buffers:    buffers,
+		size:       size,
+		threadpool: threadpool,
+	}, nil
+}
+
+// Get implements TransferManager.Get().
+func (s staticBuffer) Get() []byte {
+	return <-s.buffers
+}
+
+// Put implements TransferManager.Put().
+func (s staticBuffer) Put(b []byte) { // nolint
+	select {
+	case s.buffers <- b:
+	default: // This shouldn't happen, but just in case they call Put() with their own buffer.
+	}
+}
+
+// Run implements TransferManager.Run().
+func (s staticBuffer) Run(f func()) {
+	s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s staticBuffer) Close() {
+	close(s.threadpool)
+	close(s.buffers)
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+type syncPool struct {
+	threadpool chan func()
+	pool       sync.Pool
+}
+
+// NewSyncPool creates a TransferManager that will use a sync.Pool
+// that can hold a non-capped number of buffers constrained by concurrency. This
+// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) {
+	if size < 1 || concurrency < 1 {
+		return nil, fmt.Errorf("cannot be called with size or concurrency set to < 1")
+	}
+
+	if size < _1MiB {
+		return nil, fmt.Errorf("cannot have size < 1MiB")
+	}
+
+	threadpool := make(chan func(), concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			for f := range threadpool {
+				f()
+			}
+		}()
+	}
+
+	return &syncPool{
+		threadpool: threadpool,
+		pool: sync.Pool{
+			New: func() interface{} {
+				return make([]byte, size)
+			},
+		},
+	}, nil
+}
+
+// Get implements TransferManager.Get().
+func (s *syncPool) Get() []byte {
+	return s.pool.Get().([]byte)
+}
+
+// Put implements TransferManager.Put().
+// nolint
+func (s *syncPool) Put(b []byte) {
+	s.pool.Put(b)
+}
+
+// Run implements TransferManager.Run().
+func (s *syncPool) Run(f func()) {
+	s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s *syncPool) Close() {
+	close(s.threadpool)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go
new file mode 100644
index 000000000000..612bc784c379
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go
@@ -0,0 +1,67 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
+// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
+type AccessPolicyPermission struct {
+	Read, Add, Create, Write, Delete, List bool
+}
+
+// String produces the access policy permission string for an Azure Storage container.
+// Call this method to set AccessPolicy's Permission field.
+func (p AccessPolicyPermission) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Create {
+		b.WriteRune('c')
+	}
+	if p.Write {
+		b.WriteRune('w')
+	}
+	if p.Delete {
+		b.WriteRune('d')
+	}
+	if p.List {
+		b.WriteRune('l')
+	}
+	return b.String()
+}
+
+// Parse initializes the AccessPolicyPermission's fields from a string.
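+// For example, Parse("rl") sets Read and List and leaves the other flags false; any rune
+// outside "racwdl" yields an error.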
+func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go new file mode 100644 index 000000000000..25490ab5950c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go @@ -0,0 +1,154 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// AppendBlobClient represents a client to an Azure Storage append blob; +type AppendBlobClient struct { + BlobClient + client *appendBlobClient +} + +// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options. +func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options. +func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options. +func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + sharedKey: cred, + }, + }, nil +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. 
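+//
+// A hedged sketch (the timestamp below is a made-up snapshot ID):
+//
+//	snapClient, err := abClient.WithSnapshot("2022-08-01T01:23:45.6789012Z")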
+func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) {
+	p, err := NewBlobURLParts(ab.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	p.Snapshot = snapshot
+	endpoint := p.URL()
+	pipeline := ab.client.pl
+
+	return &AppendBlobClient{
+		client: newAppendBlobClient(endpoint, pipeline),
+		BlobClient: BlobClient{
+			client:    newBlobClient(endpoint, pipeline),
+			sharedKey: ab.sharedKey,
+		},
+	}, nil
+}
+
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the versionID, returning a URL to the base blob.
+func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) {
+	p, err := NewBlobURLParts(ab.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	p.VersionID = versionID
+	endpoint := p.URL()
+	pipeline := ab.client.pl
+
+	return &AppendBlobClient{
+		client: newAppendBlobClient(endpoint, pipeline),
+		BlobClient: BlobClient{
+			client:    newBlobClient(endpoint, pipeline),
+			sharedKey: ab.sharedKey,
+		},
+	}, nil
+}
+
+// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) {
+	appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
+
+	resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders,
+		leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+
+	return toAppendBlobCreateResponse(resp), handleError(err)
+}
+
+// AppendBlock commits a new block of data, read from the given stream, to the end of the existing append blob.
+// This method returns an error if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
+func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return AppendBlobAppendBlockResponse{}, err
+	}
+
+	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format()
+
+	resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
+
+	return toAppendBlobAppendBlockResponse(resp), handleError(err)
+}
+
+// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
+func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) {
+	appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format()
+
+	// Content length must always be 0 when appending from a URL; the service rejects anything else with a 400.
+ resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, + leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err) +} + +// SealAppendBlob - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. +// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal +func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) { + leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format() + resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + return toAppendBlobSealResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go new file mode 100644 index 000000000000..9543d14f8776 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go @@ -0,0 +1,278 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type BlobClient struct { + client *blobClient + sharedKey *SharedKeyCredential +} + +// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options. +func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options. +func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + return &BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options. 
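+//
+// A minimal usage sketch (the account name, key, and URL are illustrative placeholders):
+//
+//	cred, _ := NewSharedKeyCredential("<account>", "<base64-encoded-key>")
+//	blobClient, err := NewBlobClientWithSharedKey("https://<account>.blob.core.windows.net/container/blob", cred, nil)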
+func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) {
+	authPolicy := newSharedKeyCredPolicy(cred)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(blobURL, conOptions)
+
+	return &BlobClient{
+		client:    newBlobClient(conn.Endpoint(), conn.Pipeline()), // conn.Endpoint() for consistency with the other constructors
+		sharedKey: cred,
+	}, nil
+}
+
+// NewBlobClientFromConnectionString creates a BlobClient from a connection string.
+//nolint
+func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) {
+	containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options)
+	if err != nil {
+		return nil, err
+	}
+	return containerClient.NewBlobClient(blobName)
+}
+
+// URL returns the URL endpoint used by the BlobClient object.
+func (b *BlobClient) URL() string {
+	return b.client.endpoint
+}
+
+// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) {
+	p, err := NewBlobURLParts(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	pipeline := b.client.pl
+	return &BlobClient{
+		client:    newBlobClient(p.URL(), pipeline),
+		sharedKey: b.sharedKey,
+	}, nil
+}
+
+// WithVersionID creates a new BlobClient object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) {
+	p, err := NewBlobURLParts(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	pipeline := b.client.pl
+	return &BlobClient{
+		client:    newBlobClient(p.URL(), pipeline),
+		sharedKey: b.sharedKey,
+	}, nil
+}
+
+// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) {
+	o, lease, cpk, accessConditions := options.format()
+	dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions)
+	if err != nil {
+		return BlobDownloadResponse{}, handleError(err)
+	}
+
+	offset := int64(0)
+	count := int64(CountToEnd)
+
+	if options != nil && options.Offset != nil {
+		offset = *options.Offset
+	}
+
+	if options != nil && options.Count != nil {
+		count = *options.Count
+	}
+
+	eTag := ""
+	if dr.ETag != nil {
+		eTag = *dr.ETag
+	}
+	return BlobDownloadResponse{
+		b:                          b,
+		blobClientDownloadResponse: dr,
+		ctx:                        ctx,
+		getInfo:                    HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag},
+		ObjectReplicationRules:     deserializeORSPolicies(dr.ObjectReplicationRules),
+	}, err
+}
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
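+//
+// A minimal usage sketch (assumes nil options select the service defaults):
+//
+//	_, err := blobClient.Delete(context.TODO(), nil)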
+func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) { + basics, leaseInfo, accessConditions := o.format() + resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions) + + return toBlobDeleteResponse(resp), handleError(err) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) { + undeleteOptions := o.format() + resp, err := b.client.Undelete(ctx, undeleteOptions) + + return toBlobUndeleteResponse(resp), handleError(err) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) { + basics, lease, accessConditions := options.format() + resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions) + + return toBlobSetTierResponse(resp), handleError(err) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) { + basics, lease, cpk, access := options.format() + resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access) + + return toGetBlobPropertiesResponse(resp), handleError(err) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) { + basics, lease, access := options.format() + resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access) + + return toBlobSetHTTPHeadersResponse(resp), handleError(err) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) { + basics := blobClientSetMetadataOptions{ + Metadata: metadata, + } + lease, cpk, cpkScope, access := options.format() + resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access) + + return toBlobSetMetadataResponse(resp), handleError(err) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. 
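+//
+// A minimal usage sketch (nil options assumed acceptable; the response is expected to carry the new snapshot's timestamp):
+//
+//	resp, err := blobClient.CreateSnapshot(context.TODO(), nil)
+//	_ = resp // the snapshot identifier in the response can later be passed to WithSnapshot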
+func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + basics, cpk, cpkScope, access, lease := options.format() + resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease) + + return toBlobCreateSnapshotResponse(resp), handleError(err) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) { + basics, srcAccess, destAccess, lease := options.format() + resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease) + + return toBlobStartCopyFromURLResponse(resp), handleError(err) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) { + basics, lease := options.format() + resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease) + + return toBlobAbortCopyFromURLResponse(resp), handleError(err) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) { + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + + return toBlobSetTagsResponse(resp), handleError(err) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + + return toBlobGetTagsResponse(resp), handleError(err) + +} + +// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) { + urlParts, _ := NewBlobURLParts(b.URL()) + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + + if b.sharedKey == nil { + return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. 
SAS can only be signed with a SharedKeyCredential")
+	}
+
+	return BlobSASSignatureValues{
+		ContainerName: urlParts.ContainerName,
+		BlobName:      urlParts.BlobName,
+		SnapshotTime:  t,
+		Version:       SASVersion,
+
+		Permissions: permissions.String(),
+
+		StartTime:  start.UTC(),
+		ExpiryTime: expiry.UTC(),
+	}.NewSASQueryParameters(b.sharedKey)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
new file mode 100644
index 000000000000..a9273dfb62cd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
@@ -0,0 +1,98 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"context"
+	"errors"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
+)
+
+// BlobLeaseClient represents a lease client for a blob.
+type BlobLeaseClient struct {
+	BlobClient
+	leaseID *string
+}
+
+// NewBlobLeaseClient is the constructor for BlobLeaseClient. If leaseID is nil, a new UUID is generated for it.
+func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) {
+	if leaseID == nil {
+		generatedUuid, err := uuid.New()
+		if err != nil {
+			return nil, err
+		}
+		leaseID = to.Ptr(generatedUuid.String())
+	}
+	return &BlobLeaseClient{
+		BlobClient: *b,
+		leaseID:    leaseID,
+	}, nil
+}
+
+// AcquireLease acquires a lease on the blob for write and delete operations.
+// The lease Duration must be between 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) {
+	blobAcquireLeaseOptions, modifiedAccessConditions := options.format()
+	blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID
+
+	resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions)
+	return toBlobAcquireLeaseResponse(resp), handleError(err)
+}
+
+// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
+// constant to break a fixed-Duration lease when it expires or an infinite lease immediately.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) {
+	blobBreakLeaseOptions, modifiedAccessConditions := options.format()
+	resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions)
+	return toBlobBreakLeaseResponse(resp), handleError(err)
+}
+
+// ChangeLease changes the blob's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
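+//
+// A minimal usage sketch (a nil lease ID makes NewBlobLeaseClient generate one; passing nil change options is an assumption, not a documented guarantee):
+//
+//	leaseClient, _ := blobClient.NewBlobLeaseClient(nil)
+//	_, err := leaseClient.ChangeLease(context.TODO(), nil)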
+func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) { + if blc.leaseID == nil { + return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil") + } + proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() + if err != nil { + return BlobChangeLeaseResponse{}, err + } + resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + blc.leaseID = proposedLeaseID + } + + return toBlobChangeLeaseResponse(resp), handleError(err) +} + +// RenewLease renews the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) { + if blc.leaseID == nil { + return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil") + } + renewLeaseBlobOptions, modifiedAccessConditions := options.format() + resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) + return toBlobRenewLeaseResponse(resp), handleError(err) +} + +// ReleaseLease releases the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) { + if blc.leaseID == nil { + return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") + } + renewLeaseBlobOptions, modifiedAccessConditions := options.format() + resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) + return toBlobReleaseLeaseResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go new file mode 100644 index 000000000000..b080128c8153 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go @@ -0,0 +1,201 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// BlockBlobClient defines a set of operations applicable to block blobs. +type BlockBlobClient struct { + BlobClient + client *blockBlobClient +} + +// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options. 
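+//
+// A minimal usage sketch (credential acquisition is outside this package; azidentity.NewDefaultAzureCredential is one option, shown commented out):
+//
+//	// cred, _ := azidentity.NewDefaultAzureCredential(nil)
+//	bbClient, err := NewBlockBlobClient("https://<account>.blob.core.windows.net/container/blob", cred, nil)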
+func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(blobURL, conOptions)
+
+	bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+	return &BlockBlobClient{
+		client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+		BlobClient: BlobClient{
+			client: bClient,
+		},
+	}, nil
+}
+
+// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options.
+func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) {
+	conOptions := getConnectionOptions(options)
+	conn := newConnection(blobURL, conOptions)
+
+	bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+	return &BlockBlobClient{
+		client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+		BlobClient: BlobClient{
+			client: bClient,
+		},
+	}, nil
+}
+
+// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options.
+func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) {
+	authPolicy := newSharedKeyCredPolicy(cred)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(blobURL, conOptions)
+
+	bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+	return &BlockBlobClient{
+		client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+		BlobClient: BlobClient{
+			client:    bClient,
+			sharedKey: cred,
+		},
+	}, nil
+}
+
+// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) {
+	p, err := NewBlobURLParts(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	p.Snapshot = snapshot
+	endpoint := p.URL()
+	bClient := newBlobClient(endpoint, bb.client.pl)
+
+	return &BlockBlobClient{
+		client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+		BlobClient: BlobClient{
+			client:    bClient,
+			sharedKey: bb.sharedKey,
+		},
+	}, nil
+}
+
+// WithVersionID creates a new BlockBlobClient object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) {
+	p, err := NewBlobURLParts(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	p.VersionID = versionID
+	endpoint := p.URL()
+	bClient := newBlobClient(endpoint, bb.client.pl)
+
+	return &BlockBlobClient{
+		client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+		BlobClient: BlobClient{
+			client:    bClient,
+			sharedKey: bb.sharedKey,
+		},
+	}, nil
+}
+
+// Upload creates a new block blob or overwrites an existing block blob.
+// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
+// supported with Upload; the content of the existing blob is overwritten with the new content. To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return BlockBlobUploadResponse{}, err
+	}
+
+	basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
+
+	resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
+
+	return toBlockBlobUploadResponse(resp), handleError(err)
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser,
+	options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return BlockBlobStageBlockResponse{}, err
+	}
+
+	stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
+	resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
+
+	return toBlockBlobStageBlockResponse(resp), handleError(err)
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
+func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
+	contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
+
+	stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
+
+	resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions,
+		cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+
+	return toBlockBlobStageBlockFromURLResponse(resp), handleError(err)
+}
+
+// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
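+//
+// A minimal staged-upload sketch (base64 block IDs must all be the same length; the ID, body, and ctx names are illustrative):
+//
+//	id := base64.StdEncoding.EncodeToString([]byte("block-0000"))
+//	if _, err := bbClient.StageBlock(ctx, id, body, nil); err == nil {
+//		_, err = bbClient.CommitBlockList(ctx, []string{id}, nil)
+//	}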
+func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := BlockLookupList{Latest: blockIds} + commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format() + + resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + + return toBlockBlobCommitBlockListResponse(resp), handleError(err) +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac) + + return toBlockBlobGetBlockListResponse(resp), handleError(err) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) { + copyOptions, smac, mac, lac := options.format() + resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac) + + return toBlockBlobCopyFromURLResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go new file mode 100644 index 000000000000..2c23b8f4ed86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go @@ -0,0 +1,88 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "errors" + "fmt" + "strings" +) + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs +func convertConnStrToMap(connStr string) (map[string]string, error) { + ret := make(map[string]string) + connStr = strings.TrimRight(connStr, ";") + + splitString := strings.Split(connStr, ";") + if len(splitString) == 0 { + return ret, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ret, errConnectionString + } + ret[parts[0]] = parts[1] + } + return ret, nil +} + +// parseConnectionString parses a connection string into a service URL and a SharedKeyCredential or a service url with the +// SharedAccessSignature combined. 
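+//
+// For example, a connection string of the form (illustrative values)
+//
+//	DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<base64-key>;EndpointSuffix=core.windows.net
+//
+// parses to the service URL "https://myaccount.blob.core.windows.net" plus a SharedKeyCredential for "myaccount".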
+func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) { + var serviceURL string + var cred *SharedKeyCredential + + defaultScheme := "https" + defaultSuffix := "core.windows.net" + + connStrMap, err := convertConnStrToMap(connectionString) + if err != nil { + return "", nil, err + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return "", nil, errConnectionString + } + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return "", nil, errConnectionString + } + return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), nil, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + blobEndpoint, ok := connStrMap["BlobEndpoint"] + if ok { + cred, err = NewSharedKeyCredential(accountName, accountKey) + return blobEndpoint, cred, err + } + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + + cred, err = NewSharedKeyCredential(accountName, accountKey) + if err != nil { + return "", nil, err + } + + return serviceURL, cred, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go new file mode 100644 index 000000000000..12c4a18dfd16 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go @@ -0,0 +1,253 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type ContainerClient struct { + client *containerClient + sharedKey *SharedKeyCredential +} + +// URL returns the URL endpoint used by the ContainerClient object. +func (c *ContainerClient) URL() string { + return c.client.endpoint +} + +// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options. +func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(containerURL, conOptions) + + return &ContainerClient{ + client: newContainerClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options. +func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(containerURL, conOptions) + + return &ContainerClient{ + client: newContainerClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options. 
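+//
+// A minimal usage sketch (the account name, key, and URL are illustrative placeholders):
+//
+//	cred, _ := NewSharedKeyCredential("<account>", "<base64-encoded-key>")
+//	containerClient, err := NewContainerClientWithSharedKey("https://<account>.blob.core.windows.net/container", cred, nil)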
+func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) {
+	authPolicy := newSharedKeyCredPolicy(cred)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(containerURL, conOptions)
+
+	return &ContainerClient{
+		client:    newContainerClient(conn.Endpoint(), conn.Pipeline()),
+		sharedKey: cred,
+	}, nil
+}
+
+// NewContainerClientFromConnectionString creates a ContainerClient object using the connection string of a storage account.
+func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) {
+	svcClient, err := NewServiceClientFromConnectionString(connectionString, options)
+	if err != nil {
+		return nil, err
+	}
+	return svcClient.NewContainerClient(containerName)
+}
+
+// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of
+// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient.
+// To use a different pipeline, call this package's NewBlobClient instead of this method.
+func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) {
+	blobURL := appendToURLPath(c.URL(), blobName)
+
+	return &BlobClient{
+		client:    newBlobClient(blobURL, c.client.pl),
+		sharedKey: c.sharedKey,
+	}, nil
+}
+
+// NewAppendBlobClient creates a new AppendBlobClient object by concatenating blobName to the end of
+// ContainerClient's URL. The new AppendBlobClient uses the same request policy pipeline as the ContainerClient.
+// To use a different pipeline, call this package's NewAppendBlobClient instead of this method.
+func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) {
+	blobURL := appendToURLPath(c.URL(), blobName)
+
+	return &AppendBlobClient{
+		BlobClient: BlobClient{
+			client:    newBlobClient(blobURL, c.client.pl),
+			sharedKey: c.sharedKey,
+		},
+		client: newAppendBlobClient(blobURL, c.client.pl),
+	}, nil
+}
+
+// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of
+// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient.
+// To use a different pipeline, call this package's NewBlockBlobClient instead of this method.
+func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) {
+	blobURL := appendToURLPath(c.URL(), blobName)
+
+	return &BlockBlobClient{
+		BlobClient: BlobClient{
+			client:    newBlobClient(blobURL, c.client.pl),
+			sharedKey: c.sharedKey,
+		},
+		client: newBlockBlobClient(blobURL, c.client.pl),
+	}, nil
+}
+
+// NewPageBlobClient creates a new PageBlobClient object by concatenating blobName to the end of
+// ContainerClient's URL. The new PageBlobClient uses the same request policy pipeline as the ContainerClient.
+// To use a different pipeline, call this package's NewPageBlobClient instead of this method.
+func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) {
+	blobURL := appendToURLPath(c.URL(), blobName)
+
+	return &PageBlobClient{
+		BlobClient: BlobClient{
+			client:    newBlobClient(blobURL, c.client.pl),
+			sharedKey: c.sharedKey,
+		},
+		client: newPageBlobClient(blobURL, c.client.pl),
+	}, nil
+}
+
+// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
+func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
+	basics, cpkInfo := options.format()
+	resp, err := c.client.Create(ctx, basics, cpkInfo)
+
+	return toContainerCreateResponse(resp), handleError(err)
+}
+
+// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
+func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
+	basics, leaseInfo, accessConditions := o.format()
+	resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions)
+
+	return toContainerDeleteResponse(resp), handleError(err)
+}
+
+// GetProperties returns the container's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
+func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) {
+	// NOTE: the service's GetProperties operation returns the metadata AND the properties, so a separate
+	// GetMetadata method is not exposed; this keeps the API surface small.
+	// The optionals are nil, as they were in track 1.5.
+	options, leaseAccess := o.format()
+	resp, err := c.client.GetProperties(ctx, options, leaseAccess)
+
+	return toContainerGetPropertiesResponse(resp), handleError(err)
+}
+
+// SetMetadata sets the container's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
+func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) {
+	metadataOptions, lac, mac := o.format()
+	resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac)
+
+	return toContainerSetMetadataResponse(resp), handleError(err)
+}
+
+// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
+func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) {
+	options, ac := o.format()
+	resp, err := c.client.GetAccessPolicy(ctx, options, ac)
+
+	return toContainerGetAccessPolicyResponse(resp), handleError(err)
+}
+
+// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
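+//
+// A minimal usage sketch (passing nil options is assumed to apply an empty policy, clearing any stored access policies):
+//
+//	_, err := containerClient.SetAccessPolicy(context.TODO(), nil)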
+func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
+	accessPolicy, mac, lac := o.format()
+	resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac)
+
+	return toContainerSetAccessPolicyResponse(resp), handleError(err)
+}
+
+// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager {
+	listOptions := o.format()
+	pager := c.client.ListBlobFlatSegment(listOptions)
+
+	// override the advancer
+	pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
+		listOptions.Marker = response.NextMarker
+		return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions)
+	}
+
+	return toContainerListBlobFlatSegmentPager(pager)
+}
+
+// ListBlobsHierarchy returns a pager for blobs starting from the specified Marker, with blob names grouped by the
+// given delimiter. Use an empty Marker to start enumeration from the beginning. Blob names are returned in
+// lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager {
+	listOptions := o.format()
+	pager := c.client.ListBlobHierarchySegment(delimiter, listOptions)
+
+	// override the advancer
+	pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
+		listOptions.Marker = response.NextMarker
+		return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions)
+	}
+
+	return toContainerListBlobHierarchySegmentPager(pager)
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) {
+	if c.sharedKey == nil {
+		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+	}
+
+	urlParts, err := NewBlobURLParts(c.URL())
+	if err != nil {
+		return "", err
+	}
+
+	// Containers do not have snapshots, nor versions.
+	urlParts.SAS, err = BlobSASSignatureValues{
+		ContainerName: urlParts.ContainerName,
+		Permissions:   permissions.String(),
+		StartTime:     start.UTC(),
+		ExpiryTime:    expiry.UTC(),
+	}.NewSASQueryParameters(c.sharedKey)
+
+	return urlParts.URL(), err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
new file mode 100644
index 000000000000..395a72a89aaa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
@@ -0,0 +1,102 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"context"
+	"errors"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
+)
+
+// ContainerLeaseClient represents a lease client for a container.
+type ContainerLeaseClient struct {
+	ContainerClient
+	leaseID *string
+}
+
+// NewContainerLeaseClient is the constructor for ContainerLeaseClient. If leaseID is nil, a new UUID is generated for it.
+func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) {
+	if leaseID == nil {
+		generatedUuid, err := uuid.New()
+		if err != nil {
+			return nil, err
+		}
+		leaseID = to.Ptr(generatedUuid.String())
+	}
+	return &ContainerLeaseClient{
+		ContainerClient: *c,
+		leaseID:         leaseID,
+	}, nil
+}
+
+// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) {
+	containerAcquireLeaseOptions, modifiedAccessConditions := options.format()
+	containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID
+
+	resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions)
+	if err == nil && resp.LeaseID != nil {
+		clc.leaseID = resp.LeaseID
+	}
+	return toContainerAcquireLeaseResponse(resp), handleError(err)
+}
+
+// BreakLease breaks the container's previously-acquired lease (if it exists).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) {
+	containerBreakLeaseOptions, modifiedAccessConditions := options.format()
+	resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions)
+	return toContainerBreakLeaseResponse(resp), handleError(err)
+}
+
+// ChangeLease changes the container's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
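+//
+// A minimal usage sketch (a nil lease ID makes NewContainerLeaseClient generate one; nil change options are an assumption):
+//
+//	leaseClient, _ := containerClient.NewContainerLeaseClient(nil)
+//	_, err := leaseClient.ChangeLease(context.TODO(), nil)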
+func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) {
+	if clc.leaseID == nil {
+		return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
+	}
+
+	proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
+	if err != nil {
+		return ContainerChangeLeaseResponse{}, err
+	}
+
+	resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
+	if err == nil && resp.LeaseID != nil {
+		clc.leaseID = resp.LeaseID
+	}
+	return toContainerChangeLeaseResponse(resp), handleError(err)
+}
+
+// ReleaseLease releases the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) {
+	if clc.leaseID == nil {
+		return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
+	}
+	containerReleaseLeaseOptions, modifiedAccessConditions := options.format()
+	resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions)
+
+	return toContainerReleaseLeaseResponse(resp), handleError(err)
+}
+
+// RenewLease renews the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) {
+	if clc.leaseID == nil {
+		return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
+	}
+	renewLeaseBlobOptions, modifiedAccessConditions := options.format()
+	resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
+	if err == nil && resp.LeaseID != nil {
+		clc.leaseID = resp.LeaseID
+	}
+	return toContainerRenewLeaseResponse(resp), handleError(err)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
new file mode 100644
index 000000000000..507993b9e5d0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
@@ -0,0 +1,261 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"context"
+	"io"
+	"net/url"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+// PageBlobClient represents a client to an Azure Storage page blob.
+type PageBlobClient struct {
+	BlobClient
+	client *pageBlobClient
+}
+
+// NewPageBlobClient creates a PageBlobClient object using the specified URL, Azure AD credential, and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net
+func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(blobURL, conOptions)
+
+	return &PageBlobClient{
+		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+		BlobClient: BlobClient{
+			client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+		},
+	}, nil
+}
+
+// NewPageBlobClientWithNoCredential creates a PageBlobClient object using the specified URL and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net?<SAS token>
+func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) {
+	conOptions := getConnectionOptions(options)
+	conn := newConnection(blobURL, conOptions)
+
+	return &PageBlobClient{
+		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+		BlobClient: BlobClient{
+			client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+		},
+	}, nil
+}
+
+// NewPageBlobClientWithSharedKey creates a PageBlobClient object using the specified URL, shared key, and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net
+func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) {
+	authPolicy := newSharedKeyCredPolicy(cred)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(blobURL, conOptions)
+
+	return &PageBlobClient{
+		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+		BlobClient: BlobClient{
+			client:    newBlobClient(conn.Endpoint(), conn.Pipeline()),
+			sharedKey: cred,
+		},
+	}, nil
+}
+
+// WithSnapshot creates a new PageBlobClient object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) {
+	p, err := NewBlobURLParts(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	endpoint := p.URL()
+	pipeline := pb.client.pl
+	return &PageBlobClient{
+		client: newPageBlobClient(endpoint, pipeline),
+		BlobClient: BlobClient{
+			client:    newBlobClient(endpoint, pipeline),
+			sharedKey: pb.sharedKey,
+		},
+	}, nil
+}
+
+// WithVersionID creates a new PageBlobClient object identical to the source but with the specified version id.
+// Pass "" to remove the version returning a URL to the base blob.
+func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) {
+	p, err := NewBlobURLParts(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+
+	p.VersionID = versionID
+	endpoint := p.URL()
+
+	pipeline := pb.client.pl
+	return &PageBlobClient{
+		client: newPageBlobClient(endpoint, pipeline),
+		BlobClient: BlobClient{
+			client:    newBlobClient(endpoint, pipeline),
+			sharedKey: pb.sharedKey,
+		},
+	}, nil
+}
+
+// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
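+//
+// A minimal usage sketch (the size must be a multiple of the 512-byte page size; 512 KiB shown, nil options assumed acceptable):
+//
+//	_, err := pbClient.Create(context.TODO(), 512*1024, nil)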
+func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) { + createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + + resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return toPageBlobCreateResponse(resp), handleError(err) +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + + if err != nil { + return PageBlobUploadPagesResponse{}, err + } + + uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, + cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return toPageBlobUploadPagesResponse(resp), handleError(err) +} + +// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. +// The sourceOffset specifies the start offset of source data to copy from. +// The destOffset specifies the start offset of data in page blob will be written to. +// The count must be a multiple of 512 bytes. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. +func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, + options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) { + + uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format() + + resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0, + rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, + sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + + return toPageBlobUploadPagesFromURLResponse(resp), handleError(err) +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) { + clearOptions := &pageBlobClientClearPagesOptions{ + Range: pageRange.format(), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, + cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return toPageBlobClearPagesResponse(resp), handleError(err) +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager { + getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format() + + pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + + // Fixing Advancer + pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) { + getPageRangesOptions.Marker = response.NextMarker + req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return nil, handleError(err) + } + queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) + if err != nil { + return nil, handleError(err) + } + req.Raw().URL.RawQuery = queryValues.Encode() + return req, nil + } + + return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager) +} + +// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager { + getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format() + + getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + + // Fixing Advancer + getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { + getPageRangesDiffOptions.Marker = response.NextMarker + req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return nil, handleError(err) + } + queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) + if err != nil { + return nil, handleError(err) + } + req.Raw().URL.RawQuery = queryValues.Encode() + return req, nil + } + + return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) { + resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return toPageBlobResizeResponse(resp), handleError(err) +} + +// UpdateSequenceNumber sets the page blob's sequence number. +func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) { + actionType, updateOptions, lac, mac := options.format() + resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) + + return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err) +} + +// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. 
+// The snapshot is copied such that only the differential changes between the previously copied snapshot and the specified snapshot are transferred to the destination.
+// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
+// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
+func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
+	copySourceURL, err := url.Parse(copySource)
+	if err != nil {
+		return PageBlobCopyIncrementalResponse{}, err
+	}
+
+	queryParams := copySourceURL.Query()
+	queryParams.Set("snapshot", prevSnapshot)
+	copySourceURL.RawQuery = queryParams.Encode()
+
+	pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
+	resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
+
+	return toPageBlobCopyIncrementalResponse(resp), handleError(err)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
new file mode 100644
index 000000000000..062587604e85
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
@@ -0,0 +1,184 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"net"
+	"net/url"
+	"strings"
+)
+
+const (
+	snapshot           = "snapshot"
+	versionId          = "versionid"
+	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type BlobURLParts struct {
+	Scheme              string // Ex: "https://"
+	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+	IPEndpointStyleInfo IPEndpointStyleInfo
+	ContainerName       string // "" if no container
+	BlobName            string // "" if no blob
+	Snapshot            string // "" if not a snapshot
+	SAS                 SASQueryParameters
+	UnparsedParams      string
+	VersionID           string // "" if not versioning enabled
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
+// Ex: "https://10.132.141.33/accountname/containername"
+type IPEndpointStyleInfo struct {
+	AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks whether the URL's host is an IP address, in which case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// Like url.URL's Host field, host may be either "host" or "host:port".
+func isIPEndpointStyle(host string) bool {
+	if host == "" {
+		return false
+	}
+	if h, _, err := net.SplitHostPort(host); err == nil {
+		host = h
+	}
+	// For IPv6, SplitHostPort can fail because no port is present.
+	// In this case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} + +// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. +func NewBlobURLParts(u string) (BlobURLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return BlobURLParts{}, err + } + + up := BlobURLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the container & blob names (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if isIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + + up.Snapshot = "" // Assume no snapshot + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + up.VersionID = "" // Assume no versionID + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + + up.SAS = newSASQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +type caseInsensitiveValues url.Values // map[string][]string +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} + +// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
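+//
+// A minimal round-trip sketch (illustrative only; the account, container, and
+// snapshot values below are invented): parse a blob URL, adjust one field, and
+// rebuild it.
+//
+//	parts, err := NewBlobURLParts("https://myaccount.blob.core.windows.net/mycontainer/myblob")
+//	if err != nil {
+//		// handle error
+//	}
+//	parts.Snapshot = "2022-01-01T00:00:00.0000000Z" // target a snapshot
+//	snapshotURL := parts.URL()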
+func (up BlobURLParts) URL() string { + path := "" + if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate container & blob names (if they exist) + if up.ContainerName != "" { + path += "/" + up.ContainerName + if up.BlobName != "" { + path += "/" + up.BlobName + } + } + + rawQuery := up.UnparsedParams + + //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { + up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) + } + + // Concatenate blob version id query parameter (if it exists) + if up.VersionID != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += versionId + "=" + up.VersionID + } + + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += snapshot + "=" + up.Snapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go new file mode 100644 index 000000000000..3f987843904b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import "net/http" + +// ResponseError is a wrapper of error passed from service +type ResponseError interface { + Error() string + Unwrap() error + RawResponse() *http.Response + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go new file mode 100644 index 000000000000..dda993d1c96c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go @@ -0,0 +1,35 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +// GetHTTPHeaders returns the user-modifiable properties for this blob. +func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + BlobContentType: bgpr.ContentType, + BlobContentEncoding: bgpr.ContentEncoding, + BlobContentLanguage: bgpr.ContentLanguage, + BlobContentDisposition: bgpr.ContentDisposition, + BlobCacheControl: bgpr.CacheControl, + BlobContentMD5: bgpr.ContentMD5, + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// GetHTTPHeaders returns the user-modifiable properties for this blob. 
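+//
+// Sketch (assumes a BlobDownloadResponse is already in hand): round-tripping
+// the headers makes it easy to change one property without clearing the
+// others, since the service treats the blob's HTTP headers as a set.
+//
+//	headers := downloadResp.GetHTTPHeaders()
+//	// modify a single field on headers, then pass headers to a
+//	// set-HTTP-headers call so the remaining values are preserved.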
+func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders {
+	return BlobHTTPHeaders{
+		BlobContentType:        r.ContentType,
+		BlobContentEncoding:    r.ContentEncoding,
+		BlobContentLanguage:    r.ContentLanguage,
+		BlobContentDisposition: r.ContentDisposition,
+		BlobCacheControl:       r.CacheControl,
+		BlobContentMD5:         r.ContentMD5,
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go
new file mode 100644
index 000000000000..3179138f1113
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go
@@ -0,0 +1,194 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"context"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+const CountToEnd = 0
+
+// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
+type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
+
+// HTTPGetterInfo is passed to an HTTPGetter function, giving it the parameters
+// that should be used to make an HTTP GET request.
+type HTTPGetterInfo struct {
+	// Offset specifies the start offset that should be used when
+	// creating the HTTP GET request's Range header
+	Offset int64
+
+	// Count specifies the count of bytes that should be used to calculate
+	// the end offset when creating the HTTP GET request's Range header
+	Count int64
+
+	// ETag specifies the resource's etag that should be used when creating
+	// the HTTP GET request's If-Match header
+	ETag string
+}
+
+// FailedReadNotifier is a function type that represents the notification function called when a read fails
+type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
+
+// RetryReaderOptions contains properties which help decide when to retry.
+type RetryReaderOptions struct {
+	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
+	// while reading from a RetryReader. A value of zero means that no additional HTTP
+	// GET requests will be made.
+	MaxRetryRequests   int
+	doInjectError      bool
+	doInjectErrorRound int
+	injectedError      error
+
+	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
+	NotifyFailedRead FailedReadNotifier
+
+	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
+	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+	// treated as a fatal (non-retryable) error.
+	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+	// which will be retried.
+	TreatEarlyCloseAsError bool
+
+	CpkInfo      *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+}
+
+// retryReader implements io.ReadCloser methods.
+// retryReader tries to read from the response, and if a retriable network error
+// is returned during reading, it retries, per the retry reader options, by executing
+// a user-defined action with the provided data to get a new response, and continues the
+// overall reading process by reading from the new response.
+type retryReader struct {
+	ctx             context.Context
+	info            HTTPGetterInfo
+	countWasBounded bool
+	o               RetryReaderOptions
+	getter          HTTPGetter
+
+	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+	responseMu *sync.Mutex
+	response   *http.Response
+}
+
+// NewRetryReader creates a retry reader.
+func NewRetryReader(ctx context.Context, initialResponse *http.Response,
+	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
+	return &retryReader{
+		ctx:             ctx,
+		getter:          getter,
+		info:            info,
+		countWasBounded: info.Count != CountToEnd,
+		response:        initialResponse,
+		responseMu:      &sync.Mutex{},
+		o:               o}
+}
+
+func (s *retryReader) setResponse(r *http.Response) {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	s.response = r
+}
+
+func (s *retryReader) Read(p []byte) (n int, err error) {
+	for try := 0; ; try++ {
+		// fmt.Println(try) // Uncomment for debugging.
+		if s.countWasBounded && s.info.Count == CountToEnd {
+			// User specified an original count and the remaining bytes are 0; return 0, EOF.
+			return 0, io.EOF
+		}
+
+		s.responseMu.Lock()
+		resp := s.response
+		s.responseMu.Unlock()
+		if resp == nil { // We don't have a response stream to read from, try to get one.
+			newResponse, err := s.getter(s.ctx, s.info)
+			if err != nil {
+				return 0, err
+			}
+			// Successful GET; this is the network stream we'll read from.
+			s.setResponse(newResponse)
+			resp = newResponse
+		}
+		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
+
+		// Injection mechanism for testing.
+		if s.o.doInjectError && try == s.o.doInjectErrorRound {
+			if s.o.injectedError != nil {
+				err = s.o.injectedError
+			} else {
+				err = &net.DNSError{IsTemporary: true}
+			}
+		}
+
+		// We successfully read data or reached EOF.
+		if err == nil || err == io.EOF {
+			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
+			if s.info.Count != CountToEnd {
+				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
+			}
+			return n, err // Return the result to the caller
+		}
+		_ = s.Close()
+
+		s.setResponse(nil) // Our stream is no longer good
+
+		// Check the retry count and error code, and decide whether to retry.
+		retriesExhausted := try >= s.o.MaxRetryRequests
+		_, isNetError := err.(net.Error)
+		isUnexpectedEOF := err == io.ErrUnexpectedEOF
+		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+		// Notify, for logging purposes, of any failures
+		if s.o.NotifyFailedRead != nil {
+			failureCount := try + 1 // because try is zero-based
+			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
+		}
+
+		if willRetry {
+			continue
+			// Loop around and try to get and read from new stream.
+		}
+		return n, err // Not retryable, or retries exhausted, so just return
+	}
+}
+
+// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry.
+// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
+// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
+// which is exactly the behaviour we want.
+// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read)
+// then there are two different types of error that may happen - either the one we check for here,
+// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *retryReader) wasRetryableEarlyClose(err error) bool {
+	if s.o.TreatEarlyCloseAsError {
+		return false // user wants all early closes to be errors, and so not retryable
+	}
+	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
+	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
+}
+
+const ReadOnClosedBodyMessage = "read on closed response body"
+
+func (s *retryReader) Close() error {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	if s.response != nil && s.response.Body != nil {
+		return s.response.Body.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
new file mode 100644
index 000000000000..b4104def5837
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
@@ -0,0 +1,243 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSASSignatureValues struct {
+	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
+	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
+	IPRange       IPRange     `param:"sip"`
+	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
+	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
+}
+
+// Sign uses an account's shared key credential to sign these signature values to produce
+// the proper SAS query parameters.
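+//
+// Hedged sketch (the account name, key, and lifetime below are invented):
+// produce account-SAS query parameters with a shared key credential and
+// append them to a service URL.
+//
+//	cred, _ := NewSharedKeyCredential("myaccount", "<base64-account-key>")
+//	sas, err := AccountSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//		Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
+//		Services:      AccountSASServices{Blob: true}.String(),
+//		ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
+//	}.Sign(cred)
+//	if err == nil {
+//		_ = "https://myaccount.blob.core.windows.net/?" + sas.Encode()
+//	}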
+func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	perms := &AccountSASPermissions{}
+	if err := perms.Parse(v.Permissions); err != nil {
+		return SASQueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That's right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+	if err != nil {
+		return SASQueryParameters{}, err
+	}
+	p := SASQueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
+type AccountSASPermissions struct {
+	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Permissions field.
+func (p AccountSASPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		buffer.WriteRune('x')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	if p.Tag {
+		buffer.WriteRune('t')
+	}
+	if p.FilterByTags {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASPermissions's fields from a string.
+func (p *AccountSASPermissions) Parse(s string) error {
+	*p = AccountSASPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		case 'x':
+			// 'x' is emitted by String() for DeletePreviousVersion, so parse it back the same way.
+			p.DeletePreviousVersion = true
+		case 't':
+			p.Tag = true
+		case 'f':
+			p.FilterByTags = true
+		default:
+			return fmt.Errorf("invalid permission character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. +type AccountSASServices struct { + Blob, Queue, File bool +} + +// String produces the SAS services string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Services field. +func (s AccountSASServices) String() string { + var buffer bytes.Buffer + if s.Blob { + buffer.WriteRune('b') + } + if s.Queue { + buffer.WriteRune('q') + } + if s.File { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASServices' fields from a string. +func (s *AccountSASServices) Parse(str string) error { + *s = AccountSASServices{} // Clear out the flags + for _, r := range str { + switch r { + case 'b': + s.Blob = true + case 'q': + s.Queue = true + case 'f': + s.File = true + default: + return fmt.Errorf("invalid service character: '%v'", r) + } + } + return nil +} + +// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +type AccountSASResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's ResourceTypes field. +func (rt AccountSASResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// Parse initializes the AccountSASResourceType's fields from a string. +func (rt *AccountSASResourceTypes) Parse(s string) error { + *rt = AccountSASResourceTypes{} // Clear out the flags + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return fmt.Errorf("invalid resource type: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go new file mode 100644 index 000000000000..7efbec9b8cf3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go @@ -0,0 +1,427 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// SASProtocol indicates the http/https. +type SASProtocol string + +const ( + // SASProtocolHTTPS can be specified for a SAS protocol + SASProtocolHTTPS SASProtocol = "https" + + // SASProtocolHTTPSandHTTP can be specified for a SAS protocol + //SASProtocolHTTPSandHTTP SASProtocol = "https,http" +) + +// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
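+//
+// Sketch: zero times are omitted, which is how optional start/snapshot times
+// stay out of the signed string (the expiry value below is invented).
+//
+//	st, se, snap := FormatTimesForSASSigning(time.Time{}, expiry, time.Time{})
+//	// st == "" and snap == ""; se is rendered in the "2006-01-02T15:04:05Z" layout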
+func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatSASTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatSASTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatSASTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString tries to parse a SAS time string against each of the supported ISO 8601 layouts.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range SASTimeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
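+	// (The `param:"..."` tags on the fields below record which SAS query
+	// parameter each field corresponds to. Parsing and encoding are done
+	// explicitly in newSASQueryParameters and addToValues, so the tags appear
+	// to serve as documentation rather than reflection metadata.)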
+	version                    string      `param:"sv"`
+	services                   string      `param:"ss"`
+	resourceTypes              string      `param:"srt"`
+	protocol                   SASProtocol `param:"spr"`
+	startTime                  time.Time   `param:"st"`
+	expiryTime                 time.Time   `param:"se"`
+	snapshotTime               time.Time   `param:"snapshot"`
+	ipRange                    IPRange     `param:"sip"`
+	identifier                 string      `param:"si"`
+	resource                   string      `param:"sr"`
+	permissions                string      `param:"sp"`
+	signature                  string      `param:"sig"`
+	cacheControl               string      `param:"rscc"`
+	contentDisposition         string      `param:"rscd"`
+	contentEncoding            string      `param:"rsce"`
+	contentLanguage            string      `param:"rscl"`
+	contentType                string      `param:"rsct"`
+	signedOid                  string      `param:"skoid"`
+	signedTid                  string      `param:"sktid"`
+	signedStart                time.Time   `param:"skt"`
+	signedService              string      `param:"sks"`
+	signedExpiry               time.Time   `param:"ske"`
+	signedVersion              string      `param:"skv"`
+	signedDirectoryDepth       string      `param:"sdd"`
+	preauthorizedAgentObjectId string      `param:"saoid"`
+	agentObjectId              string      `param:"suoid"`
+	correlationId              string      `param:"scid"`
+	// private members used for startTime and expiryTime formatting.
+	stTimeFormat string
+	seTimeFormat string
+}
+
+// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId
+func (p *SASQueryParameters) PreauthorizedAgentObjectId() string {
+	return p.preauthorizedAgentObjectId
+}
+
+// AgentObjectId returns agentObjectId
+func (p *SASQueryParameters) AgentObjectId() string {
+	return p.agentObjectId
+}
+
+// SignedCorrelationId returns signedCorrelationId
+func (p *SASQueryParameters) SignedCorrelationId() string {
+	return p.correlationId
+}
+
+// SignedTid returns signedTid
+func (p *SASQueryParameters) SignedTid() string {
+	return p.signedTid
+}
+
+// SignedStart returns signedStart
+func (p *SASQueryParameters) SignedStart() time.Time {
+	return p.signedStart
+}
+
+// SignedExpiry returns signedExpiry
+func (p *SASQueryParameters) SignedExpiry() time.Time {
+	return p.signedExpiry
+}
+
+// SignedService returns signedService
+func (p *SASQueryParameters) SignedService() string {
+	return p.signedService
+}
+
+// SignedVersion returns signedVersion
+func (p *SASQueryParameters) SignedVersion() string {
+	return p.signedVersion
+}
+
+// SnapshotTime returns snapshotTime
+func (p *SASQueryParameters) SnapshotTime() time.Time {
+	return p.snapshotTime
+}
+
+// Version returns version
+func (p *SASQueryParameters) Version() string {
+	return p.version
+}
+
+// Services returns services
+func (p *SASQueryParameters) Services() string {
+	return p.services
+}
+
+// ResourceTypes returns resourceTypes
+func (p *SASQueryParameters) ResourceTypes() string {
+	return p.resourceTypes
+}
+
+// Protocol returns protocol
+func (p *SASQueryParameters) Protocol() SASProtocol {
+	return p.protocol
+}
+
+// StartTime returns startTime
+func (p *SASQueryParameters) StartTime() time.Time {
+	return p.startTime
+}
+
+// ExpiryTime returns expiryTime
+func (p *SASQueryParameters) ExpiryTime() time.Time {
+	return p.expiryTime
+}
+
+// IPRange returns ipRange
+func (p *SASQueryParameters) IPRange() IPRange {
+	return p.ipRange
+}
+
+// Identifier returns identifier
+func (p *SASQueryParameters) Identifier() string {
+	return p.identifier
+}
+
+// Resource returns resource
+func (p *SASQueryParameters) Resource() string {
+	return p.resource
+}
+
+// Permissions returns permissions
+func (p *SASQueryParameters) Permissions() string {
+	return p.permissions
+}
+
+// Signature returns signature
+func (p *SASQueryParameters) Signature() string {
+	return p.signature
+}
+
+// CacheControl returns cacheControl
+func (p *SASQueryParameters) CacheControl() string {
+	return p.cacheControl
+}
+
+// ContentDisposition returns contentDisposition
+func (p *SASQueryParameters) ContentDisposition() string {
+	return p.contentDisposition
+}
+
+// ContentEncoding returns contentEncoding
+func (p *SASQueryParameters) ContentEncoding() string {
+	return p.contentEncoding
+}
+
+// ContentLanguage returns contentLanguage
+func (p *SASQueryParameters) ContentLanguage() string {
+	return p.contentLanguage
+}
+
+// ContentType returns contentType
+func (p *SASQueryParameters) ContentType() string {
+	return p.contentType
+}
+
+// SignedDirectoryDepth returns signedDirectoryDepth
+func (p *SASQueryParameters) SignedDirectoryDepth() string {
+	return p.signedDirectoryDepth
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
+	p := SASQueryParameters{}
+	for k, v := range values {
+		val := v[0]
+		isSASKey := true
+		switch strings.ToLower(k) {
+		case "sv":
+			p.version = val
+		case "ss":
+			p.services = val
+		case "srt":
+			p.resourceTypes = val
+		case "spr":
+			p.protocol = SASProtocol(val)
+		case "snapshot":
+			p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
+		case "st":
+			p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
+		case "se":
+			p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
+		case "sip":
+			dashIndex := strings.Index(val, "-")
+			if dashIndex == -1 {
+				p.ipRange.Start = net.ParseIP(val)
+			} else {
+				p.ipRange.Start = net.ParseIP(val[:dashIndex])
+				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+			}
+		case "si":
+			p.identifier = val
+		case "sr":
+			p.resource = val
+		case "sp":
+			p.permissions = val
+		case "sig":
+			p.signature = val
+		case "rscc":
+			p.cacheControl = val
+		case "rscd":
+			p.contentDisposition = val
+		case "rsce":
+			p.contentEncoding = val
+		case "rscl":
+			p.contentLanguage = val
+		case "rsct":
+			p.contentType = val
+		case "skoid":
+			p.signedOid = val
+		case "sktid":
+			p.signedTid = val
+		case "skt":
+			p.signedStart, _ = time.Parse(SASTimeFormat, val)
+		case "ske":
+			p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
+		case "sks":
+			p.signedService = val
+		case "skv":
+			p.signedVersion = val
+		case "sdd":
+			p.signedDirectoryDepth = val
+		case "saoid":
+			p.preauthorizedAgentObjectId = val
+		case "suoid":
+			p.agentObjectId = val
+		case "scid":
+			p.correlationId = val
+		default:
+			isSASKey = false // We didn't recognize the query parameter
+		}
+		if isSASKey && deleteSASParametersFromValues {
+			delete(values, k)
+		}
+	}
+	return p
+}
+
+// addToValues adds the SAS components to the specified query parameters map.
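+// Outside callers reach this via Encode; a small sketch (sasParams is a
+// hypothetical value that would normally come from a Sign call):
+//
+//	query := sasParams.Encode() // keys are sorted; empty components are omitted
+//	blobURL := "https://myaccount.blob.core.windows.net/c/b?" + query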
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values { + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOid != "" { + v.Add("skoid", p.signedOid) + v.Add("sktid", p.signedTid) + v.Add("skt", p.signedStart.Format(SASTimeFormat)) + v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.preauthorizedAgentObjectId != "" { + v.Add("saoid", p.preauthorizedAgentObjectId) + } + if p.agentObjectId != "" { + v.Add("suoid", p.agentObjectId) + } + if p.correlationId != "" { + v.Add("scid", p.correlationId) + } + return v +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *SASQueryParameters) Encode() string { + v := url.Values{} + p.addToValues(v) + return v.Encode() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go new file mode 100644 index 000000000000..488baed8c0c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go @@ -0,0 +1,365 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "fmt" + "strings" + "time" +) + +// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+type BlobSASSignatureValues struct {
+	Version                    string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol                   SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime                  time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime                 time.Time   `param:"se"`  // Not specified if IsZero
+	SnapshotTime               time.Time
+	Permissions                string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
+	IPRange                    IPRange `param:"sip"`
+	Identifier                 string  `param:"si"`
+	ContainerName              string
+	BlobName                   string // Use "" to create a Container SAS
+	Directory                  string // Not empty for a directory SAS (i.e. sr=d)
+	CacheControl               string // rscc
+	ContentDisposition         string // rscd
+	ContentEncoding            string // rsce
+	ContentLanguage            string // rscl
+	ContentType                string // rsct
+	BlobVersion                string // sr=bv
+	PreauthorizedAgentObjectId string
+	AgentObjectId              string
+	CorrelationId              string
+}
+
+func getDirectoryDepth(path string) string {
+	if path == "" {
+		return ""
+	}
+	return fmt.Sprint(strings.Count(path, "/") + 1)
+}
+
+// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values to produce
+// the proper SAS query parameters.
+// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
+func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+	resource := "c"
+	if sharedKeyCredential == nil {
+		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
+	}
+
+	if !v.SnapshotTime.IsZero() {
+		resource = "bs"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.BlobVersion != "" {
+		resource = "bv"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.Directory != "" {
+		resource = "d"
+		v.BlobName = ""
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.BlobName == "" {
+		// Make sure the permission characters are in the correct order
+		perms := &ContainerSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else {
+		resource = "b"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
+
+	signedIdentifier := v.Identifier
+
+	//udk := sharedKeyCredential.getUDKParams()
+	//
+	//if udk != nil {
+	//	udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
+	//	//I don't like this answer to combining the functions
+	//	//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_
way to do it. + // signedIdentifier = strings.Join([]string{ + // udk.SignedOid, + // udk.SignedTid, + // udkStart, + // udkExpiry, + // udk.SignedService, + // udk.SignedVersion, + // v.PreauthorizedAgentObjectId, + // v.AgentObjectId, + // v.CorrelationId, + // }, "\n") + //} + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature := "" + signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + if err != nil { + return SASQueryParameters{}, err + } + + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, + agentObjectId: v.AgentObjectId, + correlationId: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + ////User delegation SAS specific parameters + //if udk != nil { + // p.signedOid = udk.SignedOid + // p.signedTid = udk.SignedTid + // p.signedStart = udk.SignedStart + // p.signedExpiry = udk.SignedExpiry + // p.signedService = udk.SignedService + // p.signedVersion = udk.SignedVersion + //} + + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool + Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSASSignatureValues's Permissions field. 
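+//
+// Sketch: a read+list container permission string comes out as "rl", in the
+// canonical ordering the service expects.
+//
+//	perms := ContainerSASPermissions{Read: true, List: true}.String() // "rl"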
+func (p ContainerSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions's fields from a string. +func (p *ContainerSASPermissions) Parse(s string) error { + *p = ContainerSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} + +// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p BlobSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.Tag { + b.WriteRune('t') + } + if p.List { + b.WriteRune('l') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. +func (p *BlobSASPermissions) Parse(s string) error { + *p = BlobSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 't': + p.Tag = true + case 'l': + p.List = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go new file mode 100644 index 000000000000..e75dd10b31e7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go @@ -0,0 +1,266 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package azblob
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+//nolint
+const (
+	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
+	ContainerNameRoot = "$root"
+
+	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
+	ContainerNameLogs = "$logs"
+)
+
+// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
+type ServiceClient struct {
+	client    *serviceClient
+	sharedKey *SharedKeyCredential
+}
+
+// URL returns the URL endpoint used by the ServiceClient object.
+func (s ServiceClient) URL() string {
+	return s.client.endpoint
+}
+
+// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net
+func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(serviceURL, conOptions)
+
+	return &ServiceClient{
+		client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
+	}, nil
+}
+
+// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net?<SAS token>
+func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) {
+	conOptions := getConnectionOptions(options)
+	conn := newConnection(serviceURL, conOptions)
+
+	return &ServiceClient{
+		client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
+	}, nil
+}
+
+// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
+// Example of serviceURL: https://<account>.blob.core.windows.net
+func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) {
+	authPolicy := newSharedKeyCredPolicy(cred)
+	conOptions := getConnectionOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	conn := newConnection(serviceURL, conOptions)
+
+	return &ServiceClient{
+		client:    newServiceClient(conn.Endpoint(), conn.Pipeline()),
+		sharedKey: cred,
+	}, nil
+}
+
+// NewServiceClientFromConnectionString creates a service client from the given connection string.
+//nolint
+func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) {
+	endpoint, credential, err := parseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	return NewServiceClientWithSharedKey(endpoint, credential, options)
+}
+
+// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of
+// ServiceClient's URL. The new ContainerClient uses the same request policy pipeline as the ServiceClient.
+// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's
+// NewContainerClient method.
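+//
+// Sketch (endpoint and names invented; error handling elided):
+//
+//	svc, _ := NewServiceClientWithNoCredential("https://<account>.blob.core.windows.net/?<SAS token>", nil)
+//	container, err := svc.NewContainerClient("mycontainer")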
+func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) {
+	containerURL := appendToURLPath(s.client.endpoint, containerName)
+	return &ContainerClient{
+		client:    newContainerClient(containerURL, s.client.pl),
+		sharedKey: s.sharedKey,
+	}, nil
+}
+
+// CreateContainer is a lifecycle method to create a new container under the specified account.
+// If the container with the same name already exists, a ResourceExistsError will be raised.
+// This method returns a client with which to interact with the newly created container.
+func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
+	containerClient, err := s.NewContainerClient(containerName)
+	if err != nil {
+		return ContainerCreateResponse{}, err
+	}
+	containerCreateResp, err := containerClient.Create(ctx, options)
+	return containerCreateResp, err
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
+	containerClient, _ := s.NewContainerClient(containerName)
+	containerDeleteResp, err := containerClient.Delete(ctx, options)
+	return containerDeleteResp, err
+}
+
+// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
+func appendToURLPath(u string, name string) string {
+	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
+	// When you call url.Parse() this is what you'll get:
+	//	Scheme: "https"
+	//	Opaque: ""
+	//	User: nil
+	//	Host: "ms.com"
+	//	Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
+	//	RawPath: ""
+	//	ForceQuery: false
+	//	RawQuery: "k1=v1&k2=v2"
+	//	Fragment: "f"
+	uri, _ := url.Parse(u)
+
+	if len(uri.Path) == 0 || uri.Path[len(uri.Path)-1] != '/' {
+		uri.Path += "/" // Append "/" to end before appending name
+	}
+	uri.Path += name
+	return uri.String()
+}
+
+// GetAccountInfo provides account-level information
+func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) {
+	getAccountInfoOptions := o.format()
+	resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions)
+	return toServiceGetAccountInfoResponse(resp), handleError(err)
+}
+
+// ListContainers operation returns a pager of the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager {
+	listOptions := o.format()
+	pager := s.client.ListContainersSegment(listOptions)
+	//TODO: .Err()?
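+	// Note: the generated advancer does not carry the continuation token
+	// forward, so it is overridden below: each advance rebuilds the list
+	// request and sets the "marker" query parameter from the previous
+	// response's NextMarker.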
+	//// override the generated advancer, which is incorrect
+	//if pager.Err() != nil {
+	//	return pager
+	//}
+
+	pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) {
+		if response.ListContainersSegmentResponse.NextMarker == nil {
+			return nil, handleError(errors.New("unexpected missing NextMarker"))
+		}
+		req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions)
+		if err != nil {
+			return nil, handleError(err)
+		}
+		queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery)
+		queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker)
+
+		req.Raw().URL.RawQuery = queryValues.Encode()
+		return req, nil
+	}
+
+	return toServiceListContainersSegmentPager(*pager)
+}
+
+// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules.
+func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) {
+	getPropertiesOptions := o.format()
+	resp, err := s.client.GetProperties(ctx, getPropertiesOptions)
+
+	return toServiceGetPropertiesResponse(resp), handleError(err)
+}
+
+// SetProperties sets the properties of a storage account's Blob service, including Azure Storage Analytics.
+// If an element (e.g. analytics_logging) is left unset, the existing settings on the service for that functionality are preserved.
+func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) {
+	properties, setPropertiesOptions := o.format()
+	resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions)
+
+	return toServiceSetPropertiesResponse(resp), handleError(err)
+}
+
+// GetStatistics retrieves statistics related to replication for the Blob service.
+// It is only available when read-access geo-redundant replication is enabled for the storage account.
+// With geo-redundant replication, Azure Storage maintains your data durably
+// in two locations. In both locations, Azure Storage constantly maintains
+// multiple healthy replicas of your data. The location where you read,
+// create, update, or delete data is the primary storage account location.
+// The primary location exists in the region you choose at the time you
+// create an account via the Azure Management Azure classic portal, for
+// example, North Central US. The location to which your data is replicated
+// is the secondary location. The secondary location is automatically
+// determined based on the location of the primary; it is in a second data
+// center that resides in the same region as the primary location. Read-only
+// access is available from the secondary location, if read-access geo-redundant
+// replication is enabled for your storage account.
+func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) {
+	getStatisticsOptions := o.format()
+	resp, err := s.client.GetStatistics(ctx, getStatisticsOptions)
+
+	return toServiceGetStatisticsResponse(resp), handleError(err)
+}
+
+// CanGetAccountSASToken reports whether the ServiceClient holds a shared key credential, which is required to sign an account SAS.
+func (s *ServiceClient) CanGetAccountSASToken() bool {
+	return s.sharedKey != nil
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the account the client points to.
+
+// GetSASURL is a convenience method for generating a SAS token for the account the client points to.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+// This can be checked in advance with CanGetAccountSASToken().
+func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) {
+	if s.sharedKey == nil {
+		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+	}
+
+	qps, err := AccountSASSignatureValues{
+		Version:       SASVersion,
+		Protocol:      SASProtocolHTTPS,
+		Permissions:   permissions.String(),
+		Services:      "b",
+		ResourceTypes: resources.String(),
+		StartTime:     start.UTC(),
+		ExpiryTime:    expiry.UTC(),
+	}.Sign(s.sharedKey)
+	if err != nil {
+		return "", err
+	}
+
+	endpoint := s.URL()
+	if !strings.HasSuffix(endpoint, "/") {
+		endpoint += "/"
+	}
+	endpoint += "?" + qps.Encode()
+
+	return endpoint, nil
+}
+
+// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
+// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
+// To specify a container, e.g. "@container='containerName' and Name='C'"
+func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) {
+	// TODO: Use pager here? Missing support from zz_generated_pagers.go
+	serviceFilterBlobsOptions := o.pointer()
+	resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions)
+	return toServiceFilterBlobsResponse(resp), err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
new file mode 100644
index 000000000000..60b1e5a76b6f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
@@ -0,0 +1,197 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) {
+	c := SharedKeyCredential{accountName: accountName}
+	if err := c.SetAccountKey(accountKey); err != nil {
+		return nil, err
+	}
+	return &c, nil
+}
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+// It is immutable, making it shareable and goroutine-safe.
+type SharedKeyCredential struct {
+	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
+	accountName string
+	accountKey  atomic.Value // []byte
+}
+
+// AccountName returns the Storage account's name.
+func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+		cr.WriteString(u.EscapedPath())
+	} else {
+		// a slash is required to indicate the root path
+		cr.WriteString("/")
+	}
+
+	// params is a map[string][]string; the param name is the key and the param values are a []string
+	params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
+	if err != nil {
+		return "", fmt.Errorf("failed to parse query params: %w", err)
+	}
+
+	if len(params) > 0 { // There is at least 1 query parameter
+		var paramNames []string // We use this to sort the parameter key names
+		for paramName := range params {
+			paramNames = append(paramNames, paramName) // paramNames must be lowercase
+		}
+		sort.Strings(paramNames)
+
+		for _, paramName := range paramNames {
+			paramValues := params[paramName]
+			sort.Strings(paramValues)
+
+			// Join the sorted key values separated by ','
+			// Then prepend "keyName:"; then add this string to the buffer
+			cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
+		}
+	}
+	return cr.String(), nil
+}
+
+type sharedKeyCredPolicy struct {
+	cred *SharedKeyCredential
+}
+
+func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy {
+	return &sharedKeyCredPolicy{cred: cred}
+}
+
+func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
+	if d := req.Raw().Header.Get(headerXmsDate); d == "" {
+		req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat))
+	}
+	stringToSign, err := s.cred.buildStringToSign(req.Raw())
+	if err != nil {
+		return nil, err
+	}
+	signature, err := s.cred.ComputeHMACSHA256(stringToSign)
+	if err != nil {
+		return nil, err
+	}
+	authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "")
+	req.Raw().Header.Set(headerAuthorization, authHeader)
+
+	response, err := req.Next()
+	if err != nil && response != nil && response.StatusCode == http.StatusForbidden {
+		// Service failed to authenticate the request; log the string-to-sign to aid debugging
+		log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
+	}
+	return response, err
+}
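+
+// An illustrative sketch (not part of the vendored SDK) of wiring a SharedKeyCredential
+// into a service client so that every request is signed by sharedKeyCredPolicy.Do above.
+// It assumes the package's NewServiceClientWithSharedKey constructor (defined elsewhere
+// in this package); account name, key, and endpoint are placeholders:
+//
+//	cred, err := NewSharedKeyCredential("<account>", "<base64-account-key>")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	svc, err := NewServiceClientWithSharedKey("https://<account>.blob.core.windows.net/", cred, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = svc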
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
new file mode 100644
index 000000000000..08c9c8730909
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
@@ -0,0 +1,236 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+// InternalError is an internal error type that all errors get wrapped in.
+type InternalError struct {
+	cause error
+}
+
+// Error implements the error interface. If the cause is a StorageError, its message is
+// returned as-is; any other cause is decorated to mark it as an internal error.
+func (e *InternalError) Error() string {
+	if (errors.Is(e.cause, StorageError{})) {
+		return e.cause.Error()
+	}
+
+	return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error())
+}
+
+// Is reports whether err is an *InternalError.
+func (e *InternalError) Is(err error) bool {
+	_, ok := err.(*InternalError)
+
+	return ok
+}
+
+// As sets target to this *InternalError when target has that type; otherwise it
+// defers to the wrapped cause.
+func (e *InternalError) As(target interface{}) bool {
+	nt, ok := target.(**InternalError)
+
+	if ok {
+		*nt = e
+		return ok
+	}
+
+	//goland:noinspection GoErrorsAs
+	return errors.As(e.cause, target)
+}
+
+// StorageError is the internal struct that replaces the generated StorageError.
+// TL;DR: This implements xml.Unmarshaler, and when the original StorageError is substituted, this unmarshaler kicks in.
+// This handles the description and details. responseErrorToStorageError handles the response, cause, and service code.
+type StorageError struct {
+	response    *http.Response
+	description string
+
+	ErrorCode StorageErrorCode
+	details   map[string]string
+}
+
+func handleError(err error) error {
+	if err == nil {
+		return nil
+	}
+	var respErr *azcore.ResponseError
+	if errors.As(err, &respErr) {
+		return &InternalError{responseErrorToStorageError(respErr)}
+	}
+
+	return &InternalError{err}
+}
+
+// converts an *azcore.ResponseError to a *StorageError, or if that fails, an *InternalError
+func responseErrorToStorageError(responseError *azcore.ResponseError) error {
+	var storageError StorageError
+	body, err := runtime.Payload(responseError.RawResponse)
+	if err != nil {
+		goto Default
+	}
+	if len(body) > 0 {
+		if err := xml.Unmarshal(body, &storageError); err != nil {
+			goto Default
+		}
+	}
+
+	storageError.response = responseError.RawResponse
+
+	storageError.ErrorCode = StorageErrorCode(responseError.RawResponse.Header.Get("x-ms-error-code"))
+
+	if code, ok := storageError.details["Code"]; ok {
+		storageError.ErrorCode = StorageErrorCode(code)
+		delete(storageError.details, "Code")
+	}
+
+	return &storageError
+
+Default:
+	return &InternalError{
+		cause: responseError,
+	}
+}
+
+// StatusCode returns service-error information. The caller may examine these values but should not modify any of them.
+func (e *StorageError) StatusCode() int {
+	return e.response.StatusCode
+}
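+
+// An illustrative sketch (not part of the vendored SDK) of unwrapping these error types
+// at a call site; it assumes err was returned by any azblob operation:
+//
+//	var stgErr *StorageError
+//	if errors.As(err, &stgErr) {
+//		switch stgErr.ErrorCode {
+//		case StorageErrorCodeContainerNotFound:
+//			// create the container, treat as empty, etc.
+//		default:
+//			log.Printf("service error %s (HTTP %d)", stgErr.ErrorCode, stgErr.StatusCode())
+//		}
+//	}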
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e StorageError) Error() string {
+	b := &bytes.Buffer{}
+
+	if e.response != nil {
+		_, _ = fmt.Fprintf(b, "===== RESPONSE ERROR (ErrorCode=%s) =====\n", e.ErrorCode)
+		_, _ = fmt.Fprintf(b, "Description=%s, Details: ", e.description)
+		if len(e.details) == 0 {
+			b.WriteString("(none)\n")
+		} else {
+			b.WriteRune('\n')
+			keys := make([]string, 0, len(e.details))
+			// Alphabetize the details
+			for k := range e.details {
+				keys = append(keys, k)
+			}
+			sort.Strings(keys)
+			for _, k := range keys {
+				_, _ = fmt.Fprintf(b, "   %s: %+v\n", k, e.details[k])
+			}
+		}
+		// TODO: also render the request/response pair via writeRequestWithResponse once the
+		// azcore request-copy helper is wired up.
+	}
+
+	return b.String()
+}
+
+// Is reports whether err is a StorageError or *StorageError.
+func (e StorageError) Is(err error) bool {
+	_, ok := err.(StorageError)
+	_, ok2 := err.(*StorageError)
+
+	return ok || ok2
+}
+
+// Response returns the raw HTTP response carried by the StorageError.
+func (e StorageError) Response() *http.Response {
+	return e.response
+}
+
+//nolint
+func writeRequestWithResponse(b *bytes.Buffer, request *policy.Request, response *http.Response) {
+	// Write the request into the buffer.
+	_, _ = fmt.Fprint(b, "   "+request.Raw().Method+" "+request.Raw().URL.String()+"\n")
+	writeHeader(b, request.Raw().Header)
+	if response != nil {
+		_, _ = fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+		_, _ = fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n")
+		writeHeader(b, response.Header)
+	}
+}
+
+// writeHeader appends an HTTP request's or response's headers to a buffer, alphabetized.
+//nolint
+func writeHeader(b *bytes.Buffer, header map[string][]string) {
+	if len(header) == 0 {
+		b.WriteString("   (no headers)\n")
+		return
+	}
+	keys := make([]string, 0, len(header))
+	// Alphabetize the headers
+	for k := range header {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		// Redact the value of any Authorization header to prevent security information from persisting in logs
+		value := interface{}("REDACTED")
+		if !strings.EqualFold(k, "Authorization") {
+			value = header[k]
+		}
+		_, _ = fmt.Fprintf(b, "   %s: %+v\n", k, value)
+	}
+}
+
+// Temporary returns true if the error occurred due to a temporary condition (an HTTP status of 500, 502, or 503).
+func (e *StorageError) Temporary() bool {
+	if e.response != nil {
+		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
+			return true
+		}
+	}
+
+	return false
+}
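+
+// An illustrative sketch (not part of the vendored SDK) of using Temporary to decide
+// whether an operation is worth retrying; it assumes err came from an azblob call:
+//
+//	var stgErr *StorageError
+//	if errors.As(err, &stgErr) && stgErr.Temporary() {
+//		// 500/502/503: back off and retry the operation
+//	}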
+
+// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
+//nolint
+func (e *StorageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	tokName := ""
+	var t xml.Token
+	for t, err = d.Token(); err == nil; t, err = d.Token() {
+		switch tt := t.(type) {
+		case xml.StartElement:
+			tokName = tt.Name.Local
+		case xml.EndElement:
+			tokName = ""
+		case xml.CharData:
+			switch tokName {
+			case "":
+				continue
+			case "Message":
+				e.description = string(tt)
+			default:
+				if e.details == nil {
+					e.details = map[string]string{}
+				}
+				e.details[tokName] = string(tt)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
new file mode 100644
index 000000000000..341858f1ad8a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
@@ -0,0 +1,107 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Raw converts PageRange into primitive start, end integers of type int64
+func (pr *PageRange) Raw() (start, end int64) {
+	if pr.Start != nil {
+		start = *pr.Start
+	}
+	if pr.End != nil {
+		end = *pr.End
+	}
+
+	return
+}
+
+// HttpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value HttpRange indicates the entire resource. An HttpRange
+// which has a non-zero offset but a zero-value count indicates from the offset to the resource's end.
+type HttpRange struct {
+	Offset int64
+	Count  int64
+}
+
+func NewHttpRange(offset, count int64) *HttpRange {
+	return &HttpRange{Offset: offset, Count: count}
+}
+
+func (r *HttpRange) format() *string {
+	if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance
+		return nil // No specified range
+	}
+	endOffset := "" // if count == CountToEnd (0)
+	if r.Count > 0 {
+		endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10)
+	}
+	dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset)
+	return &dataRange
+}
+
+func getSourceRange(offset, count *int64) *string {
+	if offset == nil && count == nil {
+		return nil
+	}
+	newOffset := int64(0)
+	newCount := int64(CountToEnd)
+
+	if offset != nil {
+		newOffset = *offset
+	}
+
+	if count != nil {
+		newCount = *count
+	}
+
+	return (&HttpRange{Offset: newOffset, Count: newCount}).format()
+}
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+		return 0, nil
+	}
+
+	err := validateSeekableStreamAt0(body)
+	if err != nil {
+		return 0, err
+	}
+
+	count, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, errors.New("body stream must be seekable")
+	}
+
+	_, err = body.Seek(0, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+// return an error if body is not a valid seekable stream at 0
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+	if body == nil { // nil bodies are "logically" seekable to 0
+		return nil
+	}
+	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+		// Help detect programmer error
+		if err != nil {
+			return errors.New("body stream must be seekable")
+		}
+		return errors.New("body stream must be set to position 0")
+	}
+	return nil
+}
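+
+// An illustrative sketch (not part of the vendored SDK) of how HttpRange maps to the
+// HTTP Range header; a zero Count means "to the end of the resource":
+//
+//	r := NewHttpRange(512, 1024)
+//	fmt.Println(*r.format()) // "bytes=512-1535"
+//	r = NewHttpRange(512, 0)
+//	fmt.Println(*r.format()) // "bytes=512-"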
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
new file mode 100644
index 000000000000..93a2b1a70077
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
@@ -0,0 +1,43 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+const (
+	// ETagNone represents an empty entity tag.
+	ETagNone = ""
+
+	// ETagAny matches any entity tag.
+	ETagAny = "*"
+)
+
+// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
+type ContainerAccessConditions struct {
+	ModifiedAccessConditions *ModifiedAccessConditions
+	LeaseAccessConditions    *LeaseAccessConditions
+}
+
+func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) {
+	if ac == nil {
+		return nil, nil
+	}
+
+	return ac.ModifiedAccessConditions, ac.LeaseAccessConditions
+}
+
+// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
+type BlobAccessConditions struct {
+	LeaseAccessConditions    *LeaseAccessConditions
+	ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) {
+	if ac == nil {
+		return nil, nil
+	}
+
+	return ac.LeaseAccessConditions, ac.ModifiedAccessConditions
+}
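+
+// An illustrative sketch (not part of the vendored SDK) of optimistic concurrency with
+// these access conditions; it assumes a BlobClient named blob, a context named ctx, an
+// etag string previously read from the blob, and the BlobClient.SetMetadata method:
+//
+//	_, err := blob.SetMetadata(ctx, map[string]string{"k": "v"}, &BlobSetMetadataOptions{
+//		ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &etag},
+//	})
+//	// err carries a condition-not-met service error if the blob changed since etag was read.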
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
new file mode 100644
index 000000000000..19c3fef66a91
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
@@ -0,0 +1,184 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import "time"
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobCreateOptions provides set of configurations for Create Append Blob operation
+type AppendBlobCreateOptions struct {
+	// Specifies the date time when the blob's immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+	// Specifies whether a legal hold should be set on the blob.
+	LegalHold *bool
+
+	BlobAccessConditions *BlobAccessConditions
+
+	HTTPHeaders *BlobHTTPHeaders
+
+	CpkInfo *CpkInfo
+
+	CpkScopeInfo *CpkScopeInfo
+	// Optional. Used to set blob tags in various blob operations.
+	TagsMap map[string]string
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
+	Metadata map[string]string
+}
+
+func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
+	*CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil
+	}
+
+	options := appendBlobClientCreateOptions{
+		BlobTagsString:           serializeBlobTagsToStrPtr(o.TagsMap),
+		Metadata:                 o.Metadata,
+		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
+		LegalHold:                o.LegalHold,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create.
+type AppendBlobCreateResponse struct {
+	appendBlobClientCreateResponse
+}
+
+func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse {
+	return AppendBlobCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobAppendBlockOptions provides set of configurations for AppendBlock operation
+type AppendBlobAppendBlockOptions struct {
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	AppendPositionAccessConditions *AppendPositionAccessConditions
+
+	CpkInfo *CpkInfo
+
+	CpkScopeInfo *CpkScopeInfo
+
+	BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil
+	}
+
+	options := &appendBlobClientAppendBlockOptions{
+		TransactionalContentCRC64: o.TransactionalContentCRC64,
+		TransactionalContentMD5:   o.TransactionalContentMD5,
+	}
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
+}
+
+// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
+type AppendBlobAppendBlockResponse struct {
+	appendBlobClientAppendBlockResponse
+}
+
+func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse {
+	return AppendBlobAppendBlockResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobAppendBlockFromURLOptions provides set of configurations for AppendBlockFromURL operation
+type AppendBlobAppendBlockFromURLOptions struct {
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentCRC64 []byte
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	AppendPositionAccessConditions *AppendPositionAccessConditions
+
+	CpkInfo *CpkInfo
+
+	CpkScopeInfo *CpkScopeInfo
+
+	SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+	BlobAccessConditions *BlobAccessConditions
+	// Optional: specify the range (Offset/Count) of the source blob to read.
+	Offset *int64
+
+	Count *int64
+}
+
+func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil, nil
+	}
+
+	options := &appendBlobClientAppendBlockFromURLOptions{
+		SourceRange:             getSourceRange(o.Offset, o.Count),
+		SourceContentMD5:        o.SourceContentMD5,
+		SourceContentcrc64:      o.SourceContentCRC64,
+		TransactionalContentMD5: o.TransactionalContentMD5,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL.
+type AppendBlobAppendBlockFromURLResponse struct {
+	appendBlobClientAppendBlockFromURLResponse
+}
+
+func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse {
+	return AppendBlobAppendBlockFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobSealOptions provides set of configurations for SealAppendBlob operation
+type AppendBlobSealOptions struct {
+	BlobAccessConditions           *BlobAccessConditions
+	AppendPositionAccessConditions *AppendPositionAccessConditions
+}
+
+func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions,
+	modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format()
+	return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions
+}
+
+// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal.
+type AppendBlobSealResponse struct {
+	appendBlobClientSealResponse
+}
+
+func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse {
+	return AppendBlobSealResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
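+
+// An illustrative sketch (not part of the vendored SDK) of how these option types are
+// typically used with an AppendBlobClient; it assumes a client named appendBlob, a
+// context named ctx, and the azcore streaming.NopCloser helper for the request body:
+//
+//	if _, err := appendBlob.Create(ctx, &AppendBlobCreateOptions{
+//		Metadata: map[string]string{"source": "example"},
+//	}); err != nil {
+//		log.Fatal(err)
+//	}
+//	body := streaming.NopCloser(strings.NewReader("log line 1\n"))
+//	if _, err := appendBlob.AppendBlock(ctx, body, nil); err != nil {
+//		log.Fatal(err)
+//	}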
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go
new file mode 100644
index 000000000000..f4425b18c828
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go
@@ -0,0 +1,478 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobDownloadOptions provides set of configurations for Download blob operation
+type BlobDownloadOptions struct {
+	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentMD5 *bool
+
+	// Optional: specify the range (Offset/Count) of the blob to read.
+	Offset *int64
+	Count  *int64
+
+	BlobAccessConditions *BlobAccessConditions
+	CpkInfo              *CpkInfo
+	CpkScopeInfo         *CpkScopeInfo
+}
+
+func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	offset := int64(0)
+	count := int64(CountToEnd)
+
+	if o.Offset != nil {
+		offset = *o.Offset
+	}
+
+	if o.Count != nil {
+		count = *o.Count
+	}
+
+	basics := blobClientDownloadOptions{
+		RangeGetContentMD5: o.RangeGetContentMD5,
+		Range:              (&HttpRange{Offset: offset, Count: count}).format(),
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
+}
+
+// BlobDownloadResponse wraps the AutoRest-generated BlobDownloadResponse and helps to provide info for retry.
+type BlobDownloadResponse struct {
+	blobClientDownloadResponse
+	ctx                    context.Context
+	b                      *BlobClient
+	getInfo                HTTPGetterInfo
+	ObjectReplicationRules []ObjectReplicationPolicy
+}
+
+// Body constructs a new RetryReader stream for reading data. If a connection fails
+// while reading, it will make additional requests to reestablish a connection and
+// continue reading. Specifying a RetryReaderOptions with MaxRetryRequests set to 0
+// (the default) returns the original response body and no retries will be performed.
+// Pass in nil for options to accept the default options.
+func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser {
+	if options == nil {
+		options = &RetryReaderOptions{}
+	}
+
+	if options.MaxRetryRequests == 0 { // No additional retries
+		return r.RawResponse.Body
+	}
+	return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options,
+		func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
+			accessConditions := &BlobAccessConditions{
+				ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag},
+			}
+			options := BlobDownloadOptions{
+				Offset:               &getInfo.Offset,
+				Count:                &getInfo.Count,
+				BlobAccessConditions: accessConditions,
+				CpkInfo:              options.CpkInfo,
+			}
+			resp, err := r.b.Download(ctx, &options)
+			if err != nil {
+				return nil, err
+			}
+			return resp.RawResponse, err
+		},
+	)
+}
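+
+// An illustrative sketch (not part of the vendored SDK) of downloading a blob with the
+// retrying body reader above; it assumes a BlobClient named blob and a context named ctx:
+//
+//	resp, err := blob.Download(ctx, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	reader := resp.Body(&RetryReaderOptions{MaxRetryRequests: 3})
+//	defer reader.Close()
+//	data, err := io.ReadAll(reader)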
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobDeleteOptions provides set of configurations for Delete blob operation
+type BlobDeleteOptions struct {
+	// Required if the blob has associated snapshots. Specify one of the following two options:
+	// include: Delete the base blob and all of its snapshots.
+	// only: Delete only the blob's snapshots and not the blob itself.
+	DeleteSnapshots      *DeleteSnapshotsOptionType
+	BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	basics := blobClientDeleteOptions{
+		DeleteSnapshots: o.DeleteSnapshots,
+	}
+
+	if o.BlobAccessConditions == nil {
+		return &basics, nil, nil
+	}
+
+	return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions
+}
+
+// BlobDeleteResponse contains the response from method BlobClient.Delete.
+type BlobDeleteResponse struct {
+	blobClientDeleteResponse
+}
+
+func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse {
+	return BlobDeleteResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobUndeleteOptions provides set of configurations for Blob Undelete operation
+type BlobUndeleteOptions struct {
+}
+
+func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions {
+	return nil
+}
+
+// BlobUndeleteResponse contains the response from method BlobClient.Undelete.
+type BlobUndeleteResponse struct {
+	blobClientUndeleteResponse
+}
+
+func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse {
+	return BlobUndeleteResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobSetTierOptions provides set of configurations for SetTier on blob operation
+type BlobSetTierOptions struct {
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+
+	LeaseAccessConditions    *LeaseAccessConditions
+	ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority}
+	return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// BlobSetTierResponse contains the response from method BlobClient.SetTier.
+type BlobSetTierResponse struct { + blobClientSetTierResponse +} + +func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse { + return BlobSetTierResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobGetPropertiesOptions provides set of configurations for GetProperties blob operation +type BlobGetPropertiesOptions struct { + BlobAccessConditions *BlobAccessConditions + CpkInfo *CpkInfo +} + +func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, + leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format() + return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleId string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy struct { + PolicyId *string + Rules *[]ObjectReplicationRules +} + +// BlobGetPropertiesResponse reformat the GetPropertiesResponse object for easy consumption +type BlobGetPropertiesResponse struct { + blobClientGetPropertiesResponse + + // deserialized attributes + ObjectReplicationRules []ObjectReplicationPolicy +} + +func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse { + getResp := BlobGetPropertiesResponse{ + blobClientGetPropertiesResponse: resp, + ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules), + } + return getResp +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetHTTPHeadersOptions provides set of configurations for SetHTTPHeaders on blob operation +type BlobSetHTTPHeadersOptions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobSetHTTPHeadersResponse struct { + blobClientSetHTTPHeadersResponse +} + +func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse { + return BlobSetHTTPHeadersResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetMetadataOptions provides set of configurations for Set Metadata on blob operation +type BlobSetMetadataOptions struct { + LeaseAccessConditions *LeaseAccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, + cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions +} + +// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata. 
+type BlobSetMetadataResponse struct {
+	blobClientSetMetadataResponse
+}
+
+func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse {
+	return BlobSetMetadataResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobCreateSnapshotOptions provides set of configurations for CreateSnapshot of blob operation
+type BlobCreateSnapshotOptions struct {
+	Metadata                 map[string]string
+	LeaseAccessConditions    *LeaseAccessConditions
+	CpkInfo                  *CpkInfo
+	CpkScopeInfo             *CpkScopeInfo
+	ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobCreateSnapshotOptions) format() (blobCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo,
+	cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil
+	}
+
+	basics := blobClientCreateSnapshotOptions{
+		Metadata: o.Metadata,
+	}
+
+	return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions
+}
+
+// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot
+type BlobCreateSnapshotResponse struct {
+	blobClientCreateSnapshotResponse
+}
+
+func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse {
+	return BlobCreateSnapshotResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobStartCopyOptions provides set of configurations for StartCopyFromURL blob operation
+type BlobStartCopyOptions struct {
+	// Specifies the date time when the blob's immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+	// Specifies whether a legal hold should be set on the blob.
+	LegalHold *bool
+	// Optional. Used to set blob tags in various blob operations.
+	TagsMap map[string]string
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
+	Metadata map[string]string
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+	SealBlob *bool
+	// Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + ModifiedAccessConditions *ModifiedAccessConditions + + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions, + sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := blobClientStartCopyFromURLOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions +} + +// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobStartCopyFromURLResponse struct { + blobClientStartCopyFromURLResponse +} + +func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse { + return BlobStartCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobAbortCopyOptions provides set of configurations for AbortCopyFromURL operation +type BlobAbortCopyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions, + leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL +type BlobAbortCopyFromURLResponse struct { + blobClientAbortCopyFromURLResponse +} + +func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse { + return BlobAbortCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetTagsOptions provides set of configurations for SetTags operation +type BlobSetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte + + TagsMap map[string]string + + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &blobClientSetTagsOptions{ + Tags: serializeBlobTags(o.TagsMap), + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + return options, o.ModifiedAccessConditions, o.LeaseAccessConditions +} + +// BlobSetTagsResponse contains the response from method BlobClient.SetTags +type BlobSetTagsResponse struct { + blobClientSetTagsResponse +} + +func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse { + return BlobSetTagsResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobGetTagsOptions provides set of configurations for GetTags operation +type BlobGetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &blobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + + return options, modifiedAccessConditions, leaseAccessConditions +} + +// BlobGetTagsResponse contains the response from method BlobClient.GetTags +type BlobGetTagsResponse struct { + blobClientGetTagsResponse +} + +func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse { + return BlobGetTagsResponse{resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go new file mode 100644 index 000000000000..4e574622cca7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go @@ -0,0 +1,160 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobAcquireLeaseOptions provides set of configurations for AcquireLeaseBlob operation +type BlobAcquireLeaseOptions struct { + // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change. 
+ Duration *int32 + + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return blobClientAcquireLeaseOptions{}, nil + } + return blobClientAcquireLeaseOptions{ + Duration: o.Duration, + }, o.ModifiedAccessConditions +} + +// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease. +type BlobAcquireLeaseResponse struct { + blobClientAcquireLeaseResponse +} + +func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse { + return BlobAcquireLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobBreakLeaseOptions provides set of configurations for BreakLeaseBlob operation +type BlobBreakLeaseOptions struct { + // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease + // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than + // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining + // lease period elapses, and an infinite lease breaks immediately. + BreakPeriod *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + if o.BreakPeriod != nil { + period := leasePeriodPointer(*o.BreakPeriod) + return &blobClientBreakLeaseOptions{ + BreakPeriod: period, + }, o.ModifiedAccessConditions + } + + return nil, o.ModifiedAccessConditions +} + +// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease. 
+type BlobBreakLeaseResponse struct { + blobClientBreakLeaseResponse +} + +func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse { + return BlobBreakLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobChangeLeaseOptions provides set of configurations for ChangeLeaseBlob operation +type BlobChangeLeaseOptions struct { + ProposedLeaseID *string + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) { + generatedUuid, err := uuid.New() + if err != nil { + return nil, nil, nil, err + } + leaseID := to.Ptr(generatedUuid.String()) + if o == nil { + return leaseID, nil, nil, nil + } + + if o.ProposedLeaseID == nil { + o.ProposedLeaseID = leaseID + } + + return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil +} + +// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease +type BlobChangeLeaseResponse struct { + blobClientChangeLeaseResponse +} + +func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse { + return BlobChangeLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobRenewLeaseOptions provides set of configurations for RenewLeaseBlob operation +type BlobRenewLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// BlobRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobRenewLeaseResponse struct { + blobClientRenewLeaseResponse +} + +func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse { + return BlobRenewLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ReleaseLeaseBlobOptions provides set of configurations for ReleaseLeaseBlob operation +type ReleaseLeaseBlobOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// BlobReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobReleaseLeaseResponse struct { + blobClientReleaseLeaseResponse +} + +func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse { + return BlobReleaseLeaseResponse{resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go new file mode 100644 index 000000000000..06d4368557ac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import "time" + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobUploadOptions provides set of configurations for UploadBlockBlob operation +type BlockBlobUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + TagsMap map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + HTTPHeaders *BlobHTTPHeaders + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobUploadOptions) format() (*blockBlobClientUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions, + *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := blockBlobClientUploadOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// BlockBlobUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobUploadResponse struct { + blockBlobClientUploadResponse +} + +func toBlockBlobUploadResponse(resp blockBlobClientUploadResponse) BlockBlobUploadResponse { + return BlockBlobUploadResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobStageBlockOptions provides set of configurations for StageBlock operation +type BlockBlobStageBlockOptions struct { + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + LeaseAccessConditions *LeaseAccessConditions + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +func (o *BlockBlobStageBlockOptions) format() (*blockBlobClientStageBlockOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &blockBlobClientStageBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo +} + +// BlockBlobStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobStageBlockResponse struct { + blockBlobClientStageBlockResponse +} + +func toBlockBlobStageBlockResponse(resp blockBlobClientStageBlockResponse) BlockBlobStageBlockResponse { + return BlockBlobStageBlockResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobStageBlockFromURLOptions provides set of configurations for StageBlockFromURL operation +type BlockBlobStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + LeaseAccessConditions *LeaseAccessConditions + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + + Offset *int64 + + Count *int64 + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo +} + +func (o *BlockBlobStageBlockFromURLOptions) format() (*blockBlobClientStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &blockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + SourceRange: getSourceRange(o.Offset, o.Count), + } + + return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobStageBlockFromURLResponse struct { + blockBlobClientStageBlockFromURLResponse +} + +func toBlockBlobStageBlockFromURLResponse(resp blockBlobClientStageBlockFromURLResponse) BlockBlobStageBlockFromURLResponse { + return BlockBlobStageBlockFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobCommitBlockListOptions provides set of configurations for CommitBlockList operation +type BlockBlobCommitBlockListOptions struct { + BlobTagsMap map[string]string + Metadata map[string]string + RequestID *string + Tier *AccessTier + Timeout *int32 + TransactionalContentCRC64 []byte + TransactionalContentMD5 []byte + BlobHTTPHeaders *BlobHTTPHeaders + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobCommitBlockListOptions) format() (*blockBlobClientCommitBlockListOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &blockBlobClientCommitBlockListOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + Metadata: o.Metadata, + RequestID: o.RequestID, + Tier: o.Tier, + Timeout: o.Timeout, + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.BlobHTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// BlockBlobCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. 
+type BlockBlobCommitBlockListResponse struct { + blockBlobClientCommitBlockListResponse +} + +func toBlockBlobCommitBlockListResponse(resp blockBlobClientCommitBlockListResponse) BlockBlobCommitBlockListResponse { + return BlockBlobCommitBlockListResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobGetBlockListOptions provides set of configurations for GetBlockList operation +type BlockBlobGetBlockListOptions struct { + Snapshot *string + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobGetBlockListOptions) format() (*blockBlobClientGetBlockListOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &blockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// BlockBlobGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobGetBlockListResponse struct { + blockBlobClientGetBlockListResponse +} + +func toBlockBlobGetBlockListResponse(resp blockBlobClientGetBlockListResponse) BlockBlobGetBlockListResponse { + return BlockBlobGetBlockListResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobCopyFromURLOptions provides set of configurations for CopyBlockBlobFromURL operation +type BlockBlobCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsMap map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. 
+ Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobCopyFromURLOptions) format() (*blobClientCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &blobClientCopyFromURLOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// BlockBlobCopyFromURLResponse contains the response from method BlockBlobClient.CopyFromURL. +type BlockBlobCopyFromURLResponse struct { + blobClientCopyFromURLResponse +} + +func toBlockBlobCopyFromURLResponse(resp blobClientCopyFromURLResponse) BlockBlobCopyFromURLResponse { + return BlockBlobCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go new file mode 100644 index 000000000000..657a767dd546 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go @@ -0,0 +1,55 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// ClientOptions adds additional client options while constructing connection +type ClientOptions struct { + // Logging configures the built-in logging policy. + Logging policy.LogOptions + + // Retry configures the built-in retry policy. + Retry policy.RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry policy.TelemetryOptions + + // Transport sets the transport for HTTP requests. + Transport policy.Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []policy.Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. 
+ PerRetryPolicies []policy.Policy +} + +func (c *ClientOptions) toPolicyOptions() *azcore.ClientOptions { + return &azcore.ClientOptions{ + Logging: c.Logging, + Retry: c.Retry, + Telemetry: c.Telemetry, + Transport: c.Transport, + PerCallPolicies: c.PerCallPolicies, + PerRetryPolicies: c.PerRetryPolicies, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +func getConnectionOptions(options *ClientOptions) *policy.ClientOptions { + if options == nil { + options = &ClientOptions{} + } + return options.toPolicyOptions() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go new file mode 100644 index 000000000000..a33103e4b77c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerCreateOptions provides set of configurations for CreateContainer operation +type ContainerCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Specifies the encryption scope settings to set on the container. + CpkScope *ContainerCpkScopeInfo +} + +func (o *ContainerCreateOptions) format() (*containerClientCreateOptions, *ContainerCpkScopeInfo) { + if o == nil { + return nil, nil + } + + basicOptions := containerClientCreateOptions{ + Access: o.Access, + Metadata: o.Metadata, + } + + return &basicOptions, o.CpkScope +} + +// ContainerCreateResponse is wrapper around containerClientCreateResponse +type ContainerCreateResponse struct { + containerClientCreateResponse +} + +func toContainerCreateResponse(resp containerClientCreateResponse) ContainerCreateResponse { + return ContainerCreateResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerDeleteOptions provides set of configurations for DeleteContainer operation +type ContainerDeleteOptions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerDeleteOptions) format() (*containerClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// ContainerDeleteResponse contains the response from method ContainerClient.Delete. 
+type ContainerDeleteResponse struct { + containerClientDeleteResponse +} + +func toContainerDeleteResponse(resp containerClientDeleteResponse) ContainerDeleteResponse { + return ContainerDeleteResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerGetPropertiesOptions provides set of configurations for GetPropertiesContainer operation +type ContainerGetPropertiesOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ContainerGetPropertiesOptions) format() (*containerClientGetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// ContainerGetPropertiesResponse contains the response from method ContainerClient.GetProperties +type ContainerGetPropertiesResponse struct { + containerClientGetPropertiesResponse +} + +func toContainerGetPropertiesResponse(resp containerClientGetPropertiesResponse) ContainerGetPropertiesResponse { + return ContainerGetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerSetMetadataOptions provides set of configurations for SetMetadataContainer operation +type ContainerSetMetadataOptions struct { + Metadata map[string]string + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerSetMetadataOptions) format() (*containerClientSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return &containerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// ContainerSetMetadataResponse contains the response from method containerClient.SetMetadata +type ContainerSetMetadataResponse struct { + containerClientSetMetadataResponse +} + +func toContainerSetMetadataResponse(resp containerClientSetMetadataResponse) ContainerSetMetadataResponse { + return ContainerSetMetadataResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerGetAccessPolicyOptions provides set of configurations for GetAccessPolicy operation +type ContainerGetAccessPolicyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ContainerGetAccessPolicyOptions) format() (*containerClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// ContainerGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. 
+type ContainerGetAccessPolicyResponse struct {
+	containerClientGetAccessPolicyResponse
+}
+
+func toContainerGetAccessPolicyResponse(resp containerClientGetAccessPolicyResponse) ContainerGetAccessPolicyResponse {
+	return ContainerGetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerSetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation
+type ContainerSetAccessPolicyOptions struct {
+	AccessConditions *ContainerAccessConditions
+	// Specifies whether data in the container may be accessed publicly and the level of access
+	Access *PublicAccessType
+	// the acls for the container
+	ContainerACL []*SignedIdentifier
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+func (o *ContainerSetAccessPolicyOptions) format() (*containerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+	mac, lac := o.AccessConditions.format()
+	return &containerClientSetAccessPolicyOptions{
+		Access: o.Access,
+		ContainerACL: o.ContainerACL,
+		RequestID: o.RequestID,
+	}, lac, mac
+}
+
+// ContainerSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy
+type ContainerSetAccessPolicyResponse struct {
+	containerClientSetAccessPolicyResponse
+}
+
+func toContainerSetAccessPolicyResponse(resp containerClientSetAccessPolicyResponse) ContainerSetAccessPolicyResponse {
+	return ContainerSetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerListBlobsFlatOptions provides set of configurations for ContainerClient.ListBlobsFlat operation
+type ContainerListBlobsFlatOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []ListBlobsIncludeItem
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose names begin with the specified prefix.
+	Prefix *string
+}
+
+func (o *ContainerListBlobsFlatOptions) format() *containerClientListBlobFlatSegmentOptions {
+	if o == nil {
+		return nil
+	}
+
+	return &containerClientListBlobFlatSegmentOptions{
+		Include: o.Include,
+		Marker: o.Marker,
+		Maxresults: o.MaxResults,
+		Prefix: o.Prefix,
+	}
+}
+
+// ContainerListBlobFlatPager provides operations for iterating over paged responses.
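+// A minimal illustrative sketch of consuming it (it assumes a ContainerClient
+// value whose ListBlobsFlat method returns this pager, and the
+// NextPage/PageResponse/Err accessors of this SDK's generated pagers):
+//
+//	pager := containerClient.ListBlobsFlat(nil)
+//	for pager.NextPage(ctx) {
+//		resp := pager.PageResponse()
+//		for _, blob := range resp.Segment.BlobItems {
+//			fmt.Println(*blob.Name)
+//		}
+//	}
+//	if err := pager.Err(); err != nil {
+//		// handle the listing failure
+//	}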
+type ContainerListBlobFlatPager struct {
+	*containerClientListBlobFlatSegmentPager
+}
+
+func toContainerListBlobFlatSegmentPager(resp *containerClientListBlobFlatSegmentPager) *ContainerListBlobFlatPager {
+	return &ContainerListBlobFlatPager{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerListBlobsHierarchyOptions provides set of configurations for ContainerClient.ListBlobsHierarchy
+type ContainerListBlobsHierarchyOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []ListBlobsIncludeItem
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	MaxResults *int32
+	// Filters the results to return only blobs whose names begin with the specified prefix.
+	Prefix *string
+}
+
+func (o *ContainerListBlobsHierarchyOptions) format() *containerClientListBlobHierarchySegmentOptions {
+	if o == nil {
+		return nil
+	}
+
+	return &containerClientListBlobHierarchySegmentOptions{
+		Include: o.Include,
+		Marker: o.Marker,
+		Maxresults: o.MaxResults,
+		Prefix: o.Prefix,
+	}
+}
+
+// ContainerListBlobHierarchyPager provides operations for iterating over paged responses.
+type ContainerListBlobHierarchyPager struct {
+	containerClientListBlobHierarchySegmentPager
+}
+
+func toContainerListBlobHierarchySegmentPager(resp *containerClientListBlobHierarchySegmentPager) *ContainerListBlobHierarchyPager {
+	if resp == nil {
+		return nil
+	}
+	return &ContainerListBlobHierarchyPager{*resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go
new file mode 100644
index 000000000000..87572e9178f6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go
@@ -0,0 +1,166 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// --------------------------------------------------------------------------------------------------------------------- + +// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics. +const LeaseBreakNaturally = -1 + +func leasePeriodPointer(period int32) *int32 { + if period != LeaseBreakNaturally { + return &period + } else { + return nil + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerAcquireLeaseOptions provides set of configurations for AcquireLeaseContainer operation +type ContainerAcquireLeaseOptions struct { + Duration *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerAcquireLeaseOptions) format() (containerClientAcquireLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return containerClientAcquireLeaseOptions{}, nil + } + containerAcquireLeaseOptions := containerClientAcquireLeaseOptions{ + Duration: o.Duration, + } + + return containerAcquireLeaseOptions, o.ModifiedAccessConditions +} + +// ContainerAcquireLeaseResponse contains the response from method ContainerLeaseClient.AcquireLease. +type ContainerAcquireLeaseResponse struct { + containerClientAcquireLeaseResponse +} + +func toContainerAcquireLeaseResponse(resp containerClientAcquireLeaseResponse) ContainerAcquireLeaseResponse { + return ContainerAcquireLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerBreakLeaseOptions provides set of configurations for BreakLeaseContainer operation +type ContainerBreakLeaseOptions struct { + BreakPeriod *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerBreakLeaseOptions) format() (*containerClientBreakLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + containerBreakLeaseOptions := &containerClientBreakLeaseOptions{ + BreakPeriod: o.BreakPeriod, + } + + return containerBreakLeaseOptions, o.ModifiedAccessConditions +} + +// ContainerBreakLeaseResponse contains the response from method ContainerLeaseClient.BreakLease. +type ContainerBreakLeaseResponse struct { + containerClientBreakLeaseResponse +} + +func toContainerBreakLeaseResponse(resp containerClientBreakLeaseResponse) ContainerBreakLeaseResponse { + return ContainerBreakLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerChangeLeaseOptions provides set of configurations for ChangeLeaseContainer operation +type ContainerChangeLeaseOptions struct { + ProposedLeaseID *string + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerChangeLeaseOptions) format() (*string, *containerClientChangeLeaseOptions, *ModifiedAccessConditions, error) { + generatedUuid, err := uuid.New() + if err != nil { + return nil, nil, nil, err + } + leaseID := to.Ptr(generatedUuid.String()) + if o == nil { + return leaseID, nil, nil, err + } + + if o.ProposedLeaseID == nil { + o.ProposedLeaseID = leaseID + } + + return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, err +} + +// ContainerChangeLeaseResponse contains the response from method ContainerLeaseClient.ChangeLease. 
+type ContainerChangeLeaseResponse struct { + containerClientChangeLeaseResponse +} + +func toContainerChangeLeaseResponse(resp containerClientChangeLeaseResponse) ContainerChangeLeaseResponse { + return ContainerChangeLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerReleaseLeaseOptions provides set of configurations for ReleaseLeaseContainer operation +type ContainerReleaseLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerReleaseLeaseOptions) format() (*containerClientReleaseLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// ContainerReleaseLeaseResponse contains the response from method ContainerLeaseClient.ReleaseLease. +type ContainerReleaseLeaseResponse struct { + containerClientReleaseLeaseResponse +} + +func toContainerReleaseLeaseResponse(resp containerClientReleaseLeaseResponse) ContainerReleaseLeaseResponse { + return ContainerReleaseLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerRenewLeaseOptions provides set of configurations for RenewLeaseContainer operation +type ContainerRenewLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerRenewLeaseOptions) format() (*containerClientRenewLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// ContainerRenewLeaseResponse contains the response from method ContainerLeaseClient.RenewLease. +type ContainerRenewLeaseResponse struct { + containerClientRenewLeaseResponse +} + +func toContainerRenewLeaseResponse(resp containerClientRenewLeaseResponse) ContainerRenewLeaseResponse { + return ContainerRenewLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go new file mode 100644 index 000000000000..c7a67abe7746 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go @@ -0,0 +1,201 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "fmt" +) + +const _1MiB = 1024 * 1024 + +// UploadOption identifies options used by the UploadBuffer and UploadFile functions. +type UploadOption struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata map[string]string + + // BlobAccessConditions indicates the access conditions for the block blob. 
+	BlobAccessConditions *BlobAccessConditions
+
+	// AccessTier indicates the tier of the blob
+	AccessTier *AccessTier
+
+	// TagsMap is an optional map of blob tags to associate with the blob
+	TagsMap map[string]string
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	CpkInfo *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+
+	// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
+	Parallelism uint16
+	// Optional. Specifies the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 *[]byte
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 *[]byte
+}
+
+func (o *UploadOption) getStageBlockOptions() *BlockBlobStageBlockOptions {
+	leaseAccessConditions, _ := o.BlobAccessConditions.format()
+	return &BlockBlobStageBlockOptions{
+		CpkInfo: o.CpkInfo,
+		CpkScopeInfo: o.CpkScopeInfo,
+		LeaseAccessConditions: leaseAccessConditions,
+	}
+}
+
+func (o *UploadOption) getUploadBlockBlobOptions() *BlockBlobUploadOptions {
+	return &BlockBlobUploadOptions{
+		TagsMap: o.TagsMap,
+		Metadata: o.Metadata,
+		Tier: o.AccessTier,
+		HTTPHeaders: o.HTTPHeaders,
+		BlobAccessConditions: o.BlobAccessConditions,
+		CpkInfo: o.CpkInfo,
+		CpkScopeInfo: o.CpkScopeInfo,
+	}
+}
+
+func (o *UploadOption) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions {
+	return &BlockBlobCommitBlockListOptions{
+		BlobTagsMap: o.TagsMap,
+		Metadata: o.Metadata,
+		Tier: o.AccessTier,
+		BlobHTTPHeaders: o.HTTPHeaders,
+		CpkInfo: o.CpkInfo,
+		CpkScopeInfo: o.CpkScopeInfo,
+	}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// UploadStreamOptions provides set of configurations for UploadStream operation
+type UploadStreamOptions struct {
+	// TransferManager provides a TransferManager that controls buffer allocation/reuse and
+	// concurrency. This overrides BufferSize and MaxBuffers if set.
+	TransferManager TransferManager
+	transferMangerNotSet bool
+	// BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
+	BufferSize int
+	// MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
+	MaxBuffers int
+	HTTPHeaders *BlobHTTPHeaders
+	Metadata map[string]string
+	BlobAccessConditions *BlobAccessConditions
+	AccessTier *AccessTier
+	BlobTagsMap map[string]string
+	CpkInfo *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+}
+
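+// defaults lazily provisions a TransferManager when the caller did not supply one.
+// Illustrative example: a zero-value UploadStreamOptions resolves to a single
+// static 1 MiB buffer:
+//
+//	o := UploadStreamOptions{}
+//	_ = o.defaults() // now o.BufferSize == _1MiB and o.MaxBuffers == 1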
+func (u *UploadStreamOptions) defaults() error {
+	if u.TransferManager != nil {
+		return nil
+	}
+
+	if u.MaxBuffers == 0 {
+		u.MaxBuffers = 1
+	}
+
+	if u.BufferSize < _1MiB {
+		u.BufferSize = _1MiB
+	}
+
+	var err error
+	u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
+	if err != nil {
+		return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
+	}
+	u.transferMangerNotSet = true
+	return nil
+}
+
+func (u *UploadStreamOptions) getStageBlockOptions() *BlockBlobStageBlockOptions {
+	leaseAccessConditions, _ := u.BlobAccessConditions.format()
+	return &BlockBlobStageBlockOptions{
+		CpkInfo: u.CpkInfo,
+		CpkScopeInfo: u.CpkScopeInfo,
+		LeaseAccessConditions: leaseAccessConditions,
+	}
+}
+
+func (u *UploadStreamOptions) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions {
+	options := &BlockBlobCommitBlockListOptions{
+		BlobTagsMap: u.BlobTagsMap,
+		Metadata: u.Metadata,
+		Tier: u.AccessTier,
+		BlobHTTPHeaders: u.HTTPHeaders,
+		CpkInfo: u.CpkInfo,
+		CpkScopeInfo: u.CpkScopeInfo,
+		BlobAccessConditions: u.BlobAccessConditions,
+	}
+
+	return options
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DownloadOptions identifies options used by the DownloadToBuffer and DownloadToFile functions.
+type DownloadOptions struct {
+	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress func(bytesTransferred int64)
+
+	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	BlobAccessConditions *BlobAccessConditions
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	CpkInfo *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+
+	// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
+	Parallelism uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+func (o *DownloadOptions) getBlobPropertiesOptions() *BlobGetPropertiesOptions {
+	return &BlobGetPropertiesOptions{
+		BlobAccessConditions: o.BlobAccessConditions,
+		CpkInfo: o.CpkInfo,
+	}
+}
+
+func (o *DownloadOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *BlobDownloadOptions {
+	return &BlobDownloadOptions{
+		BlobAccessConditions: o.BlobAccessConditions,
+		CpkInfo: o.CpkInfo,
+		CpkScopeInfo: o.CpkScopeInfo,
+		Offset: &offSet,
+		Count: &count,
+		RangeGetContentMD5: rangeGetContentMD5,
+	}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+	TransferSize int64
+	ChunkSize int64
+	Parallelism uint16
+	Operation func(offset int64, chunkSize int64, ctx context.Context) error
+	OperationName string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go
new file mode 100644
index 000000000000..2be2758736a1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go
@@ -0,0 +1,402 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"strconv"
+	"time"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
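+// rangeToString renders the HTTP Range header value for an offset/count pair;
+// for example, rangeToString(0, 1024) yields "bytes=0-1023".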
+func rangeToString(offset, count int64) string {
+	return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation
+type PageBlobCreateOptions struct {
+	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+	// the sequence number must be between 0 and 2^63 - 1.
+	BlobSequenceNumber *int64
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsMap map[string]string
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
+	Metadata map[string]string
+	// Optional. Indicates the tier to be set on the page blob.
+	Tier *PremiumPageBlobAccessTier
+
+	HTTPHeaders *BlobHTTPHeaders
+
+	CpkInfo *CpkInfo
+
+	CpkScopeInfo *CpkScopeInfo
+
+	BlobAccessConditions *BlobAccessConditions
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+}
+
+func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil
+	}
+
+	options := &pageBlobClientCreateOptions{
+		BlobSequenceNumber: o.BlobSequenceNumber,
+		BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
+		Metadata: o.Metadata,
+		Tier: o.Tier,
+	}
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// PageBlobCreateResponse contains the response from method PageBlobClient.Create.
+type PageBlobCreateResponse struct {
+	pageBlobClientCreateResponse
+}
+
+func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse {
+	return PageBlobCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation
+type PageBlobUploadPagesOptions struct {
+	PageRange *HttpRange
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	CpkInfo *CpkInfo
+	CpkScopeInfo *CpkScopeInfo
+	SequenceNumberAccessConditions *SequenceNumberAccessConditions
+	BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions,
+	*CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil
+	}
+
+	options := &pageBlobClientUploadPagesOptions{
+		TransactionalContentCRC64: o.TransactionalContentCRC64,
+		TransactionalContentMD5: o.TransactionalContentMD5,
+	}
+
+	if o.PageRange != nil {
+		options.Range = o.PageRange.format()
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
+type PageBlobUploadPagesResponse struct {
+	pageBlobClientUploadPagesResponse
+}
+
+func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse {
+	return PageBlobUploadPagesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation
+type PageBlobUploadPagesFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentCRC64 []byte + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + SequenceNumberAccessConditions *SequenceNumberAccessConditions + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, + *LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &pageBlobClientUploadPagesFromURLOptions{ + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + CopySourceAuthorization: o.CopySourceAuthorization, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL +type PageBlobUploadPagesFromURLResponse struct { + pageBlobClientUploadPagesFromURLResponse +} + +func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse { + return PageBlobUploadPagesFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation +type PageBlobClearPagesOptions struct { + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo, + *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages +type PageBlobClearPagesResponse struct { + pageBlobClientClearPagesResponse +} + +func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse { + return PageBlobClearPagesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation +type PageBlobGetPageRangesOptions struct { + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. 
The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Optional, you can specify whether a particular range of the blob is read + PageRange *HttpRange + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &pageBlobClientGetPageRangesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Range: o.PageRange.format(), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions +} + +// PageBlobGetPageRangesPager provides operations for iterating over paged responses +type PageBlobGetPageRangesPager struct { + *pageBlobClientGetPageRangesPager +} + +func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager { + return &PageBlobGetPageRangesPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation +type PageBlobGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. 
+ PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Optional, you can specify whether a particular range of the blob is read + PageRange *HttpRange + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &pageBlobClientGetPageRangesDiffOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + PrevSnapshotURL: o.PrevSnapshotURL, + Prevsnapshot: o.PrevSnapshot, + Range: o.PageRange.format(), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions + +} + +// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses +type PageBlobGetPageRangesDiffPager struct { + *pageBlobClientGetPageRangesDiffPager +} + +func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager { + return &PageBlobGetPageRangesDiffPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation +type PageBlobResizeOptions struct { + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// PageBlobResizeResponse contains the response from method PageBlobClient.Resize +type PageBlobResizeResponse struct { + pageBlobClientResizeResponse +} + +func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse { + return PageBlobResizeResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation +type PageBlobUpdateSequenceNumberOptions struct { + ActionType *SequenceNumberActionType + + BlobSequenceNumber *int64 + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, 
*ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil
+	}
+
+	options := &pageBlobClientUpdateSequenceNumberOptions{
+		BlobSequenceNumber: o.BlobSequenceNumber,
+	}
+
+	if *o.ActionType == SequenceNumberActionTypeIncrement {
+		options.BlobSequenceNumber = nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+	return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber
+type PageBlobUpdateSequenceNumberResponse struct {
+	pageBlobClientUpdateSequenceNumberResponse
+}
+
+func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse {
+	return PageBlobUpdateSequenceNumberResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation
+type PageBlobCopyIncrementalOptions struct {
+	ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil
+	}
+
+	return nil, o.ModifiedAccessConditions
+}
+
+// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental
+type PageBlobCopyIncrementalResponse struct {
+	pageBlobClientCopyIncrementalResponse
+}
+
+func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse {
+	return PageBlobCopyIncrementalResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
new file mode 100644
index 000000000000..3cf85ca43b1a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
@@ -0,0 +1,68 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"net/url"
+	"strings"
+)
+
+func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
+	if tagsMap == nil {
+		return nil
+	}
+	tags := make([]string, 0)
+	for key, val := range tagsMap {
+		tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+	}
+	blobTagsString := strings.Join(tags, "&")
+	return &blobTagsString
+}
+
+func serializeBlobTags(tagsMap map[string]string) *BlobTags {
+	if tagsMap == nil {
+		return nil
+	}
+	blobTagSet := make([]*BlobTag, 0)
+	for key, val := range tagsMap {
+		newKey, newVal := key, val
+		blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal})
+	}
+	return &BlobTags{BlobTagSet: blobTagSet}
+}
+
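+// deserializeORSPolicies reconstructs object-replication policies from response
+// headers. Illustrative example with hypothetical header values:
+//
+//	"x-ms-or-aaaa_1": "Complete"
+//	"x-ms-or-aaaa_2": "Failed"
+//
+// parses into one ObjectReplicationPolicy with PolicyId "aaaa" holding rules
+// "1" (Complete) and "2" (Failed).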
+func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) {
+	if policies == nil {
+		return nil
+	}
+	// For source blobs (blobs that have policy ids and rule ids applied to them),
+	// the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+	// The value of this header is the status of the replication.
+	orPolicyStatusHeader := make(map[string]string)
+	for key, value := range policies {
+		if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" {
+			orPolicyStatusHeader[key] = value
+		}
+	}
+
+	parsedResult := make(map[string][]ObjectReplicationRules)
+	for key, value := range orPolicyStatusHeader {
+		policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_")
+		policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1]
+
+		parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value})
+	}
+
+	for policyId, rules := range parsedResult {
+		objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{
+			PolicyId: &policyId,
+			Rules: &rules,
+		})
+	}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
new file mode 100644
index 000000000000..747a94ee2451
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
@@ -0,0 +1,226 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceGetAccountInfoOptions provides set of options for ServiceClient.GetAccountInfo
+type ServiceGetAccountInfoOptions struct {
+	// placeholder for future options
+}
+
+func (o *ServiceGetAccountInfoOptions) format() *serviceClientGetAccountInfoOptions {
+	return nil
+}
+
+// ServiceGetAccountInfoResponse contains the response from ServiceClient.GetAccountInfo
+type ServiceGetAccountInfoResponse struct {
+	serviceClientGetAccountInfoResponse
+}
+
+func toServiceGetAccountInfoResponse(resp serviceClientGetAccountInfoResponse) ServiceGetAccountInfoResponse {
+	return ServiceGetAccountInfoResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ListContainersDetail indicates what additional information the service should return with each container.
+type ListContainersDetail struct {
+	// Tells the service whether to return metadata for each container.
+	Metadata bool
+
+	// Tells the service whether to return soft-deleted containers.
+	Deleted bool
+}
+
+// format produces the `Include` query parameter's value.
+func (o *ListContainersDetail) format() []ListContainersIncludeType {
+	if !o.Metadata && !o.Deleted {
+		return nil
+	}
+
+	items := make([]ListContainersIncludeType, 0, 2)
+	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+	if o.Deleted {
+		items = append(items, ListContainersIncludeTypeDeleted)
+	}
+	if o.Metadata {
+		items = append(items, ListContainersIncludeTypeMetadata)
+	}
+	return items
+}
+
+// ListContainersOptions provides set of configurations for ListContainers operation
+type ListContainersOptions struct {
+	Include ListContainersDetail
+
+	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing operation did not return all containers
+	// remaining to be listed with the current page.
The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +func (o *ListContainersOptions) format() *serviceClientListContainersSegmentOptions { + if o == nil { + return nil + } + + return &serviceClientListContainersSegmentOptions{ + Include: o.Include.format(), + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// ServiceListContainersSegmentPager provides operations for iterating over paged responses. +type ServiceListContainersSegmentPager struct { + serviceClientListContainersSegmentPager +} + +func toServiceListContainersSegmentPager(resp serviceClientListContainersSegmentPager) *ServiceListContainersSegmentPager { + return &ServiceListContainersSegmentPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceGetPropertiesOptions provides set of options for ServiceClient.GetProperties +type ServiceGetPropertiesOptions struct { + // placeholder for future options +} + +func (o *ServiceGetPropertiesOptions) format() *serviceClientGetPropertiesOptions { + return nil +} + +// ServiceGetPropertiesResponse contains the response from ServiceClient.GetProperties +type ServiceGetPropertiesResponse struct { + serviceClientGetPropertiesResponse +} + +func toServiceGetPropertiesResponse(resp serviceClientGetPropertiesResponse) ServiceGetPropertiesResponse { + return ServiceGetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceSetPropertiesOptions provides set of options for ServiceClient.SetProperties +type ServiceSetPropertiesOptions struct { + // The set of CORS rules. + Cors []*CorsRule + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics + + // Azure Analytics Logging settings. 
+ Logging *Logging + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite +} + +func (o *ServiceSetPropertiesOptions) format() (StorageServiceProperties, *serviceClientSetPropertiesOptions) { + if o == nil { + return StorageServiceProperties{}, nil + } + + return StorageServiceProperties{ + Cors: o.Cors, + DefaultServiceVersion: o.DefaultServiceVersion, + DeleteRetentionPolicy: o.DeleteRetentionPolicy, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + StaticWebsite: o.StaticWebsite, + }, nil +} + +// ServiceSetPropertiesResponse contains the response from ServiceClient.SetProperties +type ServiceSetPropertiesResponse struct { + serviceClientSetPropertiesResponse +} + +func toServiceSetPropertiesResponse(resp serviceClientSetPropertiesResponse) ServiceSetPropertiesResponse { + return ServiceSetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceGetStatisticsOptions provides set of options for ServiceClient.GetStatistics +type ServiceGetStatisticsOptions struct { + // placeholder for future options +} + +func (o *ServiceGetStatisticsOptions) format() *serviceClientGetStatisticsOptions { + return nil +} + +// ServiceGetStatisticsResponse contains the response from ServiceClient.GetStatistics. +type ServiceGetStatisticsResponse struct { + serviceClientGetStatisticsResponse +} + +func toServiceGetStatisticsResponse(resp serviceClientGetStatisticsResponse) ServiceGetStatisticsResponse { + return ServiceGetStatisticsResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceFilterBlobsOptions provides set of configurations for ServiceClient.FindBlobsByTags +type ServiceFilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker + // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value + // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server + // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for + // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or + // than the default of 5000. + MaxResults *int32 + // Filters the results to return only to return only blobs whose tags match the specified expression. 
+	Where *string
+}
+
+func (o *ServiceFilterBlobsOptions) pointer() *serviceClientFilterBlobsOptions {
+	if o == nil {
+		return nil
+	}
+	return &serviceClientFilterBlobsOptions{
+		Marker:     o.Marker,
+		Maxresults: o.MaxResults,
+		Where:      o.Where,
+	}
+}
+
+// ServiceFilterBlobsResponse contains the response from ServiceClient.FindBlobsByTags
+type ServiceFilterBlobsResponse struct {
+	serviceClientFilterBlobsResponse
+}
+
+func toServiceFilterBlobsResponse(resp serviceClientFilterBlobsResponse) ServiceFilterBlobsResponse {
+	return ServiceFilterBlobsResponse{resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
new file mode 100644
index 000000000000..ca5aac8cd746
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
@@ -0,0 +1,648 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package azblob
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"io"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+type appendBlobClient struct {
+	endpoint string
+	pl       runtime.Pipeline
+}
+
+// newAppendBlobClient creates a new instance of appendBlobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newAppendBlobClient(endpoint string, pl runtime.Pipeline) *appendBlobClient {
+	client := &appendBlobClient{
+		endpoint: endpoint,
+		pl:       pl,
+	}
+	return client
+}
+
+// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to
+// AppendBlob. Append Block is supported only on version 2015-02-21 or later.
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// body - Initial data
+// appendBlobClientAppendBlockOptions - appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
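+//
+// Example (an illustrative sketch, not part of the vendored API): appending a block from
+// inside this package. It assumes a runtime.Pipeline `pl` has already been built, `data`
+// holds the payload, and that streaming.NopCloser from sdk/azcore/streaming is available
+// to adapt a *bytes.Reader into the io.ReadSeekCloser this method expects:
+//
+//	client := newAppendBlobClient("https://<account>.blob.core.windows.net/<container>/<blob>", pl)
+//	body := streaming.NopCloser(bytes.NewReader(data))
+//	resp, err := client.AppendBlock(context.TODO(), int64(len(data)), body, nil, nil, nil, nil, nil, nil)
+//	if err != nil {
+//		log.Fatal(err) // non-201 responses surface as *azcore.ResponseError
+//	}
+//	fmt.Println(*resp.BlobAppendOffset) // offset at which this block was committed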
+func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobClientAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return appendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. +func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentMD5)) + } + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentCRC64)) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", 
string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (appendBlobClientAppendBlockResponse, error) { + result := appendBlobClientAppendBlockResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + 
+			return appendBlobClientAppendBlockResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	return result, nil
+}
+
+// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where
+// the contents are read from a source URL. The Append Block operation is permitted only if the blob was
+// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 or later.
+// If the operation fails it returns an *azcore.ResponseError type.
+// sourceURL - Specify a URL to the copy source.
+// contentLength - The length of the request.
+// appendBlobClientAppendBlockFromURLOptions - appendBlobClientAppendBlockFromURLOptions contains the optional parameters
+// for the appendBlobClient.AppendBlockFromURL method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (appendBlobClientAppendBlockFromURLResponse, error) {
+	req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobClientAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+	if err != nil {
+		return appendBlobClientAppendBlockFromURLResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return appendBlobClientAppendBlockFromURLResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return appendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.appendBlockFromURLHandleResponse(resp)
+}
+
+// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request.
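+// Every optional parameter group is nil-guarded: headers such as x-ms-source-range or
+// x-ms-blob-condition-appendpos are emitted only when the corresponding field is non-nil,
+// so all-nil options produce the minimal valid request. For optimistic concurrency a caller
+// can pass AppendPositionAccessConditions, e.g. (4096 being a hypothetical expected offset):
+//
+//	pos := int64(4096) // the service fails the append if the blob's current length differs
+//	conds := &AppendPositionAccessConditions{AppendPosition: &pos}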
+func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-copy-source", sourceURL) + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceRange != nil { + req.Raw().Header.Set("x-ms-source-range", *appendBlobClientAppendBlockFromURLOptions.SourceRange) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentMD5)) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64 != nil { + req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != 
nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockFromURLOptions.RequestID) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
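+// Like the other generated handlers it copies each response header of interest into a typed
+// field: RFC1123 timestamps go through time.Parse, Content-MD5 and x-ms-content-crc64 are
+// base64-decoded, and numeric headers such as x-ms-blob-committed-block-count are parsed
+// with strconv; any parse failure discards the partial result and returns the error.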
+func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (appendBlobClientAppendBlockFromURLResponse, error) { + result := appendBlobClientAppendBlockFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// appendBlobClientCreateOptions - appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
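+//
+// Example (an illustrative sketch under the same assumptions as the AppendBlock example
+// above: an existing pipeline `pl` and placeholder blob URL): creating an empty append blob
+// with hypothetical metadata before any blocks are appended; contentLength is 0 because an
+// append blob is created without a body:
+//
+//	client := newAppendBlobClient("https://<account>.blob.core.windows.net/<container>/<blob>", pl)
+//	opts := &appendBlobClientCreateOptions{Metadata: map[string]string{"origin": "test"}}
+//	if _, err := client.Create(context.TODO(), 0, opts, nil, nil, nil, nil, nil); err != nil {
+//		log.Fatal(err)
+//	}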
+func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, appendBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return appendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "AppendBlob") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Metadata != nil { + for k, v := range appendBlobClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + 
req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientCreateOptions.RequestID) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *appendBlobClientCreateOptions.BlobTagsString) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", appendBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*appendBlobClientCreateOptions.ImmutabilityPolicyMode)) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*appendBlobClientCreateOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *appendBlobClient) createHandleResponse(resp *http.Response) (appendBlobClientCreateResponse, error) {
+	result := appendBlobClientCreateResponse{RawResponse: resp}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = &val
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return appendBlobClientCreateResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return appendBlobClientCreateResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return appendBlobClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return appendBlobClientCreateResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	return result, nil
+}
+
+// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12
+// or later.
+// If the operation fails it returns an *azcore.ResponseError type.
+// appendBlobClientSealOptions - appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+func (client *appendBlobClient) Seal(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (appendBlobClientSealResponse, error) {
+	req, err := client.sealCreateRequest(ctx, appendBlobClientSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions)
+	if err != nil {
+		return appendBlobClientSealResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return appendBlobClientSealResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return appendBlobClientSealResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.sealHandleResponse(resp)
+}
+
+// sealCreateRequest creates the Seal request.
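+// The request is a PUT with comp=seal against the blob URL; x-ms-blob-condition-appendpos
+// is attached only when AppendPositionAccessConditions is supplied, which lets a caller of
+// the Seal wrapper above make sealing conditional on the blob's final length. An
+// illustrative sketch (8192 is a hypothetical expected offset):
+//
+//	pos := int64(8192)
+//	cond := &AppendPositionAccessConditions{AppendPosition: &pos}
+//	_, err := client.Seal(context.TODO(), nil, nil, nil, cond)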
+func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientSealOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientSealOptions.RequestID) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (appendBlobClientSealResponse, error) { + result := appendBlobClientSealResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go new file mode 100644 index 000000000000..607c6a714dc9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go @@ -0,0 +1,2831 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +type blobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newBlobClient creates a new instance of blobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { + client := &blobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// blobClientAbortCopyFromURLOptions - blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. 
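+//
+// Example (an illustrative sketch): `copyID` is assumed to come from the x-ms-copy-id
+// response header of the original Copy Blob call, and `client` from newBlobClient with an
+// existing pipeline:
+//
+//	if _, err := client.AbortCopyFromURL(context.TODO(), copyID, nil, nil); err != nil {
+//		log.Fatal(err) // e.g. the copy already completed, so there is nothing to abort
+//	}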
+func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (blobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobClientAbortCopyFromURLOptions, leaseAccessConditions) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return blobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyFromURLHandleResponse(resp) +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAbortCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-copy-action", "abort") + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientAbortCopyFromURLOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. +func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (blobClientAbortCopyFromURLResponse, error) { + result := blobClientAbortCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientAcquireLeaseOptions - blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
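+//
+// Example (an illustrative sketch): acquiring a 15-second lease. The service accepts
+// durations of 15-60 seconds, or -1 for an infinite lease; the returned LeaseID is then
+// passed via LeaseAccessConditions on subsequent writes against the same blob:
+//
+//	duration := int32(15)
+//	opts := &blobClientAcquireLeaseOptions{Duration: &duration}
+//	resp, err := client.AcquireLease(context.TODO(), opts, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	lease := &LeaseAccessConditions{LeaseID: resp.LeaseID}
+//	_ = lease // reuse on later operations until the lease expires or is released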
+func (client *blobClient) AcquireLease(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, blobClientAcquireLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "acquire") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Duration != nil { + req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Duration), 10)) + } + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.ProposedLeaseID != nil { + req.Raw().Header.Set("x-ms-proposed-lease-id", *blobClientAcquireLeaseOptions.ProposedLeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientAcquireLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobClientAcquireLeaseResponse, error) { + result := blobClientAcquireLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientBreakLeaseOptions - blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) BreakLease(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, blobClientBreakLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
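+// BreakPeriod maps to the x-ms-lease-break-period header: the number of seconds the broken
+// lease is allowed to continue before it becomes free (0 breaks it immediately); when
+// omitted, the time remaining on the current lease is used. An illustrative sketch:
+//
+//	period := int32(0) // hypothetical: make the lease available again right away
+//	opts := &blobClientBreakLeaseOptions{BreakPeriod: &period}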
+func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "break") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.BreakPeriod != nil { + req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.BreakPeriod), 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientBreakLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobClientBreakLeaseResponse, error) { + result := blobClientBreakLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. 
+// leaseID - Specifies the current lease ID on the resource. +// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobClientChangeLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientChangeLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "change") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientChangeLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
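+// On success the service echoes the proposed lease ID back in x-ms-lease-id, so a caller
+// rotating lease IDs via the ChangeLease wrapper above can continue with resp.LeaseID. An
+// illustrative sketch (currentLeaseID and proposedLeaseID are hypothetical variables):
+//
+//	resp, err := client.ChangeLease(context.TODO(), currentLeaseID, proposedLeaseID, nil, nil)
+//	if err == nil {
+//		currentLeaseID = *resp.LeaseID // now equal to proposedLeaseID
+//	}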
+func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobClientChangeLeaseResponse, error) { + result := blobClientChangeLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blobClientCopyFromURLOptions - blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL +// method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, blobClientCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.copyFromURLHandleResponse(resp) +} + +// copyFromURLCreateRequest creates the CopyFromURL request. 
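+// Unlike StartCopyFromURL, this request sets x-ms-requires-sync: true, so the service copies
+// the source synchronously and the 202 response already reflects the finished copy; the
+// x-ms-copy-status header returned by copyFromURLHandleResponse below is therefore expected
+// to read "success" rather than "pending".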
+func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-requires-sync", "true") + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Metadata != nil { + for k, v := range blobClientCopyFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blobClientCopyFromURLOptions.Tier)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientCopyFromURLOptions.RequestID) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", 
base64.StdEncoding.EncodeToString(blobClientCopyFromURLOptions.SourceContentMD5)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blobClientCopyFromURLOptions.BlobTagsString) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientCopyFromURLOptions.ImmutabilityPolicyMode)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientCopyFromURLOptions.LegalHold)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blobClientCopyFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. +func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobClientCopyFromURLResponse, error) { + result := blobClientCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + return result, nil +} + +// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientCreateSnapshotOptions - blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot +// method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) CreateSnapshot(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, blobClientCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + } + return client.createSnapshotHandleResponse(resp) +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. +func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "snapshot") + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCreateSnapshotOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Metadata != nil { + for k, v := range blobClientCreateSnapshotOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", 
*modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientCreateSnapshotOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. +func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blobClientCreateSnapshotResponse, error) { + result := blobClientCreateSnapshotResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +}
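A minimal sketch of driving the CreateSnapshot operation above. Illustrative only: the helper name is hypothetical and the `client` value and stdlib imports ("context", "fmt") are assumed, not part of this hunk.

```go
// snapshotBlob is a hypothetical helper assumed to live alongside this
// generated package.
func snapshotBlob(ctx context.Context, client *blobClient) error {
	// nil skips the CPK, encryption-scope, and access-condition parameters.
	resp, err := client.CreateSnapshot(ctx, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	if resp.Snapshot != nil {
		// x-ms-snapshot is the opaque DateTime token that identifies this
		// snapshot in later Download or Delete calls.
		fmt.Println("created snapshot:", *resp.Snapshot)
	}
	return nil
}
```

+// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed +// from the storage account. If the storage account's soft delete feature is enabled, +// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service +// retains the blob or snapshot for the number of days specified by the +// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number +// of days has passed, the blob's data is permanently removed from the storage +// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use +// the List Blobs API and specify the "include=deleted" query parameter to discover +// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot cause the service to +// return an HTTP status code of 404 (ResourceNotFound). +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientDeleteOptions - blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.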
+func (client *blobClient) Delete(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, blobClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *blobClient) deleteCreateRequest(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientDeleteOptions != nil && blobClientDeleteOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientDeleteOptions.Snapshot) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientDeleteOptions.VersionID) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDeleteOptions.Timeout), 10)) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.BlobDeleteType != nil { + reqQP.Set("deletetype", string(*blobClientDeleteOptions.BlobDeleteType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.DeleteSnapshots != nil { + req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobClientDeleteOptions.DeleteSnapshots)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientDeleteOptions != nil && blobClientDeleteOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientDeleteOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientDeleteResponse, error) { + result := blobClientDeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy +// method. +func (client *blobClient) DeleteImmutabilityPolicy(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (blobClientDeleteImmutabilityPolicyResponse, error) { + req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteImmutabilityPolicyHandleResponse(resp) +} + +// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. +func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. +func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientDeleteImmutabilityPolicyResponse, error) { + result := blobClientDeleteImmutabilityPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +}
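A sketch of how the two delete surfaces above compose. Illustrative only: the helper name is hypothetical, `client` construction is assumed, and the claim that an active immutability policy can block deletion is an assumption drawn from the service semantics, not from this hunk.

```go
// forceDeleteBlob is a hypothetical helper assumed to live alongside this
// generated package; it needs the "context" stdlib import.
func forceDeleteBlob(ctx context.Context, client *blobClient) error {
	// Remove the immutability policy first; an active policy can cause the
	// subsequent Delete to be rejected by the service.
	if _, err := client.DeleteImmutabilityPolicy(ctx, nil); err != nil {
		return err
	}
	// nil options: no snapshot or version targeting and no access
	// conditions. With soft delete enabled on the account this marks the
	// blob for deletion rather than removing the data immediately.
	_, err := client.Delete(ctx, nil, nil, nil)
	return err
}
```

+// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You +// can also call Download to read a snapshot. +// If the operation fails it returns an *azcore.ResponseError type.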
+// blobClientDownloadOptions - blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) Download(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, blobClientDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientDownloadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + return blobClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. +func (client *blobClient) downloadCreateRequest(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientDownloadOptions.Snapshot) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientDownloadOptions.VersionID) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDownloadOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *blobClientDownloadOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentMD5 != nil { + req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentMD5)) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentCRC64 != nil { + req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentCRC64)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + 
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientDownloadOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// downloadHandleResponse handles the Download response. +func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClientDownloadResponse, error) { + result := blobClientDownloadResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := 
resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + 
lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + return result, nil +}
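A minimal sketch of a ranged read against the Download surface above. Illustrative only: the helper name is hypothetical, `client` construction is assumed, and the body-streaming pattern follows from the runtime.SkipBodyDownload call in downloadCreateRequest.

```go
// downloadFirstKiB is a hypothetical helper assumed to live alongside this
// generated package; it needs the "context" and "io" stdlib imports.
func downloadFirstKiB(ctx context.Context, client *blobClient) ([]byte, error) {
	rng := "bytes=0-1023" // value sent as the x-ms-range header
	opts := &blobClientDownloadOptions{Range: &rng}
	resp, err := client.Download(ctx, opts, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	// SkipBodyDownload leaves the payload on the raw response, so the
	// caller is responsible for streaming and closing it.
	defer resp.RawResponse.Body.Close()
	return io.ReadAll(resp.RawResponse.Body)
}
```

+// GetAccountInfo - Returns the SKU name and account kind. +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. +func (client *blobClient) GetAccountInfo(ctx context.Context, options *blobClientGetAccountInfoOptions) (blobClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *blobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response.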
+func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blobClientGetAccountInfoResponse, error) { + result := blobClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientGetPropertiesOptions - blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) GetProperties(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, blobClientGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientGetPropertiesOptions.Snapshot) + } + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientGetPropertiesOptions.VersionID) + } + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetPropertiesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetPropertiesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blobClientGetPropertiesResponse, error) { + result := blobClientGetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val :=
resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := 
resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// GetTags - The Get Tags operation enables users to get the tags associated with a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientGetTagsOptions - blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) GetTags(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientGetTagsResponse, error) { + req, err := client.getTagsCreateRequest(ctx, blobClientGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientGetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.getTagsHandleResponse(resp) +} + +// getTagsCreateRequest creates the GetTags request. 
+func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetTagsOptions.Timeout), 10)) + } + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientGetTagsOptions.Snapshot) + } + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientGetTagsOptions.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetTagsOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getTagsHandleResponse handles the GetTags response. +func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClientGetTagsResponse, error) { + result := blobClientGetTagsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetTagsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { + return blobClientGetTagsResponse{}, err + } + return result, nil +}
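A minimal sketch of reading tags through the GetTags surface above. Illustrative only: the helper name is hypothetical, `client` construction is assumed, and the BlobTags model fields (BlobTagSet entries with Key and Value) are assumed from the surrounding generated package; they do not appear in this hunk.

```go
// printBlobTags is a hypothetical helper assumed to live alongside this
// generated package; it needs the "context" and "fmt" stdlib imports.
func printBlobTags(ctx context.Context, client *blobClient) error {
	resp, err := client.GetTags(ctx, nil, nil, nil)
	if err != nil {
		return err
	}
	// getTagsHandleResponse unmarshals the XML body into resp.BlobTags.
	for _, tag := range resp.BlobTags.BlobTagSet {
		fmt.Printf("%s=%s\n", *tag.Key, *tag.Value)
	}
	return nil
}
```

+// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientQueryOptions - blobClientQueryOptions contains the optional parameters for the blobClient.Query method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.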
+func (client *blobClient) Query(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientQueryResponse, error) { + req, err := client.queryCreateRequest(ctx, blobClientQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientQueryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientQueryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return blobClientQueryResponse{}, runtime.NewResponseError(resp) + } + return client.queryHandleResponse(resp) +} + +// queryCreateRequest creates the Query request. +func (client *blobClient) queryCreateRequest(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "query") + if blobClientQueryOptions != nil && blobClientQueryOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientQueryOptions.Snapshot) + } + if blobClientQueryOptions != nil && blobClientQueryOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientQueryOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientQueryOptions != nil && blobClientQueryOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientQueryOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + if blobClientQueryOptions != nil && blobClientQueryOptions.QueryRequest != nil { + return req, runtime.MarshalAsXML(req, *blobClientQueryOptions.QueryRequest) + } + return req, nil +} + +// queryHandleResponse 
handles the Query response. +func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQueryResponse, error) { + result := blobClientQueryResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val
:= resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// blobClientReleaseLeaseOptions - blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobClientReleaseLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientReleaseLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "release") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientReleaseLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobClientReleaseLeaseResponse, error) { + result := blobClientReleaseLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// blobClientRenewLeaseOptions - blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
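+//
+// A minimal, hypothetical usage sketch from inside this package (ctx, client,
+// and leaseID are assumed to already exist; both option parameters may be nil):
+//
+//	resp, err := client.RenewLease(ctx, leaseID, nil, nil)
+//	if err != nil {
+//		// err is an *azcore.ResponseError on service failures
+//	}
+//	_ = resp.LeaseID // the renewed lease ID returned by the service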
+func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobClientRenewLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientRenewLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "renew") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientRenewLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobClientRenewLeaseResponse, error) { + result := blobClientRenewLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// expiryOptions - Required. Indicates mode of the expiry time +// options - blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. +func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (blobClientSetExpiryResponse, error) { + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + } + return client.setExpiryHandleResponse(resp) +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions)) + if options != nil && options.ExpiresOn != nil { + req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
+func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClientSetExpiryResponse, error) { + result := blobClientSetExpiryResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetHTTPHeadersOptions - blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, blobClientSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + } + return client.setHTTPHeadersHandleResponse(resp) +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
+func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetHTTPHeadersOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetHTTPHeadersOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
+func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blobClientSetHTTPHeadersResponse, error) { + result := blobClientSetHTTPHeadersResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetImmutabilityPolicyOptions - blobClientSetImmutabilityPolicyOptions contains the optional parameters for the +// blobClient.SetImmutabilityPolicy method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetImmutabilityPolicy(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetImmutabilityPolicyResponse, error) { + req, err := client.setImmutabilityPolicyCreateRequest(ctx, blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setImmutabilityPolicyHandleResponse(resp) +} + +// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
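+// The optional policy expiry is sent as the x-ms-immutability-policy-until-date
+// header in RFC1123 format, and the optional mode as x-ms-immutability-policy-mode.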
+func (client *blobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetImmutabilityPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetImmutabilityPolicyOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. +func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientSetImmutabilityPolicyResponse, error) { + result := blobClientSetImmutabilityPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + return result, nil +} + +// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// legalHold - Specified if a legal hold should be set on the blob. +// options - blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. 
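+//
+// A minimal, hypothetical sketch (ctx and client are assumed; options may be nil):
+//
+//	resp, err := client.SetLegalHold(ctx, true, nil)
+//	if err != nil {
+//		// err is an *azcore.ResponseError on service failures
+//	}
+//	_ = resp.LegalHold // legal-hold state echoed back via x-ms-legal-hold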
+func (client *blobClient) SetLegalHold(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (blobClientSetLegalHoldResponse, error) { + req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + } + return client.setLegalHoldHandleResponse(resp) +} + +// setLegalHoldCreateRequest creates the SetLegalHold request. +func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "legalhold") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setLegalHoldHandleResponse handles the SetLegalHold response. +func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobClientSetLegalHoldResponse, error) { + result := blobClientSetLegalHoldResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value +// pairs +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetMetadataOptions - blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
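+//
+// A minimal, hypothetical sketch; every parameter group may be nil, and the
+// metadata map is sent as x-ms-meta-* request headers:
+//
+//	opts := &blobClientSetMetadataOptions{Metadata: map[string]string{"env": "dev"}}
+//	resp, err := client.SetMetadata(ctx, opts, nil, nil, nil, nil)
+//	if err != nil {
+//		// err is an *azcore.ResponseError on service failures
+//	}
+//	_ = resp.ETag // the blob's new entity tag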
+func (client *blobClient) SetMetadata(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, blobClientSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetMetadataOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Metadata != nil { + for k, v := range blobClientSetMetadataOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.RequestID != nil { + 
req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetMetadataOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobClientSetMetadataResponse, error) { + result := blobClientSetMetadataResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// SetTags - The Set Tags operation enables users to set tags on a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetTagsOptions - blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) SetTags(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientSetTagsResponse, error) { + req, err := client.setTagsCreateRequest(ctx, blobClientSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientSetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return blobClientSetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.setTagsHandleResponse(resp) +} + +// setTagsCreateRequest creates the SetTags request. 
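+// When blobClientSetTagsOptions.Tags is provided, the tag set is marshalled as
+// an XML request body; all other parameters travel as query parameters or headers.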
+func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTagsOptions.Timeout), 10)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientSetTagsOptions.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentMD5)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentCRC64)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTagsOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Tags != nil { + return req, runtime.MarshalAsXML(req, *blobClientSetTagsOptions.Tags) + } + return req, nil +} + +// setTagsHandleResponse handles the SetTags response. +func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClientSetTagsResponse, error) { + result := blobClientSetTagsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetTagsResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A +// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// If the operation fails it returns an *azcore.ResponseError type. +// tier - Indicates the tier to be set on the blob. +// blobClientSetTierOptions - blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetTierResponse, error) { + req, err := client.setTierCreateRequest(ctx, tier, blobClientSetTierOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientSetTierResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetTierResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return blobClientSetTierResponse{}, runtime.NewResponseError(resp) + } + return client.setTierHandleResponse(resp) +} + +// setTierCreateRequest creates the SetTier request. +func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tier") + if blobClientSetTierOptions != nil && blobClientSetTierOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientSetTierOptions.Snapshot) + } + if blobClientSetTierOptions != nil && blobClientSetTierOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientSetTierOptions.VersionID) + } + if blobClientSetTierOptions != nil && blobClientSetTierOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTierOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-access-tier", string(tier)) + if blobClientSetTierOptions != nil && blobClientSetTierOptions.RehydratePriority != nil { + req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientSetTierOptions.RehydratePriority)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetTierOptions != nil && blobClientSetTierOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTierOptions.RequestID) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setTierHandleResponse handles the SetTier response. +func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClientSetTierResponse, error) { + result := blobClientSetTierResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. +// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. 
This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blobClientStartCopyFromURLOptions - blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL +// method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientStartCopyFromURLResponse, error) { + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.startCopyFromURLHandleResponse(resp) +} + +// startCopyFromURLCreateRequest creates the StartCopyFromURL request. 
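+// The service acknowledges the copy with 202 Accepted and performs it
+// asynchronously; the x-ms-copy-id and x-ms-copy-status response headers
+// identify and describe the in-flight copy.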
+func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientStartCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Metadata != nil { + for k, v := range blobClientStartCopyFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blobClientStartCopyFromURLOptions.Tier)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RehydratePriority != nil { + req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientStartCopyFromURLOptions.RehydratePriority)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", 
*leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientStartCopyFromURLOptions.RequestID) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blobClientStartCopyFromURLOptions.BlobTagsString) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.SealBlob != nil { + req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobClientStartCopyFromURLOptions.SealBlob)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientStartCopyFromURLOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// startCopyFromURLHandleResponse handles the StartCopyFromURL response. +func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (blobClientStartCopyFromURLResponse, error) { + result := blobClientStartCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Undelete - Undelete a blob that was previously soft deleted +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. 
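+//
+// A minimal, hypothetical sketch (ctx and client are assumed; options may be nil):
+//
+//	if _, err := client.Undelete(ctx, nil); err != nil {
+//		// err is an *azcore.ResponseError on service failures
+//	}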
+func (client *blobClient) Undelete(ctx context.Context, options *blobClientUndeleteOptions) (blobClientUndeleteResponse, error) { + req, err := client.undeleteCreateRequest(ctx, options) + if err != nil { + return blobClientUndeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientUndeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientUndeleteResponse{}, runtime.NewResponseError(resp) + } + return client.undeleteHandleResponse(resp) +} + +// undeleteCreateRequest creates the Undelete request. +func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *blobClientUndeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// undeleteHandleResponse handles the Undelete response. +func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClientUndeleteResponse, error) { + result := blobClientUndeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientUndeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go new file mode 100644 index 000000000000..3f78a28aa406 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go @@ -0,0 +1,953 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +type blockBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newBlockBlobClient creates a new instance of blockBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
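+//
+// A hypothetical sketch; the blob URL and pipeline are assumed to come from the
+// caller's configuration:
+//
+//	bb := newBlockBlobClient("https://myaccount.blob.core.windows.net/container/blob", pl)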
+func newBlockBlobClient(endpoint string, pl runtime.Pipeline) *blockBlobClient { + client := &blockBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// blocks - Blob Blocks. +// blockBlobClientCommitBlockListOptions - blockBlobClientCommitBlockListOptions contains the optional parameters for the +// blockBlobClient.CommitBlockList method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientCommitBlockListResponse, error) { + req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobClientCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.commitBlockListHandleResponse(resp) +} + +// commitBlockListCreateRequest creates the CommitBlockList request. 
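+// The block list itself is marshalled as the XML request body (via the
+// runtime.MarshalAsXML call at the end of this function); blob properties,
+// encryption parameters, and access conditions travel as headers.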
+func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientCommitBlockListOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentMD5)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentCRC64)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Metadata != nil { + for k, v := range blockBlobClientCommitBlockListOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientCommitBlockListOptions.Tier)) + 
} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientCommitBlockListOptions.RequestID) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientCommitBlockListOptions.BlobTagsString) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientCommitBlockListOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, blocks) +} + +// commitBlockListHandleResponse handles the CommitBlockList response. 
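+//
+// For orientation, callers drive the two-phase block upload by staging blocks
+// and then committing the assembled list. A minimal sketch, assuming a
+// constructed blockBlobClient, that the exported CommitBlockList mirrors the
+// parameters of commitBlockListCreateRequest above, and that BlockLookupList
+// carries the base64 block IDs in a Latest []*string field mirroring the REST
+// block-list XML (all names and sizes here are illustrative):
+//
+//	id := base64.StdEncoding.EncodeToString([]byte("block-0000"))
+//	if _, err := client.StageBlock(ctx, id, size, body, nil, nil, nil, nil); err != nil {
+//		return err // service failures surface as *azcore.ResponseError
+//	}
+//	_, err := client.CommitBlockList(ctx, BlockLookupList{Latest: []*string{&id}}, nil, nil, nil, nil, nil, nil)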
+func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (blockBlobClientCommitBlockListResponse, error) { + result := blockBlobClientCommitBlockListResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// blockBlobClientGetBlockListOptions - blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
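+//
+// A minimal usage sketch (illustrative, not part of the generated surface),
+// assuming a constructed blockBlobClient; all optional parameter groups may
+// be nil:
+//
+//	resp, err := client.GetBlockList(ctx, BlockListTypeCommitted, nil, nil, nil)
+//	if err != nil {
+//		return err // *azcore.ResponseError on non-200 responses
+//	}
+//	_ = resp.BlockList // block list decoded from the XML body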
+func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientGetBlockListResponse, error) { + req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobClientGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.getBlockListHandleResponse(resp) +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Snapshot != nil { + reqQP.Set("snapshot", *blockBlobClientGetBlockListOptions.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientGetBlockListOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientGetBlockListOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (blockBlobClientGetBlockListResponse, error) { + result := blockBlobClientGetBlockListResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blockBlobClientPutBlobFromURLOptions - blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. 
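+//
+// A minimal sketch, assuming a constructed blockBlobClient and a source blob
+// that is public or carries a SAS (srcURL and srcLength are illustrative);
+// all optional parameter groups may be nil:
+//
+//	resp, err := client.PutBlobFromURL(ctx, srcLength, srcURL, nil, nil, nil, nil, nil, nil, nil)
+//	if err != nil {
+//		return err // *azcore.ResponseError on non-201 responses
+//	}
+//	_ = resp.ETag // the new blob's ETag, when returned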
+func (client *blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *blockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientPutBlobFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Metadata != nil { + for k, v := range blockBlobClientPutBlobFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientPutBlobFromURLOptions.Tier)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientPutBlobFromURLOptions.RequestID) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", 
base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.SourceContentMD5)) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientPutBlobFromURLOptions.BlobTagsString) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties != nil { + req.Raw().Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties)) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *blockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (blockBlobClientPutBlobFromURLResponse, error) { + result := blockBlobClientPutBlobFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// body - Initial data +// blockBlobClientStageBlockOptions - blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock +// method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (blockBlobClientStageBlockResponse, error) { + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobClientStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockHandleResponse(resp) +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentMD5)) + } + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentCRC64)) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", 
*blockBlobClientStageBlockOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// stageBlockHandleResponse handles the StageBlock response. +func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (blockBlobClientStageBlockResponse, error) { + result := blockBlobClientStageBlockResponse{RawResponse: resp} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// sourceURL - Specify a URL to the copy source. +// blockBlobClientStageBlockFromURLOptions - blockBlobClientStageBlockFromURLOptions contains the optional parameters for +// the blockBlobClient.StageBlockFromURL method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. 
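+//
+// A minimal sketch, assuming a constructed blockBlobClient: the block ID must
+// be base64-encoded (same encoded length for every block of the blob), the
+// source must be public or otherwise authorized, and contentLength describes
+// the request body, which this operation leaves empty, so 0 is typical; a
+// SourceRange option can stage just a slice of the source:
+//
+//	id := base64.StdEncoding.EncodeToString([]byte("block-0000"))
+//	_, err := client.StageBlockFromURL(ctx, id, 0, srcURL, nil, nil, nil, nil, nil)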
+func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientStageBlockFromURLResponse, error) { + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobClientStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockFromURLHandleResponse(resp) +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("x-ms-copy-source", sourceURL) + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceRange != nil { + req.Raw().Header.Set("x-ms-source-range", *blockBlobClientStageBlockFromURLOptions.SourceRange) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentMD5)) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentcrc64 != nil { + req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentcrc64)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", 
*leaseAccessConditions.LeaseID) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockFromURLOptions.RequestID) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (blockBlobClientStageBlockFromURLResponse, error) { + result := blockBlobClientStageBlockFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. 
Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// body - Initial data +// blockBlobClientUploadOptions - blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientUploadResponse, error) { + req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobClientUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + } + return client.uploadHandleResponse(resp) +} + +// uploadCreateRequest creates the Upload request. 
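+//
+// For orientation, a minimal single-shot Upload sketch (illustrative; assumes
+// a constructed blockBlobClient and azcore's streaming.NopCloser as the
+// io.ReadSeekCloser adapter over a bytes.Reader):
+//
+//	data := []byte("hello")
+//	body := streaming.NopCloser(bytes.NewReader(data))
+//	_, err := client.Upload(ctx, int64(len(data)), body, nil, nil, nil, nil, nil, nil)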
+func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientUploadOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientUploadOptions.TransactionalContentMD5)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Metadata != nil { + for k, v := range blockBlobClientUploadOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientUploadOptions.Tier)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != 
nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientUploadOptions.RequestID) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientUploadOptions.BlobTagsString) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientUploadOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientUploadOptions.ImmutabilityPolicyMode)) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientUploadOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadHandleResponse handles the Upload response. 
+func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (blockBlobClientUploadResponse, error) { + result := blockBlobClientUploadResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go new file mode 100644 index 000000000000..2348df04a43a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go @@ -0,0 +1,841 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +const ( + moduleName = "azblob" + moduleVersion = "v0.4.1" +) + +// AccessTier enum +type AccessTier string + +const ( + AccessTierArchive AccessTier = "Archive" + AccessTierCool AccessTier = "Cool" + AccessTierHot AccessTier = "Hot" + AccessTierP10 AccessTier = "P10" + AccessTierP15 AccessTier = "P15" + AccessTierP20 AccessTier = "P20" + AccessTierP30 AccessTier = "P30" + AccessTierP4 AccessTier = "P4" + AccessTierP40 AccessTier = "P40" + AccessTierP50 AccessTier = "P50" + AccessTierP6 AccessTier = "P6" + AccessTierP60 AccessTier = "P60" + AccessTierP70 AccessTier = "P70" + AccessTierP80 AccessTier = "P80" +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return []AccessTier{ + AccessTierArchive, + AccessTierCool, + AccessTierHot, + AccessTierP10, + AccessTierP15, + AccessTierP20, + AccessTierP30, + AccessTierP4, + AccessTierP40, + AccessTierP50, + AccessTierP6, + AccessTierP60, + AccessTierP70, + AccessTierP80, + } +} + +// ToPtr returns a *AccessTier pointing to the current value. 
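+//
+// The pointer form suits the optional fields in this package's options
+// structs; a minimal sketch, assuming the Tier field is the *AccessTier
+// option consumed by uploadCreateRequest above:
+//
+//	opts := blockBlobClientUploadOptions{Tier: AccessTierHot.ToPtr()}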
+func (c AccessTier) ToPtr() *AccessTier { + return &c +} + +// AccountKind enum +type AccountKind string + +const ( + AccountKindStorage AccountKind = "Storage" + AccountKindBlobStorage AccountKind = "BlobStorage" + AccountKindStorageV2 AccountKind = "StorageV2" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return []AccountKind{ + AccountKindStorage, + AccountKindBlobStorage, + AccountKindStorageV2, + AccountKindFileStorage, + AccountKindBlockBlobStorage, + } +} + +// ToPtr returns a *AccountKind pointing to the current value. +func (c AccountKind) ToPtr() *AccountKind { + return &c +} + +// ArchiveStatus enum +type ArchiveStatus string + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCool, + ArchiveStatusRehydratePendingToHot, + } +} + +// ToPtr returns a *ArchiveStatus pointing to the current value. +func (c ArchiveStatus) ToPtr() *ArchiveStatus { + return &c +} + +// BlobExpiryOptions enum +type BlobExpiryOptions string + +const ( + BlobExpiryOptionsAbsolute BlobExpiryOptions = "Absolute" + BlobExpiryOptionsNeverExpire BlobExpiryOptions = "NeverExpire" + BlobExpiryOptionsRelativeToCreation BlobExpiryOptions = "RelativeToCreation" + BlobExpiryOptionsRelativeToNow BlobExpiryOptions = "RelativeToNow" +) + +// PossibleBlobExpiryOptionsValues returns the possible values for the BlobExpiryOptions const type. +func PossibleBlobExpiryOptionsValues() []BlobExpiryOptions { + return []BlobExpiryOptions{ + BlobExpiryOptionsAbsolute, + BlobExpiryOptionsNeverExpire, + BlobExpiryOptionsRelativeToCreation, + BlobExpiryOptionsRelativeToNow, + } +} + +// ToPtr returns a *BlobExpiryOptions pointing to the current value. +func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions { + return &c +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus string + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return []BlobGeoReplicationStatus{ + BlobGeoReplicationStatusLive, + BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusUnavailable, + } +} + +// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value. +func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus { + return &c +} + +// BlobImmutabilityPolicyMode enum +type BlobImmutabilityPolicyMode string + +const ( + BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable" + BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked" + BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked" +) + +// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type. 
+func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode { + return []BlobImmutabilityPolicyMode{ + BlobImmutabilityPolicyModeMutable, + BlobImmutabilityPolicyModeUnlocked, + BlobImmutabilityPolicyModeLocked, + } +} + +// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value. +func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode { + return &c +} + +// BlobType enum +type BlobType string + +const ( + BlobTypeBlockBlob BlobType = "BlockBlob" + BlobTypePageBlob BlobType = "PageBlob" + BlobTypeAppendBlob BlobType = "AppendBlob" +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{ + BlobTypeBlockBlob, + BlobTypePageBlob, + BlobTypeAppendBlob, + } +} + +// ToPtr returns a *BlobType pointing to the current value. +func (c BlobType) ToPtr() *BlobType { + return &c +} + +// BlockListType enum +type BlockListType string + +const ( + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" + BlockListTypeAll BlockListType = "all" +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{ + BlockListTypeCommitted, + BlockListTypeUncommitted, + BlockListTypeAll, + } +} + +// ToPtr returns a *BlockListType pointing to the current value. +func (c BlockListType) ToPtr() *BlockListType { + return &c +} + +// CopyStatusType enum +type CopyStatusType string + +const ( + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypePending, + CopyStatusTypeSuccess, + CopyStatusTypeAborted, + CopyStatusTypeFailed, + } +} + +// ToPtr returns a *CopyStatusType pointing to the current value. +func (c CopyStatusType) ToPtr() *CopyStatusType { + return &c +} + +// DeleteSnapshotsOptionType enum +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeOnly, + } +} + +// ToPtr returns a *DeleteSnapshotsOptionType pointing to the current value. +func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType { + return &c +} + +// EncryptionAlgorithmType enum +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeNone, + EncryptionAlgorithmTypeAES256, + } +} + +// ToPtr returns a *EncryptionAlgorithmType pointing to the current value. 
+func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType { + return &c +} + +// LeaseDurationType enum +type LeaseDurationType string + +const ( + LeaseDurationTypeInfinite LeaseDurationType = "infinite" + LeaseDurationTypeFixed LeaseDurationType = "fixed" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeInfinite, + LeaseDurationTypeFixed, + } +} + +// ToPtr returns a *LeaseDurationType pointing to the current value. +func (c LeaseDurationType) ToPtr() *LeaseDurationType { + return &c +} + +// LeaseStateType enum +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeLeased LeaseStateType = "leased" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeLeased, + LeaseStateTypeExpired, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + } +} + +// ToPtr returns a *LeaseStateType pointing to the current value. +func (c LeaseStateType) ToPtr() *LeaseStateType { + return &c +} + +// LeaseStatusType enum +type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +// ToPtr returns a *LeaseStatusType pointing to the current value. +func (c LeaseStatusType) ToPtr() *LeaseStatusType { + return &c +} + +// ListBlobsIncludeItem enum +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. +func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, + ListBlobsIncludeItemDeletedwithversions, + } +} + +// ToPtr returns a *ListBlobsIncludeItem pointing to the current value. 
+func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem { + return &c +} + +// ListContainersIncludeType enum +type ListContainersIncludeType string + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" + ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ + ListContainersIncludeTypeMetadata, + ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeSystem, + } +} + +// ToPtr returns a *ListContainersIncludeType pointing to the current value. +func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType { + return &c +} + +// PremiumPageBlobAccessTier enum +type PremiumPageBlobAccessTier string + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return []PremiumPageBlobAccessTier{ + PremiumPageBlobAccessTierP10, + PremiumPageBlobAccessTierP15, + PremiumPageBlobAccessTierP20, + PremiumPageBlobAccessTierP30, + PremiumPageBlobAccessTierP4, + PremiumPageBlobAccessTierP40, + PremiumPageBlobAccessTierP50, + PremiumPageBlobAccessTierP6, + PremiumPageBlobAccessTierP60, + PremiumPageBlobAccessTierP70, + PremiumPageBlobAccessTierP80, + } +} + +// ToPtr returns a *PremiumPageBlobAccessTier pointing to the current value. +func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier { + return &c +} + +// PublicAccessType enum +type PublicAccessType string + +const ( + PublicAccessTypeBlob PublicAccessType = "blob" + PublicAccessTypeContainer PublicAccessType = "container" +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{ + PublicAccessTypeBlob, + PublicAccessTypeContainer, + } +} + +// ToPtr returns a *PublicAccessType pointing to the current value. +func (c PublicAccessType) ToPtr() *PublicAccessType { + return &c +} + +// QueryFormatType - The quick query format type. +type QueryFormatType string + +const ( + QueryFormatTypeDelimited QueryFormatType = "delimited" + QueryFormatTypeJSON QueryFormatType = "json" + QueryFormatTypeArrow QueryFormatType = "arrow" + QueryFormatTypeParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. 
+func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{ + QueryFormatTypeDelimited, + QueryFormatTypeJSON, + QueryFormatTypeArrow, + QueryFormatTypeParquet, + } +} + +// ToPtr returns a *QueryFormatType pointing to the current value. +func (c QueryFormatType) ToPtr() *QueryFormatType { + return &c +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority string + +const ( + RehydratePriorityHigh RehydratePriority = "High" + RehydratePriorityStandard RehydratePriority = "Standard" +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return []RehydratePriority{ + RehydratePriorityHigh, + RehydratePriorityStandard, + } +} + +// ToPtr returns a *RehydratePriority pointing to the current value. +func (c RehydratePriority) ToPtr() *RehydratePriority { + return &c +} + +// SKUName enum +type SKUName string + +const ( + SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardRAGRS SKUName = "Standard_RAGRS" + SKUNameStandardZRS SKUName = "Standard_ZRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNameStandardLRS, + SKUNameStandardGRS, + SKUNameStandardRAGRS, + SKUNameStandardZRS, + SKUNamePremiumLRS, + } +} + +// ToPtr returns a *SKUName pointing to the current value. +func (c SKUName) ToPtr() *SKUName { + return &c +} + +// SequenceNumberActionType enum +type SequenceNumberActionType string + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = "max" + SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{ + SequenceNumberActionTypeMax, + SequenceNumberActionTypeUpdate, + SequenceNumberActionTypeIncrement, + } +} + +// ToPtr returns a *SequenceNumberActionType pointing to the current value. 
+func (c SequenceNumberActionType) ToPtr() *SequenceNumberActionType { + return &c +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" + StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" + StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" + StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" + StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" + StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" + StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" + StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" + StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" + StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" + StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + 
StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" + StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" + StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" + StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" + StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" + StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" + StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode = "InvalidVersionForPageBlobOperation" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader 
StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = "SequenceNumberConditionNotMet" + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" + StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" + StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. 
+func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAppendPositionConditionNotMet, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeBlobAlreadyExists, + StorageErrorCodeBlobArchived, + StorageErrorCodeBlobBeingRehydrated, + StorageErrorCodeBlobImmutableDueToPolicy, + StorageErrorCodeBlobNotArchived, + StorageErrorCodeBlobNotFound, + StorageErrorCodeBlobOverwritten, + StorageErrorCodeBlobTierInadequateForContentLength, + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, + StorageErrorCodeBlockCountExceedsLimit, + StorageErrorCodeBlockListTooLong, + StorageErrorCodeCannotChangeToLowerTier, + StorageErrorCodeCannotVerifyCopySource, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerAlreadyExists, + StorageErrorCodeContainerBeingDeleted, + StorageErrorCodeContainerDisabled, + StorageErrorCodeContainerNotFound, + StorageErrorCodeContentLengthLargerThanTierLimit, + StorageErrorCodeCopyAcrossAccountsNotSupported, + StorageErrorCodeCopyIDMismatch, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeIncrementalCopyBlobMismatch, + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + 
StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} + +// ToPtr returns a *StorageErrorCode pointing to the current value. +func (c StorageErrorCode) ToPtr() *StorageErrorCode { + return &c +} + +// BlobDeleteType enum +type BlobDeleteType string + +const ( + BlobDeleteTypeNone BlobDeleteType = "None" + BlobDeleteTypePermanent BlobDeleteType = "Permanent" +) + +// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type. +func PossibleBlobDeleteTypeValues() []BlobDeleteType { + return []BlobDeleteType{ + BlobDeleteTypeNone, + BlobDeleteTypePermanent, + } +} + +// ToPtr returns a *BlobDeleteType pointing to the current value. +func (c BlobDeleteType) ToPtr() *BlobDeleteType { + return &c +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go new file mode 100644 index 000000000000..c9245ce10d43 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go @@ -0,0 +1,1442 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +type containerClient struct { + endpoint string + pl runtime.Pipeline +} + +// newContainerClient creates a new instance of containerClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
+func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { + client := &containerClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientAcquireLeaseOptions - containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) AcquireLease(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, containerClientAcquireLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "acquire") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Duration != nil { + req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Duration), 10)) + } + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.ProposedLeaseID != nil { + req.Raw().Header.Set("x-ms-proposed-lease-id", *containerClientAcquireLeaseOptions.ProposedLeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientAcquireLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
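AcquireLease's comment says the lock lasts 15 to 60 seconds or is infinite; on the wire that is the `x-ms-lease-duration` header built from `Options.Duration` above. A small sketch of the resulting wire values (the -1-means-infinite convention is an assumption from the Azure lease protocol, not stated in this file):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// -1 is assumed to request an infinite lease; 15..60 a fixed-duration one.
	for _, duration := range []int32{-1, 15, 60} {
		fmt.Println("x-ms-lease-duration:", strconv.FormatInt(int64(duration), 10))
	}
}
```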
+func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (containerClientAcquireLeaseResponse, error) { + result := containerClientAcquireLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientBreakLeaseOptions - containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) BreakLease(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, containerClientBreakLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "break") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.BreakPeriod != nil { + req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.BreakPeriod), 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientBreakLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (containerClientBreakLeaseResponse, error) { + result := containerClientBreakLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. 
+// containerClientChangeLeaseOptions - containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerClientChangeLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientChangeLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "change") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientChangeLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (containerClientChangeLeaseResponse, error) { + result := containerClientChangeLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientCreateOptions - containerClientCreateOptions contains the optional parameters for the containerClient.Create +// method. +// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. +func (client *containerClient) Create(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (containerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, containerClientCreateOptions, containerCpkScopeInfo) + if err != nil { + return containerClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *containerClient) createCreateRequest(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientCreateOptions != nil && containerClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if containerClientCreateOptions != nil && containerClientCreateOptions.Metadata != nil { + for k, v := range containerClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if containerClientCreateOptions != nil && containerClientCreateOptions.Access != nil { + req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientCreateOptions.Access)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientCreateOptions != nil && containerClientCreateOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientCreateOptions.RequestID) + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header.Set("x-ms-default-encryption-scope", *containerCpkScopeInfo.DefaultEncryptionScope) + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *containerClient) createHandleResponse(resp *http.Response) (containerClientCreateResponse, error) { + result := containerClientCreateResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientDeleteOptions - containerClientDeleteOptions contains the optional parameters for the containerClient.Delete +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *containerClient) Delete(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, containerClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *containerClient) deleteCreateRequest(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientDeleteOptions != nil && containerClientDeleteOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientDeleteOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientDeleteOptions != nil && containerClientDeleteOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientDeleteOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *containerClient) deleteHandleResponse(resp *http.Response) (containerClientDeleteResponse, error) { + result := containerClientDeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may +// be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientGetAccessPolicyOptions - containerClientGetAccessPolicyOptions contains the optional parameters for the +// containerClient.GetAccessPolicy method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *containerClient) GetAccessPolicy(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, containerClientGetAccessPolicyOptions, leaseAccessConditions) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetAccessPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetAccessPolicyOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
+func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (containerClientGetAccessPolicyResponse, error) { + result := containerClientGetAccessPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo +// method. +func (client *containerClient) GetAccountInfo(ctx context.Context, options *containerClientGetAccountInfoOptions) (containerClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *containerClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
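The response handlers above turn header strings into enum pointers with a direct pointer conversion, e.g. `(*PublicAccessType)(&val)`. That works because the enums are defined as string types, so no copy into a named constant is needed. A standalone sketch:

```go
package main

import "fmt"

type SKUName string

func main() {
	// val is the raw header value; because SKUName's underlying type is
	// string, *string converts directly to *SKUName.
	val := "Standard_LRS"
	sku := (*SKUName)(&val)
	fmt.Println(*sku) // Standard_LRS
}
```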
+func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (containerClientGetAccountInfoResponse, error) { + result := containerClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientGetPropertiesOptions - containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *containerClient) GetProperties(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, containerClientGetPropertiesOptions, leaseAccessConditions) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetPropertiesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetPropertiesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (containerClientGetPropertiesResponse, error) { + result := containerClientGetPropertiesResponse{RawResponse: resp} + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { + hasImmutabilityPolicy, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.HasImmutabilityPolicy = &hasImmutabilityPolicy + } + if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { + hasLegalHold, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.HasLegalHold = &hasLegalHold + } + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + } + return result, nil +} + +// ListBlobFlatSegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment +// method. 
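The metadata loop in `getPropertiesHandleResponse` is worth calling out: it matches the `x-ms-meta-` prefix case-insensitively (net/http canonicalizes stored header names) and uses the remainder of the header name as the metadata key. A standalone mirror of that logic:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// metadataFromHeaders mirrors the generated loop: any header whose name is
// longer than "x-ms-meta-" and matches that prefix case-insensitively is
// copied into the metadata map, keyed by the rest of the header name.
func metadataFromHeaders(h http.Header) map[string]string {
	const prefix = "x-ms-meta-"
	var md map[string]string
	for name := range h {
		if len(name) > len(prefix) && strings.EqualFold(name[:len(prefix)], prefix) {
			if md == nil {
				md = map[string]string{}
			}
			md[name[len(prefix):]] = h.Get(name)
		}
	}
	return md
}

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-owner", "buildkit")
	// net/http stores the name as X-Ms-Meta-Owner, which is exactly why the
	// prefix match above must be case-insensitive.
	fmt.Println(metadataFromHeaders(h)) // map[Owner:buildkit]
}
```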
+func (client *containerClient) ListBlobFlatSegment(options *containerClientListBlobFlatSegmentOptions) *containerClientListBlobFlatSegmentPager { + return &containerClientListBlobFlatSegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listBlobFlatSegmentCreateRequest(ctx, options) + }, + advancer: func(ctx context.Context, resp containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsFlatSegmentResponse.NextMarker) + }, + } +} + +// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. +func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *containerClientListBlobFlatSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. +func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (containerClientListBlobFlatSegmentResponse, error) { + result := containerClientListBlobFlatSegmentResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientListBlobFlatSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { + return containerClientListBlobFlatSegmentResponse{}, err + } + return result, nil +} + +// ListBlobHierarchySegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. +// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. 
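The `include` query parameter is built with a slightly opaque one-liner: `fmt.Sprint` renders the slice as `[a b c]`, `Trim` drops the brackets, `Fields` splits on spaces, and `Join` produces the comma-separated value. It works only because the enum values never contain spaces or brackets. Shown in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

type ListBlobsIncludeItem string

func main() {
	include := []ListBlobsIncludeItem{"metadata", "snapshots", "tags"}
	// fmt.Sprint(include) -> "[metadata snapshots tags]"; trim the brackets,
	// split on whitespace, and rejoin with commas for the query string.
	v := strings.Join(strings.Fields(strings.Trim(fmt.Sprint(include), "[]")), ",")
	fmt.Println("include=" + v) // include=metadata,snapshots,tags
}
```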
+// options - containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment +// method. +func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *containerClientListBlobHierarchySegmentOptions) *containerClientListBlobHierarchySegmentPager { + return &containerClientListBlobHierarchySegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, + advancer: func(ctx context.Context, resp containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsHierarchySegmentResponse.NextMarker) + }, + } +} + +// listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. +func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *containerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("delimiter", delimiter) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. +func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (containerClientListBlobHierarchySegmentResponse, error) { + result := containerClientListBlobHierarchySegmentResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientListBlobHierarchySegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { + return containerClientListBlobHierarchySegmentResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. 
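Both `ListBlob*Segment` methods return a pager built from a `requester` (first page) and an `advancer` (a follow-up GET against the response's `NextMarker`). The pager types themselves live elsewhere in this file set; below is a miniature stand-in showing the contract, with all names illustrative rather than generated:

```go
package main

import "fmt"

// page stands in for a generated *SegmentResponse.
type page struct {
	Items      []string
	NextMarker string // empty means the listing is complete
}

// pager is a miniature of the requester/advancer split: the first fetch uses
// no marker, and each later fetch continues from the previous NextMarker.
type pager struct {
	fetch   func(marker string) page
	current page
	started bool
}

func (p *pager) NextPage() bool {
	if p.started && p.current.NextMarker == "" {
		return false // no continuation token: done
	}
	marker := ""
	if p.started {
		marker = p.current.NextMarker
	}
	p.current = p.fetch(marker)
	p.started = true
	return true
}

func main() {
	pages := map[string]page{
		"":   {Items: []string{"a", "b"}, NextMarker: "m1"},
		"m1": {Items: []string{"c"}},
	}
	p := &pager{fetch: func(m string) page { return pages[m] }}
	for p.NextPage() {
		fmt.Println(p.current.Items) // [a b], then [c]
	}
}
```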
+// leaseID - Specifies the current lease ID on the resource. +// containerClientReleaseLeaseOptions - containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerClientReleaseLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. +func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientReleaseLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "release") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientReleaseLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. 
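Lease release is driven entirely by headers: the `x-ms-lease-action` verb, the lease ID, and the optional `If-Modified-Since`/`If-Unmodified-Since` conditions formatted with `time.RFC1123`, exactly as in `releaseLeaseCreateRequest` above. A standalone sketch of that header shape (the endpoint and lease ID are made-up example values):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	req, _ := http.NewRequest(http.MethodPut,
		"https://example.invalid/container?comp=lease&restype=container", nil)
	// Same headers releaseLeaseCreateRequest produces: the lease action, the
	// lease ID, and RFC1123-formatted access conditions (mirroring the
	// generated code's use of time.RFC1123).
	req.Header.Set("x-ms-lease-action", "release")
	req.Header.Set("x-ms-lease-id", "5c1a7e1c-0000-0000-0000-000000000000") // illustrative ID
	cutoff := time.Date(2022, time.March, 1, 0, 0, 0, 0, time.UTC)
	req.Header.Set("If-Unmodified-Since", cutoff.Format(time.RFC1123))
	fmt.Println(req.Header.Get("If-Unmodified-Since"))
}
```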
+func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (containerClientReleaseLeaseResponse, error) { + result := containerClientReleaseLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Rename - Renames an existing container. +// If the operation fails it returns an *azcore.ResponseError type. +// sourceContainerName - Required. Specifies the name of the container to rename. +// options - containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. +func (client *containerClient) Rename(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (containerClientRenameResponse, error) { + req, err := client.renameCreateRequest(ctx, sourceContainerName, options) + if err != nil { + return containerClientRenameResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRenameResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientRenameResponse{}, runtime.NewResponseError(resp) + } + return client.renameHandleResponse(resp) +} + +// renameCreateRequest creates the Rename request. +func (client *containerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "rename") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-source-container-name", sourceContainerName) + if options != nil && options.SourceLeaseID != nil { + req.Raw().Header.Set("x-ms-source-lease-id", *options.SourceLeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renameHandleResponse handles the Rename response. 
+func (client *containerClient) renameHandleResponse(resp *http.Response) (containerClientRenameResponse, error) { + result := containerClientRenameResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenameResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// containerClientRenewLeaseOptions - containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerClientRenewLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. 
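Every operation in this file follows the same four-step shape visible in `RenewLease`: build the request, send it through the pipeline, reject unexpected status codes with a response error, and only then parse the response. A stripped-down sketch of that shape against a local test server (`doAndCheck` is a hypothetical helper, not an SDK function):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// doAndCheck mirrors the shape of every generated operation: send the
// request, fail unless the status code is the one the operation expects, and
// only then hand the response to a header/body parsing step.
func doAndCheck(c *http.Client, req *http.Request, want int) (*http.Response, error) {
	resp, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != want {
		resp.Body.Close()
		return nil, errors.New("unexpected status: " + resp.Status)
	}
	return resp, nil
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodPut, srv.URL, nil)
	if resp, err := doAndCheck(srv.Client(), req, http.StatusOK); err == nil {
		fmt.Println("status ok:", resp.StatusCode)
		resp.Body.Close()
	}
}
```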
+func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientRenewLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "renew") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientRenewLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. +func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (containerClientRenewLeaseResponse, error) { + result := containerClientRenewLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Restore - Restores a previously-deleted container. +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. +func (client *containerClient) Restore(ctx context.Context, options *containerClientRestoreOptions) (containerClientRestoreResponse, error) { + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return containerClientRestoreResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientRestoreResponse{}, runtime.NewResponseError(resp) + } + return client.restoreHandleResponse(resp) +} + +// restoreCreateRequest creates the Restore request. 
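The `handleResponse` functions are all header-copy loops: optional headers become pointer fields only when present, and date headers round-trip through `time.Parse(time.RFC1123)`. A compact sketch of the same pattern on a synthetic header set:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Header parsing in the style of renewLeaseHandleResponse: every field is
	// optional, so each header is copied only when non-empty, and date
	// headers go through time.Parse(time.RFC1123).
	h := http.Header{}
	h.Set("ETag", `"0x8D9FD3"`)
	h.Set("Last-Modified", "Tue, 01 Mar 2022 12:00:00 GMT")

	var etag *string
	if v := h.Get("ETag"); v != "" {
		etag = &v
	}
	var lastModified *time.Time
	if v := h.Get("Last-Modified"); v != "" {
		if t, err := time.Parse(time.RFC1123, v); err == nil {
			lastModified = &t
		}
	}
	fmt.Println(*etag, *lastModified)
}
```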
+func (client *containerClient) restoreCreateRequest(ctx context.Context, options *containerClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + if options != nil && options.DeletedContainerName != nil { + req.Raw().Header.Set("x-ms-deleted-container-name", *options.DeletedContainerName) + } + if options != nil && options.DeletedContainerVersion != nil { + req.Raw().Header.Set("x-ms-deleted-container-version", *options.DeletedContainerVersion) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// restoreHandleResponse handles the Restore response. +func (client *containerClient) restoreHandleResponse(resp *http.Response) (containerClientRestoreResponse, error) { + result := containerClientRestoreResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRestoreResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container +// may be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientSetAccessPolicyOptions - containerClientSetAccessPolicyOptions contains the optional parameters for the +// containerClient.SetAccessPolicy method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) SetAccessPolicy(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, containerClientSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. 
+func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetAccessPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Access != nil { + req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientSetAccessPolicyOptions.Access)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetAccessPolicyOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.ContainerACL != nil { + return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerClientSetAccessPolicyOptions.ContainerACL}) + } + return req, nil +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. +func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (containerClientSetAccessPolicyResponse, error) { + result := containerClientSetAccessPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. +// If the operation fails it returns an *azcore.ResponseError type. 
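The anonymous `wrapper` struct above is how the generated code renames the XML root: marshalling the ACL slice directly would emit the Go type's name, so the wrapper pins the root element to `<SignedIdentifiers>` with each policy nested as a `<SignedIdentifier>` child. A trimmed-down sketch with simplified string-typed fields standing in for the real pointer/time types:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// Trimmed-down copies of the generated types, enough to show how the
// anonymous wrapper in setAccessPolicyCreateRequest shapes the body.
type AccessPolicy struct {
	Start      string `xml:"Start"`
	Expiry     string `xml:"Expiry"`
	Permission string `xml:"Permission"`
}

type SignedIdentifier struct {
	ID           string       `xml:"Id"`
	AccessPolicy AccessPolicy `xml:"AccessPolicy"`
}

func main() {
	// XMLName pins the root element to <SignedIdentifiers>, matching the
	// REST schema regardless of the Go type's name.
	wrapper := struct {
		XMLName xml.Name           `xml:"SignedIdentifiers"`
		ACL     []SignedIdentifier `xml:"SignedIdentifier"`
	}{
		ACL: []SignedIdentifier{{
			ID: "policy-1",
			AccessPolicy: AccessPolicy{
				Start:      "2022-03-01T00:00:00Z",
				Expiry:     "2022-03-02T00:00:00Z",
				Permission: "rl",
			},
		}},
	}
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	if err := enc.Encode(wrapper); err != nil {
		fmt.Println(err)
	}
}
```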
+// containerClientSetMetadataOptions - containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) SetMetadata(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, containerClientSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "metadata") + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetMetadataOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Metadata != nil { + for k, v := range containerClientSetMetadataOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetMetadataOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
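Container metadata travels as one `x-ms-meta-<name>` header per key/value pair, which is all the loop in `setMetadataCreateRequest` does. A tiny sketch (hypothetical endpoint; Go canonicalizes header names on the way out, which is harmless since HTTP header names are case-insensitive):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Each metadata pair becomes an x-ms-meta-<name> header, exactly the
	// loop setMetadataCreateRequest runs over its Metadata map.
	metadata := map[string]string{"env": "ci", "owner": "buildkit"}
	req, _ := http.NewRequest(http.MethodPut,
		"https://example.invalid/container?restype=container&comp=metadata", nil)
	for k, v := range metadata {
		req.Header.Set("x-ms-meta-"+k, v)
	}
	for k, v := range req.Header {
		fmt.Println(k, v)
	}
}
```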
+func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (containerClientSetMetadataResponse, error) { + result := containerClientSetMetadataResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. +func (client *containerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (containerClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return containerClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *containerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("Content-Type", multipartContentType) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, body) +} + +// submitBatchHandleResponse handles the SubmitBatch response. 
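A batch body is a multipart/mixed payload whose parts are serialized HTTP sub-requests, and the Content-Type header must carry the boundary, per the `multipartContentType` comment above. One way to frame such a body with the standard library; the boundary value and the DELETE sub-request below are illustrative only, not a complete batch encoding:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

func main() {
	// SubmitBatch requires "multipart/mixed; boundary=batch_...". A
	// multipart.Writer produces a correctly framed body; each part would be
	// a serialized HTTP sub-request.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	_ = w.SetBoundary("batch_0ac0bfba-0000-0000-0000-000000000000") // illustrative boundary

	part, _ := w.CreatePart(textproto.MIMEHeader{
		"Content-Type": {"application/http"},
	})
	fmt.Fprint(part, "DELETE /container/blob1 HTTP/1.1\r\n\r\n") // illustrative sub-request
	w.Close()

	fmt.Println("Content-Type: multipart/mixed; boundary=" + w.Boundary())
	fmt.Println(body.String())
}
```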
+func (client *containerClient) submitBatchHandleResponse(resp *http.Response) (containerClientSubmitBatchResponse, error) { + result := containerClientSubmitBatchResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go new file mode 100644 index 000000000000..d40d63b1b0d4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go @@ -0,0 +1,2158 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "encoding/xml" + "time" +) + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. 
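`AccessPolicy`'s MarshalXML/UnmarshalXML show the pattern used throughout this models file: a locally defined `alias` type sheds the methods (avoiding infinite recursion), and pointer conversions re-type the time fields through a wrapper that controls the wire format. A self-contained sketch of the same trick with a hypothetical `rfc3339Time` wrapper (the SDK's `timeRFC3339`/`timeRFC1123` helpers play this role):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// rfc3339Time is a defined type over time.Time that controls the wire format
// (the generated code uses the same trick for RFC1123 as well).
type rfc3339Time time.Time

func (t rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(time.Time(t).Format(time.RFC3339), start)
}

type policy struct {
	Expiry *time.Time `xml:"Expiry"`
}

// MarshalXML re-types the field through an alias so the element is emitted
// in RFC3339, mirroring AccessPolicy.MarshalXML: the alias type has no
// methods, so encoding it cannot re-enter this function.
func (p policy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	type alias policy
	aux := struct {
		*alias
		Expiry *rfc3339Time `xml:"Expiry"`
	}{
		alias:  (*alias)(&p),
		Expiry: (*rfc3339Time)(p.Expiry), // legal: identical underlying types
	}
	return e.EncodeElement(aux, start)
}

func main() {
	exp := time.Date(2022, time.March, 2, 0, 0, 0, 0, time.UTC)
	out, _ := xml.Marshal(policy{Expiry: &exp})
	fmt.Println(string(out)) // <policy><Expiry>2022-03-02T00:00:00Z</Expiry></policy>
}
```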
+type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return e.EncodeElement(aux, start) +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobFlatListSegment struct +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return e.EncodeElement(aux, start) +} + +// BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlobHierarchyListSegment struct +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. 
+func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return e.EncodeElement(aux, start) +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobPropertiesInternal `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. +func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobItemInternal + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + return nil +} + +// BlobPrefix struct +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // REQUIRED + Etag *string `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + AccessTier *AccessTier `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. 
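encoding/xml has no built-in map support, so `Metadata` and `OrMetadata` are decoded through the `additionalProperties` helper seen in `BlobItemInternal.UnmarshalXML`. One plausible way such a helper can be written; this is a sketch, and the SDK's actual implementation may differ in detail:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// metadataMap decodes <Metadata><key1>v1</key1>...</Metadata> into a map by
// walking tokens, since encoding/xml cannot unmarshal into maps directly.
type metadataMap map[string]string

func (m *metadataMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	if *m == nil {
		*m = metadataMap{}
	}
	for {
		tok, err := d.Token()
		if err != nil {
			return err
		}
		switch t := tok.(type) {
		case xml.StartElement:
			var v string
			if err := d.DecodeElement(&v, &t); err != nil {
				return err
			}
			(*m)[t.Name.Local] = v // element name becomes the map key
		case xml.EndElement:
			return nil // closing </Metadata>
		}
	}
}

type blobItem struct {
	Name     string      `xml:"Name"`
	Metadata metadataMap `xml:"Metadata"`
}

func main() {
	src := `<Blob><Name>a.txt</Name><Metadata><owner>ci</owner><env>prod</env></Metadata></Blob>`
	var b blobItem
	if err := xml.NewDecoder(strings.NewReader(src)).Decode(&b); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(b.Name, b.Metadata["owner"], b.Metadata["env"])
}
```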
+ EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. + RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. +func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *[]byte `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + aux.ContentMD5 = &b.ContentMD5 + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. 
+func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *[]byte `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// BlobTag struct +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return e.EncodeElement(aux, start) +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +// BlockList struct +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. +func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return e.EncodeElement(aux, start) +} + +// BlockLookupList struct +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. 
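The `TagSet>Tag` and `CommittedBlocks>Block` struct tags are encoding/xml path expressions: the encoder synthesizes the intermediate element, giving the nested shape the REST schema expects. A minimal demonstration with simplified value-typed fields:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// The "TagSet>Tag" path tag makes encoding/xml synthesize the intermediate
// <TagSet> element, which is how BlobTags gets its three-level shape.
type blobTag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type blobTags struct {
	XMLName xml.Name  `xml:"Tags"`
	TagSet  []blobTag `xml:"TagSet>Tag"`
}

func main() {
	out, _ := xml.MarshalIndent(blobTags{
		TagSet: []blobTag{{Key: "stage", Value: "prod"}},
	}, "", "  ")
	fmt.Println(string(out))
	// <Tags>
	//   <TagSet>
	//     <Tag>
	//       <Key>stage</Key>
	//       <Value>prod</Value>
	//     </Tag>
	//   </TagSet>
	// </Tags>
}
```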
+func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "BlockList"
+	type alias BlockLookupList
+	aux := &struct {
+		*alias
+		Committed   *[]*string `xml:"Committed"`
+		Latest      *[]*string `xml:"Latest"`
+		Uncommitted *[]*string `xml:"Uncommitted"`
+	}{
+		alias: (*alias)(&b),
+	}
+	if b.Committed != nil {
+		aux.Committed = &b.Committed
+	}
+	if b.Latest != nil {
+		aux.Latest = &b.Latest
+	}
+	if b.Uncommitted != nil {
+		aux.Uncommitted = &b.Uncommitted
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// ClearRange struct
+type ClearRange struct {
+	// REQUIRED
+	End *int64 `xml:"End"`
+
+	// REQUIRED
+	Start *int64 `xml:"Start"`
+}
+
+// ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method.
+type ContainerCpkScopeInfo struct {
+	// Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
+	// future writes.
+	DefaultEncryptionScope *string
+	// Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
+	// the scope set on the container.
+	PreventEncryptionScopeOverride *bool
+}
+
+// ContainerItem - An Azure Storage container
+type ContainerItem struct {
+	// REQUIRED
+	Name *string `xml:"Name"`
+
+	// REQUIRED; Properties of a container
+	Properties *ContainerProperties `xml:"Properties"`
+	Deleted    *bool                `xml:"Deleted"`
+
+	// Dictionary of
+	Metadata map[string]*string `xml:"Metadata"`
+	Version  *string            `xml:"Version"`
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem.
+func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias ContainerItem
+	aux := &struct {
+		*alias
+		Metadata additionalProperties `xml:"Metadata"`
+	}{
+		alias: (*alias)(c),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	c.Metadata = (map[string]*string)(aux.Metadata)
+	return nil
+}
+
+// ContainerProperties - Properties of a container
+type ContainerProperties struct {
+	// REQUIRED
+	Etag *string `xml:"Etag"`
+
+	// REQUIRED
+	LastModified           *time.Time `xml:"Last-Modified"`
+	DefaultEncryptionScope *string    `xml:"DefaultEncryptionScope"`
+	DeletedTime            *time.Time `xml:"DeletedTime"`
+	HasImmutabilityPolicy  *bool      `xml:"HasImmutabilityPolicy"`
+	HasLegalHold           *bool      `xml:"HasLegalHold"`
+
+	// Indicates if version level worm is enabled on this container.
+	IsImmutableStorageWithVersioningEnabled *bool              `xml:"ImmutableStorageWithVersioningEnabled"`
+	LeaseDuration                           *LeaseDurationType `xml:"LeaseDuration"`
+	LeaseState                              *LeaseStateType    `xml:"LeaseState"`
+	LeaseStatus                             *LeaseStatusType   `xml:"LeaseStatus"`
+	PreventEncryptionScopeOverride          *bool              `xml:"DenyEncryptionScopeOverride"`
+	PublicAccess                            *PublicAccessType  `xml:"PublicAccess"`
+	RemainingRetentionDays                  *int32             `xml:"RemainingRetentionDays"`
+}
+
+// MarshalXML implements the xml.Marshaller interface for type ContainerProperties.
+func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias ContainerProperties
+	aux := &struct {
+		*alias
+		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
+		LastModified *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias:        (*alias)(&c),
+		DeletedTime:  (*timeRFC1123)(c.DeletedTime),
+		LastModified: (*timeRFC1123)(c.LastModified),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties.
+func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type alias ContainerProperties
+	aux := &struct {
+		*alias
+		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
+		LastModified *timeRFC1123 `xml:"Last-Modified"`
+	}{
+		alias: (*alias)(c),
+	}
+	if err := d.DecodeElement(aux, &start); err != nil {
+		return err
+	}
+	c.DeletedTime = (*time.Time)(aux.DeletedTime)
+	c.LastModified = (*time.Time)(aux.LastModified)
+	return nil
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another
+// domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin
+// domain) to call APIs in another domain
+type CorsRule struct {
+	// REQUIRED; the request headers that the origin domain may specify on the CORS request.
+	AllowedHeaders *string `xml:"AllowedHeaders"`
+
+	// REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
+	AllowedMethods *string `xml:"AllowedMethods"`
+
+	// REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
+	// is the domain from which the request originates. Note that the origin must be an exact
+	// case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*'
+	// to allow all origin domains to make requests via CORS.
+	AllowedOrigins *string `xml:"AllowedOrigins"`
+
+	// REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
+	// issuer
+	ExposedHeaders *string `xml:"ExposedHeaders"`
+
+	// REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request.
+	MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
+}
+
+// CpkInfo contains a group of parameters for the blobClient.Download method.
+type CpkInfo struct {
+	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+	// if the x-ms-encryption-key header is provided.
+	EncryptionAlgorithm *EncryptionAlgorithmType
+	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+	// is performed with the root account encryption key. For more information, see
+	// Encryption at Rest for Azure Storage Services.
+	EncryptionKey *string
+	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+	EncryptionKeySHA256 *string
+}
+
+// CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+type CpkScopeInfo struct {
+	// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+	// in the request. If not specified, encryption is performed with the default
+	// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+	EncryptionScope *string
+}
+
+// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
+type DelimitedTextConfiguration struct {
+	// The string used to separate columns.
+	ColumnSeparator *string `xml:"ColumnSeparator"`
+
+	// The string used as an escape character.
+ EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return e.EncodeElement(aux, start) +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. +func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. 
+ LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return e.EncodeElement(aux, start) +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *string + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. 
+	IfNoneMatch *string
+	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+	IfTags *string
+	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+	IfUnmodifiedSince *time.Time
+}
+
+// PageList - the list of pages
+type PageList struct {
+	ClearRange []*ClearRange `xml:"ClearRange"`
+	NextMarker *string       `xml:"NextMarker"`
+	PageRange  []*PageRange  `xml:"PageRange"`
+}
+
+// MarshalXML implements the xml.Marshaller interface for type PageList.
+func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias PageList
+	aux := &struct {
+		*alias
+		ClearRange *[]*ClearRange `xml:"ClearRange"`
+		PageRange  *[]*PageRange  `xml:"PageRange"`
+	}{
+		alias: (*alias)(&p),
+	}
+	if p.ClearRange != nil {
+		aux.ClearRange = &p.ClearRange
+	}
+	if p.PageRange != nil {
+		aux.PageRange = &p.PageRange
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// PageRange struct
+type PageRange struct {
+	// REQUIRED
+	End *int64 `xml:"End"`
+
+	// REQUIRED
+	Start *int64 `xml:"Start"`
+}
+
+// QueryFormat struct
+type QueryFormat struct {
+	// REQUIRED; The quick query format type.
+	Type *QueryFormatType `xml:"Type"`
+
+	// Groups the settings used for formatting the response if the response should be Arrow formatted.
+	ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"`
+
+	// Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
+	DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"`
+
+	// json text configuration
+	JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"`
+
+	// Anything
+	ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"`
+}
+
+// QueryRequest - Groups the set of query request settings.
+type QueryRequest struct {
+	// REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB.
+	Expression *string `xml:"Expression"`
+
+	// REQUIRED; The type of the provided query expression.
+	QueryType           *string             `xml:"QueryType"`
+	InputSerialization  *QuerySerialization `xml:"InputSerialization"`
+	OutputSerialization *QuerySerialization `xml:"OutputSerialization"`
+}
+
+// MarshalXML implements the xml.Marshaller interface for type QueryRequest.
+func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "QueryRequest"
+	type alias QueryRequest
+	aux := &struct {
+		*alias
+	}{
+		alias: (*alias)(&q),
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// QuerySerialization struct
+type QuerySerialization struct {
+	// REQUIRED
+	Format *QueryFormat `xml:"Format"`
+}
+
+// RetentionPolicy - the retention policy which determines how long the associated data should persist
+type RetentionPolicy struct {
+	// REQUIRED; Indicates whether a retention policy is enabled for the storage service
+	Enabled *bool `xml:"Enabled"`
+
+	// Indicates whether permanent delete is allowed on this storage account.
+	AllowPermanentDelete *bool `xml:"AllowPermanentDelete"`
+
+	// Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this
+	// value will be deleted
+	Days *int32 `xml:"Days"`
+}
+
+// SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages method.
+type SequenceNumberAccessConditions struct {
+	// Specify this header value to operate only on a blob if it has the specified sequence number.
+	IfSequenceNumberEqualTo *int64
+	// Specify this header value to operate only on a blob if it has a sequence number less than the specified value.
+	IfSequenceNumberLessThan *int64
+	// Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified value.
+	IfSequenceNumberLessThanOrEqualTo *int64
+}
+
+// SignedIdentifier - signed identifier
+type SignedIdentifier struct {
+	// REQUIRED; An Access policy
+	AccessPolicy *AccessPolicy `xml:"AccessPolicy"`
+
+	// REQUIRED; a unique id
+	ID *string `xml:"Id"`
+}
+
+// SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL method.
+type SourceModifiedAccessConditions struct {
+	// Specify an ETag value to operate only on blobs with a matching value.
+	SourceIfMatch *string
+	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+	SourceIfModifiedSince *time.Time
+	// Specify an ETag value to operate only on blobs without a matching value.
+	SourceIfNoneMatch *string
+	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+	SourceIfTags *string
+	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+	SourceIfUnmodifiedSince *time.Time
+}
+
+// StaticWebsite - The properties that enable an account to host a static website
+type StaticWebsite struct {
+	// REQUIRED; Indicates whether this account is hosting a static website
+	Enabled *bool `xml:"Enabled"`
+
+	// Absolute path of the default index page
+	DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"`
+
+	// The absolute path of the custom 404 page
+	ErrorDocument404Path *string `xml:"ErrorDocument404Path"`
+
+	// The default name of the index page under each directory
+	IndexDocument *string `xml:"IndexDocument"`
+}
+
+// StorageServiceProperties - Storage Service Properties.
+type StorageServiceProperties struct {
+	// The set of CORS rules.
+	Cors []*CorsRule `xml:"Cors>CorsRule"`
+
+	// The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible
+	// values include version 2008-10-27 and all more recent versions
+	DefaultServiceVersion *string `xml:"DefaultServiceVersion"`
+
+	// the retention policy which determines how long the associated data should persist
+	DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"`
+
+	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
+	HourMetrics *Metrics `xml:"HourMetrics"`
+
+	// Azure Analytics Logging settings.
+	Logging *Logging `xml:"Logging"`
+
+	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
+	MinuteMetrics *Metrics `xml:"MinuteMetrics"`
+
+	// The properties that enable an account to host a static website
+	StaticWebsite *StaticWebsite `xml:"StaticWebsite"`
+}
+
+// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties.
+func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	type alias StorageServiceProperties
+	aux := &struct {
+		*alias
+		Cors *[]*CorsRule `xml:"Cors>CorsRule"`
+	}{
+		alias: (*alias)(&s),
+	}
+	if s.Cors != nil {
+		aux.Cors = &s.Cors
+	}
+	return e.EncodeElement(aux, start)
+}
+
+// StorageServiceStats - Stats for the storage service.
+type StorageServiceStats struct {
+ // Geo-Replication information for the Secondary Storage Service
+ GeoReplication *GeoReplication `xml:"GeoReplication"`
+}
+
+// UserDelegationKey - A user delegation key
+type UserDelegationKey struct {
+ // REQUIRED; The date-time the key expires
+ SignedExpiry *time.Time `xml:"SignedExpiry"`
+
+ // REQUIRED; The Azure Active Directory object ID in GUID format.
+ SignedOid *string `xml:"SignedOid"`
+
+ // REQUIRED; Abbreviation of the Azure Storage service that accepts the key
+ SignedService *string `xml:"SignedService"`
+
+ // REQUIRED; The date-time the key is active
+ SignedStart *time.Time `xml:"SignedStart"`
+
+ // REQUIRED; The Azure Active Directory tenant ID in GUID format
+ SignedTid *string `xml:"SignedTid"`
+
+ // REQUIRED; The service version that created the key
+ SignedVersion *string `xml:"SignedVersion"`
+
+ // REQUIRED; The key as a base64 string
+ Value *string `xml:"Value"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for type UserDelegationKey.
+func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type alias UserDelegationKey
+ aux := &struct {
+ *alias
+ SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
+ SignedStart *timeRFC3339 `xml:"SignedStart"`
+ }{
+ alias: (*alias)(&u),
+ SignedExpiry: (*timeRFC3339)(u.SignedExpiry),
+ SignedStart: (*timeRFC3339)(u.SignedStart),
+ }
+ return e.EncodeElement(aux, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for type UserDelegationKey.
+func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ type alias UserDelegationKey
+ aux := &struct {
+ *alias
+ SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
+ SignedStart *timeRFC3339 `xml:"SignedStart"`
+ }{
+ alias: (*alias)(u),
+ }
+ if err := d.DecodeElement(aux, &start); err != nil {
+ return err
+ }
+ u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
+ u.SignedStart = (*time.Time)(aux.SignedStart)
+ return nil
+}
+
+// appendBlobClientAppendBlockFromURLOptions contains the optional parameters for the appendBlobClient.AppendBlockFromURL
+// method.
+type appendBlobClientAppendBlockFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+ // Bytes of source data in the specified range.
+ SourceRange *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
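+// NOTE (editor's illustrative sketch, not part of the generated code): the
+// aux struct in the two methods above swaps *time.Time for the package's
+// *timeRFC3339 wrapper so SignedStart/SignedExpiry serialize in the RFC 3339
+// form the service expects. Round-tripping a key through XML might look like:
+func exampleUserDelegationKeyRoundTrip(start, expiry time.Time) (UserDelegationKey, error) {
+ oid := "00000000-0000-0000-0000-000000000000" // hypothetical object ID
+ in := UserDelegationKey{SignedOid: &oid, SignedStart: &start, SignedExpiry: &expiry}
+ data, err := xml.Marshal(in)
+ if err != nil {
+ return UserDelegationKey{}, err
+ }
+ var out UserDelegationKey
+ err = xml.Unmarshal(data, &out)
+ return out, err
+}
+
+// appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock method.
+type appendBlobClientAppendBlockOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.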
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create method.
+type appendBlobClientCreateOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal method.
+type appendBlobClientSealOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL method.
+type blobClientAbortCopyFromURLOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
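+// NOTE (editor's illustrative sketch, not part of the generated code; the
+// tag format is an assumption): BlobTagsString is expected to carry tags as
+// URL-encoded key=value pairs joined with '&'. A minimal Create call might
+// pass options such as:
+func exampleAppendBlobCreateOptions() appendBlobClientCreateOptions {
+ tags := "project=docs&stage=draft" // hypothetical tag set
+ return appendBlobClientCreateOptions{
+ BlobTagsString: &tags,
+ Metadata: map[string]string{"origin": "editor_example"}, // metadata names follow C# identifier rules
+ }
+}
+
+// blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease method.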
+type blobClientAcquireLeaseOptions struct {
+ // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
+ // can be between 15 and 60 seconds. A lease duration cannot be changed using
+ // renew or change.
+ Duration *int32
+ // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease method.
+type blobClientBreakLeaseOptions struct {
+ // For a break operation, the proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period. If this
+ // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+ // and an infinite lease breaks immediately.
+ BreakPeriod *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease method.
+type blobClientChangeLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL method.
+type blobClientCopyFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional.
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot method. +type blobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy +// method. +type blobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. +type blobClientDeleteOptions struct { + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + BlobDeleteType *BlobDeleteType + // Required if the blob has associated snapshots. 
Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. +type blobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. +type blobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties method. +type blobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. +type blobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientQueryOptions contains the optional parameters for the blobClient.Query method. +type blobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease method. +type blobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease method. 
+type blobClientRenewLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method.
+type blobClientSetExpiryOptions struct {
+ // The time to set the blob to expire
+ ExpiresOn *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders method.
+type blobClientSetHTTPHeadersOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetImmutabilityPolicyOptions contains the optional parameters for the blobClient.SetImmutabilityPolicy method.
+type blobClientSetImmutabilityPolicyOptions struct {
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method.
+type blobClientSetLegalHoldOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata method.
+type blobClientSetMetadataOptions struct {
+ // Optional. Specifies a user-defined name-value pair associated with the blob.
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. +type blobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Blob tags + Tags *BlobTags + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. +type blobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL method. +type blobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+ SealBlob *bool
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method.
+type blobClientUndeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blockBlobClientCommitBlockListOptions contains the optional parameters for the blockBlobClient.CommitBlockList method.
+type blockBlobClientCommitBlockListOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList method. +type blockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL method. +type blockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// blockBlobClientStageBlockFromURLOptions contains the optional parameters for the blockBlobClient.StageBlockFromURL method.
+type blockBlobClientStageBlockFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+ // Bytes of source data in the specified range.
+ SourceRange *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock method.
+type blockBlobClientStageBlockOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload method.
+type blockBlobClientUploadOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease method.
+type containerClientAcquireLeaseOptions struct {
+ // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
+ // can be between 15 and 60 seconds. A lease duration cannot be changed using
+ // renew or change.
+ Duration *int32
+ // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease method.
+type containerClientBreakLeaseOptions struct {
+ // For a break operation, the proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period. If this
+ // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+ // and an infinite lease breaks immediately.
+ BreakPeriod *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease method.
+type containerClientChangeLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
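+// NOTE (editor's illustrative sketch, not part of the generated code): per
+// the Duration comment above, -1 requests an infinite lease, while finite
+// leases must be between 15 and 60 seconds.
+func exampleContainerAcquireLease() containerClientAcquireLeaseOptions {
+ duration := int32(-1) // never expires
+ proposed := "11111111-1111-1111-1111-111111111111" // hypothetical lease GUID
+ return containerClientAcquireLeaseOptions{Duration: &duration, ProposedLeaseID: &proposed}
+}
+
+// containerClientCreateOptions contains the optional parameters for the containerClient.Create method.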
+type containerClientCreateOptions struct {
+ // Specifies whether data in the container may be accessed publicly and the level of access
+ Access *PublicAccessType
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientDeleteOptions contains the optional parameters for the containerClient.Delete method.
+type containerClientDeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientGetAccessPolicyOptions contains the optional parameters for the containerClient.GetAccessPolicy method.
+type containerClientGetAccessPolicyOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo method.
+type containerClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties method.
+type containerClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
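+// NOTE (editor's illustrative sketch, not part of the generated code; the
+// list callback is hypothetical): Marker and Maxresults drive cursor-style
+// pagination. A caller feeds each returned NextMarker back in until the
+// service returns an empty marker:
+func examplePageThroughBlobs(list func(opts containerClientListBlobFlatSegmentOptions) (nextMarker *string, err error)) error {
+ pageSize := int32(1000)
+ var marker *string // nil requests the first page
+ for {
+ next, err := list(containerClientListBlobFlatSegmentOptions{Marker: marker, Maxresults: &pageSize})
+ if err != nil {
+ return err
+ }
+ if next == nil || *next == "" {
+ return nil // no more pages
+ }
+ marker = next
+ }
+}
+
+// containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment
+// method.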
+type containerClientListBlobFlatSegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only blobs whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment
+// method.
+type containerClientListBlobHierarchySegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only blobs whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease method. +type containerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. +type containerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease method. +type containerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. +type containerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSetAccessPolicyOptions contains the optional parameters for the containerClient.SetAccessPolicy method. +type containerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // the acls for the container + ContainerACL []*SignedIdentifier + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata method. +type containerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. +type containerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages method. +type pageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental method. +type pageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create method.
+type pageBlobClientCreateOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Optional. Indicates the tier to be set on the page blob.
+ Tier *PremiumPageBlobAccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the pageBlobClient.GetPageRangesDiff method.
+type pageBlobClientGetPageRangesDiffOptions struct {
+ // A string value that identifies the portion of the list of results to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all results remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of results to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges method. +type pageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize method. +type pageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the pageBlobClient.UpdateSequenceNumber +// method. +type pageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUploadPagesFromURLOptions contains the optional parameters for the pageBlobClient.UploadPagesFromURL method. +type pageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages method. +type pageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte
+}
+
+// serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method.
+type serviceClientFilterBlobsOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Filters the results to return only blobs whose tags match the specified expression.
+ Where *string
+}
+
+// serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method.
+type serviceClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method.
+type serviceClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method.
+type serviceClientGetStatisticsOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey method.
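+//
+// Every field in these generated options structs is an optional pointer; a minimal,
+// hypothetical sketch of populating one (illustrative only, not part of the generated code):
+//
+//	timeout := int32(30)
+//	opts := &serviceClientGetUserDelegationKeyOptions{Timeout: &timeout}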
+type serviceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// method. +type serviceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. +type serviceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. +type serviceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go new file mode 100644 index 000000000000..bad81201ba2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go @@ -0,0 +1,1247 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +type pageBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newPageBlobClient creates a new instance of pageBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newPageBlobClient(endpoint string, pl runtime.Pipeline) *pageBlobClient { + client := &pageBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// pageBlobClientClearPagesOptions - pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
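+//
+// A minimal sketch of the call shape, assuming ctx and client already exist (a clear
+// sends no request body, so contentLength is 0; the range value below is hypothetical):
+//
+//	rng := "bytes=0-511"
+//	opts := &pageBlobClientClearPagesOptions{Range: &rng}
+//	_, err := client.ClearPages(ctx, 0, opts, nil, nil, nil, nil, nil)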
+func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientClearPagesResponse, error) { + req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClientClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + } + return client.clearPagesHandleResponse(resp) +} + +// clearPagesCreateRequest creates the ClearPages request. +func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientClearPagesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-page-write", "clear") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientClearPagesOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-eq", 
+ strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+ }
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientClearPagesOptions.RequestID)
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, nil
+}
+
+// clearPagesHandleResponse handles the ClearPages response.
+func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (pageBlobClientClearPagesResponse, error) {
+ result := pageBlobClientClearPagesResponse{RawResponse: resp}
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = &val
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return pageBlobClientClearPagesResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("Content-MD5"); val != "" {
+ contentMD5, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return pageBlobClientClearPagesResponse{}, err
+ }
+ result.ContentMD5 = contentMD5
+ }
+ if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+ xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return pageBlobClientClearPagesResponse{}, err
+ }
+ result.XMSContentCRC64 = xMSContentCRC64
+ }
+ if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+ blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return pageBlobClientClearPagesResponse{}, err
+ }
+ result.BlobSequenceNumber = &blobSequenceNumber
+ }
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return pageBlobClientClearPagesResponse{}, err
+ }
+ result.Date = &date
+ }
+ return result, nil
+}
+
+// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
+// The snapshot is copied such that only the differential changes since the previously copied
+// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can
+// be read or copied from as usual. This API is supported since REST version
+// 2016-05-31.
+// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// pageBlobClientCopyIncrementalOptions - pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCopyIncrementalResponse, error) { + req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobClientCopyIncrementalOptions, modifiedAccessConditions) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return pageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + } + return client.copyIncrementalHandleResponse(resp) +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCopyIncrementalOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCopyIncrementalOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// copyIncrementalHandleResponse handles the 
+// CopyIncremental response.
+func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (pageBlobClientCopyIncrementalResponse, error) {
+ result := pageBlobClientCopyIncrementalResponse{RawResponse: resp}
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = &val
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return pageBlobClientCopyIncrementalResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return pageBlobClientCopyIncrementalResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+ result.CopyID = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+ result.CopyStatus = (*CopyStatusType)(&val)
+ }
+ return result, nil
+}
+
+// Create - The Create operation creates a new page blob.
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
+// to a 512-byte boundary.
+// pageBlobClientCreateOptions - pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCreateResponse, error) {
+ req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+ if err != nil {
+ return pageBlobClientCreateResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return pageBlobClientCreateResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusCreated) {
+ return pageBlobClientCreateResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.createHandleResponse(resp)
+}
+
+// createCreateRequest creates the Create request.
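+//
+// Every generated method follows the same three-step shape seen above: build the request,
+// send it through the pipeline, then parse the response. A hypothetical call to Create
+// (ctx and client assumed; an empty body makes contentLength 0, and the 4 MiB maximum
+// size keeps the required 512-byte alignment):
+//
+//	_, err := client.Create(ctx, 0, 4*1024*1024, nil, nil, nil, nil, nil, nil)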
+func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "PageBlob") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobClientCreateOptions.Tier)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Metadata != nil { + for k, v := range pageBlobClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", 
+ *modifiedAccessConditions.IfMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+ }
+ req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobSequenceNumber != nil {
+ req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientCreateOptions.BlobSequenceNumber, 10))
+ }
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCreateOptions.RequestID)
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *pageBlobClientCreateOptions.BlobTagsString)
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", pageBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*pageBlobClientCreateOptions.ImmutabilityPolicyMode))
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*pageBlobClientCreateOptions.LegalHold))
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, nil
+}
+
+// createHandleResponse handles the Create response.
+func (client *pageBlobClient) createHandleResponse(resp *http.Response) (pageBlobClientCreateResponse, error) { + result := pageBlobClientCreateResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page +// blob +// If the operation fails it returns an *azcore.ResponseError type. +// pageBlobClientGetPageRangesOptions - pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) GetPageRanges(pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesPager { + return &pageBlobClientGetPageRangesPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.getPageRangesCreateRequest(ctx, pageBlobClientGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + }, + advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) + }, + } +} + +// getPageRangesCreateRequest creates the GetPageRanges request. 
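+//
+// Unlike the single-shot methods, GetPageRanges above returns a pager; the usual iteration
+// shape (a sketch that assumes the pager API generated elsewhere in this package) is:
+//
+//	pager := client.GetPageRanges(nil, nil, nil)
+//	for pager.NextPage(ctx) {
+//		resp := pager.PageResponse()
+//		_ = resp.PageList // one page of valid page ranges
+//	}
+//	if err := pager.Err(); err != nil {
+//		// handle the error
+//	}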
+func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Snapshot != nil { + reqQP.Set("snapshot", *pageBlobClientGetPageRangesOptions.Snapshot) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Timeout), 10)) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Marker != nil { + reqQP.Set("marker", *pageBlobClientGetPageRangesOptions.Marker) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPageRangesHandleResponse handles the GetPageRanges response. 
+func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesResponse, error) { + result := pageBlobClientGetPageRangesResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were +// changed between target blob and previous snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// pageBlobClientGetPageRangesDiffOptions - pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the +// pageBlobClient.GetPageRangesDiff method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) GetPageRangesDiff(pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesDiffPager { + return &pageBlobClientGetPageRangesDiffPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.getPageRangesDiffCreateRequest(ctx, pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + }, + advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) + }, + } +} + +// getPageRangesDiffCreateRequest creates the GetPageRangesDiff request. 
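+//
+// A hypothetical diff query against an earlier snapshot (illustrative only; the snapshot
+// value would come from a previous snapshot operation):
+//
+//	prev := "2022-01-01T00:00:00.0000000Z"
+//	opts := &pageBlobClientGetPageRangesDiffOptions{Prevsnapshot: &prev}
+//	pager := client.GetPageRangesDiff(opts, nil, nil)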
+func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Snapshot != nil { + reqQP.Set("snapshot", *pageBlobClientGetPageRangesDiffOptions.Snapshot) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Timeout), 10)) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *pageBlobClientGetPageRangesDiffOptions.Prevsnapshot) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Marker != nil { + reqQP.Set("marker", *pageBlobClientGetPageRangesDiffOptions.Marker) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL != nil { + req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesDiffOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesDiffOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
+func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesDiffResponse, error) { + result := pageBlobClientGetPageRangesDiffResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// pageBlobClientResizeOptions - pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientResizeResponse, error) { + req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobClientResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return pageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + } + return client.resizeHandleResponse(resp) +} + +// resizeCreateRequest creates the Resize request. 
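+//
+// Resize takes only the new maximum size, which must remain 512-byte aligned; a minimal,
+// hypothetical call (ctx and client assumed):
+//
+//	_, err := client.Resize(ctx, 8*1024*1024, nil, nil, nil, nil, nil)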
+func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientResizeOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientResizeOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// resizeHandleResponse handles the Resize response. 
+func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (pageBlobClientResizeResponse, error) { + result := pageBlobClientResizeResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// pageBlobClientUpdateSequenceNumberOptions - pageBlobClientUpdateSequenceNumberOptions contains the optional parameters +// for the pageBlobClient.UpdateSequenceNumber method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUpdateSequenceNumberResponse, error) { + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return pageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + } + return client.updateSequenceNumberHandleResponse(resp) +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
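+//
+// A hypothetical call that pins an explicit sequence number; the action constant name is
+// assumed to be the generated SequenceNumberActionTypeUpdate:
+//
+//	seq := int64(7)
+//	opts := &pageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}
+//	_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionTypeUpdate, opts, nil, nil)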
+func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUpdateSequenceNumberOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber != nil { + req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber, 10)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUpdateSequenceNumberOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
+func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (pageBlobClientUpdateSequenceNumberResponse, error) { + result := pageBlobClientUpdateSequenceNumberResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// body - Initial data +// pageBlobClientUploadPagesOptions - pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUploadPagesResponse, error) { + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobClientUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesHandleResponse(resp) +} + +// uploadPagesCreateRequest creates the UploadPages request. 
+func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "page")
+	if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesOptions.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header.Set("x-ms-page-write", "update")
+	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+	if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentMD5 != nil {
+		req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentMD5))
+	}
+	if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentCRC64 != nil {
+		req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentCRC64))
+	}
+	if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Range != nil {
+		req.Raw().Header.Set("x-ms-range", *pageBlobClientUploadPagesOptions.Range)
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
+	}
+	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10))
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10))
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+	}
+	req.Raw().Header.Set("x-ms-version", "2020-10-02")
+	if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.RequestID != nil {
+		req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesOptions.RequestID)
+	}
+	req.Raw().Header.Set("Accept", "application/xml")
+	return req, req.SetBody(body, "application/octet-stream")
+}
+
+// uploadPagesHandleResponse handles the UploadPages response.
+func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (pageBlobClientUploadPagesResponse, error) {
+	result := pageBlobClientUploadPagesResponse{RawResponse: resp}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = &val
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.XMSContentCRC64 = xMSContentCRC64
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return pageBlobClientUploadPagesResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	return result, nil
+}
+
+// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from
+// a URL
+// If the operation fails it returns an *azcore.ResponseError type.
+// sourceURL - Specify a URL to the copy source.
+// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header
+// and x-ms-range/Range destination range header.
+// contentLength - The length of the request. +// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. +// pageBlobClientUploadPagesFromURLOptions - pageBlobClientUploadPagesFromURLOptions contains the optional parameters for +// the pageBlobClient.UploadPagesFromURL method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (pageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobClientUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
+func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "page")
+	if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesFromURLOptions.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header.Set("x-ms-page-write", "update")
+	req.Raw().Header.Set("x-ms-copy-source", sourceURL)
+	req.Raw().Header.Set("x-ms-source-range", sourceRange)
+	if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentMD5 != nil {
+		req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentMD5))
+	}
+	if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64 != nil {
+		req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64))
+	}
+	req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+	req.Raw().Header.Set("x-ms-range", rangeParam)
+	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+		req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+		req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
+	}
+	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+		req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10))
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10))
+	}
+	if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+		req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+	}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+		req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
+	}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+		req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
+	}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
+		req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
+	}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
+		req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
+	}
+	req.Raw().Header.Set("x-ms-version", "2020-10-02")
+	if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.RequestID != nil {
+		req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesFromURLOptions.RequestID)
+	}
+	if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization != nil {
+		req.Raw().Header.Set("x-ms-copy-source-authorization", *pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization)
+	}
+	req.Raw().Header.Set("Accept", "application/xml")
+	return req, nil
+}
+
+// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response.
+func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (pageBlobClientUploadPagesFromURLResponse, error) { + result := pageBlobClientUploadPagesFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go new file mode 100644 index 000000000000..9f0cc4629fdd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go @@ -0,0 +1,287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "reflect" +) + +// containerClientListBlobFlatSegmentPager provides operations for iterating over paged responses. +type containerClientListBlobFlatSegmentPager struct { + client *containerClient + current containerClientListBlobFlatSegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, containerClientListBlobFlatSegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. 
+func (p *containerClientListBlobFlatSegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *containerClientListBlobFlatSegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListBlobsFlatSegmentResponse.NextMarker == nil || len(*p.current.ListBlobsFlatSegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listBlobFlatSegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current containerClientListBlobFlatSegmentResponse page. +func (p *containerClientListBlobFlatSegmentPager) PageResponse() containerClientListBlobFlatSegmentResponse { + return p.current +} + +// containerClientListBlobHierarchySegmentPager provides operations for iterating over paged responses. +type containerClientListBlobHierarchySegmentPager struct { + client *containerClient + current containerClientListBlobHierarchySegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *containerClientListBlobHierarchySegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *containerClientListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListBlobsHierarchySegmentResponse.NextMarker == nil || len(*p.current.ListBlobsHierarchySegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listBlobHierarchySegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current containerClientListBlobHierarchySegmentResponse page. +func (p *containerClientListBlobHierarchySegmentPager) PageResponse() containerClientListBlobHierarchySegmentResponse { + return p.current +} + +// pageBlobClientGetPageRangesDiffPager provides operations for iterating over paged responses. +type pageBlobClientGetPageRangesDiffPager struct { + client *pageBlobClient + current pageBlobClientGetPageRangesDiffResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. 
+func (p *pageBlobClientGetPageRangesDiffPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *pageBlobClientGetPageRangesDiffPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.getPageRangesDiffHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current pageBlobClientGetPageRangesDiffResponse page. +func (p *pageBlobClientGetPageRangesDiffPager) PageResponse() pageBlobClientGetPageRangesDiffResponse { + return p.current +} + +// pageBlobClientGetPageRangesPager provides operations for iterating over paged responses. +type pageBlobClientGetPageRangesPager struct { + client *pageBlobClient + current pageBlobClientGetPageRangesResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, pageBlobClientGetPageRangesResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *pageBlobClientGetPageRangesPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *pageBlobClientGetPageRangesPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.getPageRangesHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current pageBlobClientGetPageRangesResponse page. +func (p *pageBlobClientGetPageRangesPager) PageResponse() pageBlobClientGetPageRangesResponse { + return p.current +} + +// serviceClientListContainersSegmentPager provides operations for iterating over paged responses. +type serviceClientListContainersSegmentPager struct { + client *serviceClient + current serviceClientListContainersSegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, serviceClientListContainersSegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *serviceClientListContainersSegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. 
+func (p *serviceClientListContainersSegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListContainersSegmentResponse.NextMarker == nil || len(*p.current.ListContainersSegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listContainersSegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current serviceClientListContainersSegmentResponse page. +func (p *serviceClientListContainersSegmentPager) PageResponse() serviceClientListContainersSegmentResponse { + return p.current +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go new file mode 100644 index 000000000000..60c1c0c34ec8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go @@ -0,0 +1,2434 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "net/http" + "time" +) + +// appendBlobClientAppendBlockFromURLResponse contains the response from method appendBlobClient.AppendBlockFromURL. +type appendBlobClientAppendBlockFromURLResponse struct { + appendBlobClientAppendBlockFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientAppendBlockFromURLResult contains the result from method appendBlobClient.AppendBlockFromURL. +type appendBlobClientAppendBlockFromURLResult struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// appendBlobClientAppendBlockResponse contains the response from method appendBlobClient.AppendBlock. +type appendBlobClientAppendBlockResponse struct { + appendBlobClientAppendBlockResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientAppendBlockResult contains the result from method appendBlobClient.AppendBlock. +type appendBlobClientAppendBlockResult struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// appendBlobClientCreateResponse contains the response from method appendBlobClient.Create. +type appendBlobClientCreateResponse struct { + appendBlobClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientCreateResult contains the result from method appendBlobClient.Create. +type appendBlobClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// appendBlobClientSealResponse contains the response from method appendBlobClient.Seal. +type appendBlobClientSealResponse struct { + appendBlobClientSealResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientSealResult contains the result from method appendBlobClient.Seal. +type appendBlobClientSealResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientAbortCopyFromURLResponse contains the response from method blobClient.AbortCopyFromURL. +type blobClientAbortCopyFromURLResponse struct { + blobClientAbortCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientAbortCopyFromURLResult contains the result from method blobClient.AbortCopyFromURL. +type blobClientAbortCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientAcquireLeaseResponse contains the response from method blobClient.AcquireLease. +type blobClientAcquireLeaseResponse struct { + blobClientAcquireLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientAcquireLeaseResult contains the result from method blobClient.AcquireLease. +type blobClientAcquireLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientBreakLeaseResponse contains the response from method blobClient.BreakLease. +type blobClientBreakLeaseResponse struct { + blobClientBreakLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientBreakLeaseResult contains the result from method blobClient.BreakLease. +type blobClientBreakLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientChangeLeaseResponse contains the response from method blobClient.ChangeLease. +type blobClientChangeLeaseResponse struct { + blobClientChangeLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientChangeLeaseResult contains the result from method blobClient.ChangeLease. +type blobClientChangeLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientCopyFromURLResponse contains the response from method blobClient.CopyFromURL. +type blobClientCopyFromURLResponse struct { + blobClientCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientCopyFromURLResult contains the result from method blobClient.CopyFromURL. +type blobClientCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blobClientCreateSnapshotResponse contains the response from method blobClient.CreateSnapshot. +type blobClientCreateSnapshotResponse struct { + blobClientCreateSnapshotResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientCreateSnapshotResult contains the result from method blobClient.CreateSnapshot. +type blobClientCreateSnapshotResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientDeleteImmutabilityPolicyResponse contains the response from method blobClient.DeleteImmutabilityPolicy. +type blobClientDeleteImmutabilityPolicyResponse struct { + blobClientDeleteImmutabilityPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDeleteImmutabilityPolicyResult contains the result from method blobClient.DeleteImmutabilityPolicy. +type blobClientDeleteImmutabilityPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientDeleteResponse contains the response from method blobClient.Delete. +type blobClientDeleteResponse struct { + blobClientDeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDeleteResult contains the result from method blobClient.Delete. +type blobClientDeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientDownloadResponse contains the response from method blobClient.Download. +type blobClientDownloadResponse struct { + blobClientDownloadResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDownloadResult contains the result from method blobClient.Download. +type blobClientDownloadResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientGetAccountInfoResponse contains the response from method blobClient.GetAccountInfo. +type blobClientGetAccountInfoResponse struct { + blobClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientGetAccountInfoResult contains the result from method blobClient.GetAccountInfo. +type blobClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. 
+ SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientGetPropertiesResponse contains the response from method blobClient.GetProperties. +type blobClientGetPropertiesResponse struct { + blobClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientGetPropertiesResult contains the result from method blobClient.GetProperties. +type blobClientGetPropertiesResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientGetTagsResponse contains the response from method blobClient.GetTags. +type blobClientGetTagsResponse struct { + blobClientGetTagsResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// blobClientGetTagsResult contains the result from method blobClient.GetTags. +type blobClientGetTagsResult struct { + BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// blobClientQueryResponse contains the response from method blobClient.Query. +type blobClientQueryResponse struct { + blobClientQueryResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientQueryResult contains the result from method blobClient.Query. +type blobClientQueryResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. 
+ CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientReleaseLeaseResponse contains the response from method blobClient.ReleaseLease. +type blobClientReleaseLeaseResponse struct { + blobClientReleaseLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientReleaseLeaseResult contains the result from method blobClient.ReleaseLease. +type blobClientReleaseLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientRenewLeaseResponse contains the response from method blobClient.RenewLease. +type blobClientRenewLeaseResponse struct { + blobClientRenewLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientRenewLeaseResult contains the result from method blobClient.RenewLease. +type blobClientRenewLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetExpiryResponse contains the response from method blobClient.SetExpiry. +type blobClientSetExpiryResponse struct { + blobClientSetExpiryResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetExpiryResult contains the result from method blobClient.SetExpiry. +type blobClientSetExpiryResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetHTTPHeadersResponse contains the response from method blobClient.SetHTTPHeaders. +type blobClientSetHTTPHeadersResponse struct { + blobClientSetHTTPHeadersResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetHTTPHeadersResult contains the result from method blobClient.SetHTTPHeaders. +type blobClientSetHTTPHeadersResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetImmutabilityPolicyResponse contains the response from method blobClient.SetImmutabilityPolicy. +type blobClientSetImmutabilityPolicyResponse struct { + blobClientSetImmutabilityPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetImmutabilityPolicyResult contains the result from method blobClient.SetImmutabilityPolicy. +type blobClientSetImmutabilityPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. 
+ ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetLegalHoldResponse contains the response from method blobClient.SetLegalHold. +type blobClientSetLegalHoldResponse struct { + blobClientSetLegalHoldResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetLegalHoldResult contains the result from method blobClient.SetLegalHold. +type blobClientSetLegalHoldResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetMetadataResponse contains the response from method blobClient.SetMetadata. +type blobClientSetMetadataResponse struct { + blobClientSetMetadataResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetMetadataResult contains the result from method blobClient.SetMetadata. +type blobClientSetMetadataResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientSetTagsResponse contains the response from method blobClient.SetTags. +type blobClientSetTagsResponse struct { + blobClientSetTagsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetTagsResult contains the result from method blobClient.SetTags. +type blobClientSetTagsResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetTierResponse contains the response from method blobClient.SetTier. +type blobClientSetTierResponse struct { + blobClientSetTierResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetTierResult contains the result from method blobClient.SetTier. +type blobClientSetTierResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientStartCopyFromURLResponse contains the response from method blobClient.StartCopyFromURL. +type blobClientStartCopyFromURLResponse struct { + blobClientStartCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientStartCopyFromURLResult contains the result from method blobClient.StartCopyFromURL. +type blobClientStartCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientUndeleteResponse contains the response from method blobClient.Undelete. +type blobClientUndeleteResponse struct { + blobClientUndeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientUndeleteResult contains the result from method blobClient.Undelete. +type blobClientUndeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blockBlobClientCommitBlockListResponse contains the response from method blockBlobClient.CommitBlockList. +type blockBlobClientCommitBlockListResponse struct { + blockBlobClientCommitBlockListResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// blockBlobClientCommitBlockListResult contains the result from method blockBlobClient.CommitBlockList. +type blockBlobClientCommitBlockListResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientGetBlockListResponse contains the response from method blockBlobClient.GetBlockList. +type blockBlobClientGetBlockListResponse struct { + blockBlobClientGetBlockListResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientGetBlockListResult contains the result from method blockBlobClient.GetBlockList. +type blockBlobClientGetBlockListResult struct { + BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// blockBlobClientPutBlobFromURLResponse contains the response from method blockBlobClient.PutBlobFromURL. +type blockBlobClientPutBlobFromURLResponse struct { + blockBlobClientPutBlobFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientPutBlobFromURLResult contains the result from method blockBlobClient.PutBlobFromURL. 
+type blockBlobClientPutBlobFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blockBlobClientStageBlockFromURLResponse contains the response from method blockBlobClient.StageBlockFromURL. +type blockBlobClientStageBlockFromURLResponse struct { + blockBlobClientStageBlockFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientStageBlockFromURLResult contains the result from method blockBlobClient.StageBlockFromURL. +type blockBlobClientStageBlockFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientStageBlockResponse contains the response from method blockBlobClient.StageBlock. +type blockBlobClientStageBlockResponse struct { + blockBlobClientStageBlockResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientStageBlockResult contains the result from method blockBlobClient.StageBlock. +type blockBlobClientStageBlockResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientUploadResponse contains the response from method blockBlobClient.Upload. +type blockBlobClientUploadResponse struct { + blockBlobClientUploadResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientUploadResult contains the result from method blockBlobClient.Upload. +type blockBlobClientUploadResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// containerClientAcquireLeaseResponse contains the response from method containerClient.AcquireLease. +type containerClientAcquireLeaseResponse struct { + containerClientAcquireLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientAcquireLeaseResult contains the result from method containerClient.AcquireLease. +type containerClientAcquireLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. 
+ LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientBreakLeaseResponse contains the response from method containerClient.BreakLease. +type containerClientBreakLeaseResponse struct { + containerClientBreakLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientBreakLeaseResult contains the result from method containerClient.BreakLease. +type containerClientBreakLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientChangeLeaseResponse contains the response from method containerClient.ChangeLease. +type containerClientChangeLeaseResponse struct { + containerClientChangeLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientChangeLeaseResult contains the result from method containerClient.ChangeLease. +type containerClientChangeLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientCreateResponse contains the response from method containerClient.Create. +type containerClientCreateResponse struct { + containerClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientCreateResult contains the result from method containerClient.Create. +type containerClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientDeleteResponse contains the response from method containerClient.Delete. +type containerClientDeleteResponse struct { + containerClientDeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientDeleteResult contains the result from method containerClient.Delete. +type containerClientDeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientGetAccessPolicyResponse contains the response from method containerClient.GetAccessPolicy. +type containerClientGetAccessPolicyResponse struct { + containerClientGetAccessPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetAccessPolicyResult contains the result from method containerClient.GetAccessPolicy. +type containerClientGetAccessPolicyResult struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientGetAccountInfoResponse contains the response from method containerClient.GetAccountInfo. +type containerClientGetAccountInfoResponse struct { + containerClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetAccountInfoResult contains the result from method containerClient.GetAccountInfo. +type containerClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. 
+ SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientGetPropertiesResponse contains the response from method containerClient.GetProperties. +type containerClientGetPropertiesResponse struct { + containerClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetPropertiesResult contains the result from method containerClient.GetProperties. +type containerClientGetPropertiesResult struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *string + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientListBlobFlatSegmentResponse contains the response from method containerClient.ListBlobFlatSegment. +type containerClientListBlobFlatSegmentResponse struct { + containerClientListBlobFlatSegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientListBlobFlatSegmentResult contains the result from method containerClient.ListBlobFlatSegment. +type containerClientListBlobFlatSegmentResult struct { + ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientListBlobHierarchySegmentResponse contains the response from method containerClient.ListBlobHierarchySegment. +type containerClientListBlobHierarchySegmentResponse struct { + containerClientListBlobHierarchySegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientListBlobHierarchySegmentResult contains the result from method containerClient.ListBlobHierarchySegment. +type containerClientListBlobHierarchySegmentResult struct { + ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientReleaseLeaseResponse contains the response from method containerClient.ReleaseLease. +type containerClientReleaseLeaseResponse struct { + containerClientReleaseLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientReleaseLeaseResult contains the result from method containerClient.ReleaseLease. +type containerClientReleaseLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRenameResponse contains the response from method containerClient.Rename. +type containerClientRenameResponse struct { + containerClientRenameResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRenameResult contains the result from method containerClient.Rename. +type containerClientRenameResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRenewLeaseResponse contains the response from method containerClient.RenewLease. 
+type containerClientRenewLeaseResponse struct { + containerClientRenewLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRenewLeaseResult contains the result from method containerClient.RenewLease. +type containerClientRenewLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRestoreResponse contains the response from method containerClient.Restore. +type containerClientRestoreResponse struct { + containerClientRestoreResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRestoreResult contains the result from method containerClient.Restore. +type containerClientRestoreResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSetAccessPolicyResponse contains the response from method containerClient.SetAccessPolicy. +type containerClientSetAccessPolicyResponse struct { + containerClientSetAccessPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSetAccessPolicyResult contains the result from method containerClient.SetAccessPolicy. +type containerClientSetAccessPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSetMetadataResponse contains the response from method containerClient.SetMetadata. +type containerClientSetMetadataResponse struct { + containerClientSetMetadataResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSetMetadataResult contains the result from method containerClient.SetMetadata. 
+type containerClientSetMetadataResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSubmitBatchResponse contains the response from method containerClient.SubmitBatch. +type containerClientSubmitBatchResponse struct { + containerClientSubmitBatchResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSubmitBatchResult contains the result from method containerClient.SubmitBatch. +type containerClientSubmitBatchResult struct { + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientClearPagesResponse contains the response from method pageBlobClient.ClearPages. +type pageBlobClientClearPagesResponse struct { + pageBlobClientClearPagesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientClearPagesResult contains the result from method pageBlobClient.ClearPages. +type pageBlobClientClearPagesResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// pageBlobClientCopyIncrementalResponse contains the response from method pageBlobClient.CopyIncremental. +type pageBlobClientCopyIncrementalResponse struct { + pageBlobClientCopyIncrementalResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientCopyIncrementalResult contains the result from method pageBlobClient.CopyIncremental. +type pageBlobClientCopyIncrementalResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientCreateResponse contains the response from method pageBlobClient.Create. +type pageBlobClientCreateResponse struct { + pageBlobClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientCreateResult contains the result from method pageBlobClient.Create. +type pageBlobClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// pageBlobClientGetPageRangesDiffResponse contains the response from method pageBlobClient.GetPageRangesDiff. +type pageBlobClientGetPageRangesDiffResponse struct { + pageBlobClientGetPageRangesDiffResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientGetPageRangesDiffResult contains the result from method pageBlobClient.GetPageRangesDiff. +type pageBlobClientGetPageRangesDiffResult struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. 
+ ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// pageBlobClientGetPageRangesResponse contains the response from method pageBlobClient.GetPageRanges. +type pageBlobClientGetPageRangesResponse struct { + pageBlobClientGetPageRangesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientGetPageRangesResult contains the result from method pageBlobClient.GetPageRanges. +type pageBlobClientGetPageRangesResult struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// pageBlobClientResizeResponse contains the response from method pageBlobClient.Resize. +type pageBlobClientResizeResponse struct { + pageBlobClientResizeResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientResizeResult contains the result from method pageBlobClient.Resize. +type pageBlobClientResizeResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientUpdateSequenceNumberResponse contains the response from method pageBlobClient.UpdateSequenceNumber. +type pageBlobClientUpdateSequenceNumberResponse struct { + pageBlobClientUpdateSequenceNumberResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUpdateSequenceNumberResult contains the result from method pageBlobClient.UpdateSequenceNumber. 
+type pageBlobClientUpdateSequenceNumberResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientUploadPagesFromURLResponse contains the response from method pageBlobClient.UploadPagesFromURL. +type pageBlobClientUploadPagesFromURLResponse struct { + pageBlobClientUploadPagesFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUploadPagesFromURLResult contains the result from method pageBlobClient.UploadPagesFromURL. +type pageBlobClientUploadPagesFromURLResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// pageBlobClientUploadPagesResponse contains the response from method pageBlobClient.UploadPages. +type pageBlobClientUploadPagesResponse struct { + pageBlobClientUploadPagesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUploadPagesResult contains the result from method pageBlobClient.UploadPages. +type pageBlobClientUploadPagesResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// serviceClientFilterBlobsResponse contains the response from method serviceClient.FilterBlobs. +type serviceClientFilterBlobsResponse struct { + serviceClientFilterBlobsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientFilterBlobsResult contains the result from method serviceClient.FilterBlobs. +type serviceClientFilterBlobsResult struct { + FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetAccountInfoResponse contains the response from method serviceClient.GetAccountInfo. +type serviceClientGetAccountInfoResponse struct { + serviceClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetAccountInfoResult contains the result from method serviceClient.GetAccountInfo. +type serviceClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// serviceClientGetPropertiesResponse contains the response from method serviceClient.GetProperties. +type serviceClientGetPropertiesResponse struct { + serviceClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// serviceClientGetPropertiesResult contains the result from method serviceClient.GetProperties. +type serviceClientGetPropertiesResult struct { + StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetStatisticsResponse contains the response from method serviceClient.GetStatistics. +type serviceClientGetStatisticsResponse struct { + serviceClientGetStatisticsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetStatisticsResult contains the result from method serviceClient.GetStatistics. +type serviceClientGetStatisticsResult struct { + StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetUserDelegationKeyResponse contains the response from method serviceClient.GetUserDelegationKey. +type serviceClientGetUserDelegationKeyResponse struct { + serviceClientGetUserDelegationKeyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetUserDelegationKeyResult contains the result from method serviceClient.GetUserDelegationKey. +type serviceClientGetUserDelegationKeyResult struct { + UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientListContainersSegmentResponse contains the response from method serviceClient.ListContainersSegment. +type serviceClientListContainersSegmentResponse struct { + serviceClientListContainersSegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientListContainersSegmentResult contains the result from method serviceClient.ListContainersSegment. +type serviceClientListContainersSegmentResult struct { + ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string `xml:"Version"` +} + +// serviceClientSetPropertiesResponse contains the response from method serviceClient.SetProperties. +type serviceClientSetPropertiesResponse struct { + serviceClientSetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientSetPropertiesResult contains the result from method serviceClient.SetProperties. +type serviceClientSetPropertiesResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// serviceClientSubmitBatchResponse contains the response from method serviceClient.SubmitBatch. +type serviceClientSubmitBatchResponse struct { + serviceClientSubmitBatchResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientSubmitBatchResult contains the result from method serviceClient.SubmitBatch. +type serviceClientSubmitBatchResult struct { + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go new file mode 100644 index 000000000000..7dcf6ef13e3c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go @@ -0,0 +1,551 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +type serviceClient struct { + endpoint string + pl runtime.Pipeline +} + +// newServiceClient creates a new instance of serviceClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { + client := &serviceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search +// expression. Filter blobs searches across all containers within a storage account but can +// be scoped within the expression to a single container. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. 
+func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) { + req, err := client.filterBlobsCreateRequest(ctx, options) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + } + return client.filterBlobsHandleResponse(resp) +} + +// filterBlobsCreateRequest creates the FilterBlobs request. +func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Where != nil { + reqQP.Set("where", *options.Where) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) { + result := serviceClientFilterBlobsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return serviceClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. +func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. 
+func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. +func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) { + result := serviceClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + return result, nil +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. +func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return serviceClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) { + result := serviceClientGetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return serviceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. +func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) { + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + } + return client.getStatisticsHandleResponse(resp) +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. 
+func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) { + result := serviceClientGetStatisticsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return serviceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// If the operation fails it returns an *azcore.ResponseError type. +// keyInfo - Key information +// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey +// method. +func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) { + req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getUserDelegationKeyHandleResponse(resp) +} + +// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. +func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "userdelegationkey") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, keyInfo) +} + +// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
+func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) { + result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + return result, nil +} + +// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// method. +func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager { + return &serviceClientListContainersSegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listContainersSegmentCreateRequest(ctx, options) + }, + advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker) + }, + } +} + +// listContainersSegmentCreateRequest creates the ListContainersSegment request. +func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listContainersSegmentHandleResponse handles the ListContainersSegment response. 
+func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) { + result := serviceClientListContainersSegmentResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { + return serviceClientListContainersSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules +// If the operation fails it returns an *azcore.ResponseError type. +// storageServiceProperties - The StorageService properties. +// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. +func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return serviceClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. +func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, storageServiceProperties) +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) { + result := serviceClientSetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// multipartContentType - Required. 
The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. +func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return serviceClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("Content-Type", multipartContentType) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, body) +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) { + result := serviceClientSubmitBatchResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go new file mode 100644 index 000000000000..42726159b6f9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go @@ -0,0 +1,42 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package azblob + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go new file mode 100644 index 000000000000..c51d8d78c128 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go @@ -0,0 +1,58 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go new file mode 100644 index 000000000000..1cf97387de22 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "encoding/xml" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
+func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1d85..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7aba..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
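The deleted README above describes the byte-stream-to-event-handler flow only in prose. As a minimal sketch (not part of this diff), this is how the removed library was typically driven, using only the CreateParser and Parse signatures visible in the deleted parser.go further below; myHandler is a hypothetical value assumed to implement the AnsiEventHandler interface:

package main

import (
	ansiterm "github.com/Azure/go-ansiterm"
)

// run feeds raw terminal output through the ANSI state machine; the parser
// translates recognized sequences into calls on the supplied event handler.
func run(myHandler ansiterm.AnsiEventHandler) error {
	// Start in the "Ground" state, matching the state names in parser.go.
	parser := ansiterm.CreateParser("Ground", myHandler)

	// "ESC [ A" is Cursor Up: the parser ends in csiDispatch and calls
	// myHandler.CUU(1) (the parameter defaults to 1 when omitted).
	_, err := parser.Parse([]byte("\x1b[A"))
	return err
}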
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33bc9e..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - 
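// Illustrative aside, not part of the deleted file: a worked example of how
// the constants above compose into one complete control sequence. Assuming a
// parser built with CreateParser (see the deleted parser.go below):
//
//	seq := []byte(ansiterm.KEY_ESC_CSI + "2A") // 0x1B 0x5B '2' 'A': cursor up two rows
//	_, err := parser.Parse(seq)
//
// ANSI_ESCAPE_PRIMARY (0x1B) moves Ground -> Escape, ANSI_ESCAPE_SECONDARY
// ('[', 0x5B) moves Escape -> CsiEntry, '2' is collected as a parameter in
// CsiParam, and the final 'A' returns to Ground, where csiDispatch (in the
// deleted parser_actions.go below) calls the event handler's CUU(2).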
-func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c038..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c5ec..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c3428..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 
index 1c719db9e48c..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd231ba..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c202..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - 
DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e94693a..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab6963..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada62a..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) 
- } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cde3b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt int) 
[]int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9aa6..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - 
case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd12da..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493a22..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index 5599082ae9cb..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,196 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" - windows "golang.org/x/sys/windows" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - - // syscall uses negative numbers - // windows package uses very big uint32 - // Keep these switches split so we don't have to convert ints too much. - switch uint32(nFile) { - case windows.STD_INPUT_HANDLE: - file = os.Stdin - case windows.STD_OUTPUT_HANDLE: - file = os.Stdout - case windows.STD_ERROR_HANDLE: - file = os.Stderr - default: - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b912b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. 
-// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. -// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
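For readers unfamiliar with the pattern the note above describes, here is a minimal self-contained sketch of the lazy kernel32 proc-call convention this file uses (a zero first return value means failure); `GetConsoleTitleW` is chosen purely for illustration and is not one of the procs this package binds:

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

var (
	kernel32            = syscall.NewLazyDLL("kernel32.dll")
	getConsoleTitleProc = kernel32.NewProc("GetConsoleTitleW")
)

// consoleTitle follows the calling convention used throughout this file:
// pass pointers as uintptr(unsafe.Pointer(...)) and treat a zero first
// return value as failure, falling back to the syscall error.
func consoleTitle() (string, error) {
	buf := make([]uint16, 256)
	r1, _, err := getConsoleTitleProc.Call(
		uintptr(unsafe.Pointer(&buf[0])), // output buffer
		uintptr(len(buf)),                // buffer size in characters
	)
	if r1 == 0 { // zero characters copied indicates failure
		return "", err
	}
	return syscall.UTF16ToString(buf[:int(r1)]), nil
}

func main() {
	title, err := consoleTitle()
	if err != nil {
		fmt.Println("GetConsoleTitleW failed:", err)
		return
	}
	fmt.Println("console title:", title)
}
```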
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
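As a usage sketch of the exported GetConsoleMode/SetConsoleMode wrappers and the mode flags defined above: enable Windows' built-in VT processing for stdout and restore the previous mode on exit. Error handling via panic is for brevity only.

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"

	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// GetStdFile resolves a standard handle to an *os.File plus a raw fd.
	_, fd := winterm.GetStdFile(syscall.STD_OUTPUT_HANDLE)

	mode, err := winterm.GetConsoleMode(fd)
	if err != nil {
		panic(err)
	}
	defer winterm.SetConsoleMode(fd, mode) // restore the original mode

	if err := winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil {
		panic(err)
	}
	fmt.Println("\x1b[32mVT processing enabled\x1b[0m")
}
```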
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728f49..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
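The attribute translation that follows is pure bit-twiddling from SGR codes to console attribute words. A condensed standalone sketch of the idea, covering only bold, red, and cyan (constant names abbreviated from the winterm/api.go definitions):

```go
package main

import "fmt"

// Console attribute bits, as defined in winterm/api.go.
const (
	fgBlue      uint16 = 0x0001
	fgGreen     uint16 = 0x0002
	fgRed       uint16 = 0x0004
	fgIntensity uint16 = 0x0008
	fgColorMask uint16 = fgRed | fgGreen | fgBlue
)

// applySGR condenses the translation: clear the foreground color bits,
// then OR in the bits for the requested ANSI color or style.
func applySGR(mode uint16, ansi int) uint16 {
	switch ansi {
	case 1: // SGR bold -> intensity bit
		return mode | fgIntensity
	case 31: // SGR foreground red
		return (mode &^ fgColorMask) | fgRed
	case 36: // SGR foreground cyan = green + blue
		return (mode &^ fgColorMask) | fgGreen | fgBlue
	}
	return mode
}

func main() {
	mode := fgRed | fgGreen | fgBlue      // white
	mode = applySGR(mode, 31)             // red
	mode = applySGR(mode, 1)              // bold red
	fmt.Printf("attributes: %#x\n", mode) // 0xc = FOREGROUND_RED|FOREGROUND_INTENSITY
}
```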
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index 3ee06ea72824..000000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - h.logf("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25efb..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, 
yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 2d27fa1d0288..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
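clearRange above decomposes an arbitrary start/end range into at most three rectangles. A standalone sketch of that decomposition, with illustrative local types in place of the Windows COORD/SMALL_RECT structs:

```go
package main

import "fmt"

type coord struct{ x, y int16 }
type rect struct{ from, to coord }

// splitRange mirrors clearRange's decomposition: a partial first line,
// a block of full intervening lines, and the final (possibly partial)
// line, each cleared as a single rectangle.
func splitRange(from, to coord) []rect {
	if to.y < from.y {
		return nil // invalid (negative area) request is ignored
	}
	var rects []rect
	x, y := from.x, from.y
	if x > 0 { // clear any partial initial line
		rects = append(rects, rect{coord{x, y}, coord{to.x, y}})
		x, y = 0, y+1
	}
	if y < to.y { // clear intervening full lines as one block
		rects = append(rects, rect{coord{x, y}, coord{to.x, to.y - 1}})
		x, y = 0, to.y
	}
	return append(rects, rect{coord{x, y}, coord{to.x, to.y}})
}

func main() {
	for _, r := range splitRange(coord{5, 2}, coord{79, 4}) {
		fmt.Printf("clear (%d,%d)-(%d,%d)\n", r.from.x, r.from.y, r.to.x, r.to.y)
	}
}
```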
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d77ba..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
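addInRange (below) is the clamped-add helper that effectiveSr builds on. A standalone sketch of the scroll-region computation, with the DECSTBM margins stored as 0-indexed offsets from the window top as in win_event_handler.go:

```go
package main

import "fmt"

func ensureInRange(n, lo, hi int16) int16 {
	if n < lo {
		return lo
	}
	if n > hi {
		return hi
	}
	return n
}

// addInRange, as below: add an increment while staying inside [lo, hi].
func addInRange(n, increment, lo, hi int16) int16 {
	return ensureInRange(n+increment, lo, hi)
}

// effectiveRegion mirrors effectiveSr: the stored margins are clamped to
// the visible window, and an empty or inverted region falls back to the
// full window.
func effectiveRegion(winTop, winBottom, srTop, srBottom int16) (int16, int16) {
	top := addInRange(winTop, srTop, winTop, winBottom)
	bottom := addInRange(winTop, srBottom, winTop, winBottom)
	if top >= bottom {
		return winTop, winBottom
	}
	return top, bottom
}

func main() {
	// A 25-row window with DECSTBM 5;10 (stored as offsets 4 and 9).
	top, bottom := effectiveRegion(0, 24, 4, 9)
	fmt.Println(top, bottom) // 4 9
}
```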
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 2d40fb75ad0b..000000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,743 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "log" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" -) - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD - logf func(string, ...interface{}) -} - -type Option func(*windowsAnsiEventHandler) - -func WithLogf(f func(string, ...interface{})) Option { - return func(w *windowsAnsiEventHandler) { - w.logf = f - } -} - -func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - h := &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } - for _, o := range opts { - o(h) - } - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("winEventHandler.log") - logger := log.New(logFile, "", log.LstdFlags) - if h.logf != nil { - l := h.logf - h.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - h.logf = logger.Printf - } - } - - if h.logf == nil { - h.logf = func(string, ...interface{}) {} - } - - return h -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - h.logf("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. 
- h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
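The ED switch above reduces to picking a start/end pair over the buffer. A standalone sketch of that mapping (getEraseParam has already clamped the parameter to 0..3; 3 falls through untouched, exactly as in the vendored switch):

```go
package main

import "fmt"

type coord struct{ x, y int16 }

// eraseBounds maps an "Erase in Display" parameter to the rectangle of
// buffer cells that ED clears, mirroring the switch above.
func eraseBounds(param int, cursor, size coord) (start, end coord) {
	switch param {
	case 0: // cursor to end of screen, inclusive
		return cursor, coord{size.x - 1, size.y - 1}
	case 1: // beginning of screen to cursor, inclusive
		return coord{}, cursor
	case 2: // the complete display
		return coord{}, coord{size.x - 1, size.y - 1}
	}
	return coord{}, coord{} // 3 is accepted by getEraseParam but unhandled
}

func main() {
	start, end := eraseBounds(0, coord{10, 5}, coord{80, 25})
	fmt.Printf("ED 0 clears (%d,%d)-(%d,%d)\n", start.x, start.y, end.x, end.y)
}
```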
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 000000000000..3d8b93bc7987 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
new file mode 100644
index 000000000000..259ca6d56f4c
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
@@ -0,0 +1,39 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package cache allows third parties to implement external storage for caching token data
+for distributed systems or multiple local applications access.
+
+The data stored and extracted will represent the entire cache. Therefore it is recommended
+to use one MSAL instance per user. This data is considered opaque and there are no guarantees to
+implementers on the format being passed.
+*/
+package cache
+
+// Marshaler marshals data from an internal cache to bytes that can be stored.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// Serializer can serialize the cache to binary or from binary into the cache.
+type Serializer interface {
+	Marshaler
+	Unmarshaler
+}
+
+// ExportReplace is used to export or replace what is in the cache.
+type ExportReplace interface {
+	// Replace replaces the cache with what is in external storage.
+	// key is the suggested key which can be used for partitioning the cache
+	Replace(cache Unmarshaler, key string)
+	// Export writes the binary representation of the cache (cache.Marshal()) to
+	// external storage. This is considered opaque.
+	// key is the suggested key which can be used for partitioning the cache
+	Export(cache Marshaler, key string)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
new file mode 100644
index 000000000000..41ca21c94528
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -0,0 +1,455 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package confidential provides a client for authentication of "confidential" applications.
+A "confidential" application is defined as an app that runs on servers. These apps are
+considered difficult to access and for that reason are capable of keeping an application
+secret. Confidential clients can hold configuration-time secrets.
+*/
+package confidential
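For illustration, the ExportReplace contract above is small enough that a complete accessor fits in a few lines. A minimal sketch, assuming a single-file store and ignoring the suggested partition key; `fileCache` and its path are hypothetical and not part of this library:

```go
// Package filecache sketches a cache.ExportReplace implementation that
// persists the MSAL token cache to one file (assumption: single user,
// no partitioning, so the suggested key is ignored).
package filecache

import (
	"log"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)

type fileCache struct{ path string } // hypothetical helper type

// Compile-time check that fileCache satisfies the interface.
var _ cache.ExportReplace = fileCache{}

// Replace loads the externally stored cache into MSAL's internal cache.
func (f fileCache) Replace(c cache.Unmarshaler, key string) {
	data, err := os.ReadFile(f.path)
	if err != nil {
		return // nothing stored yet; leave the internal cache as-is
	}
	if err := c.Unmarshal(data); err != nil {
		log.Printf("cache replace: %v", err)
	}
}

// Export writes the opaque binary form of MSAL's internal cache to the file.
func (f fileCache) Export(c cache.Marshaler, key string) {
	data, err := c.Marshal()
	if err != nil {
		log.Printf("cache export: %v", err)
		return
	}
	if err := os.WriteFile(f.path, data, 0o600); err != nil {
		log.Printf("cache export: %v", err)
	}
}
```

An accessor like this would be passed to the confidential client through the WithAccessor() option defined further down in this file.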
+
+import (
+	"context"
+	"crypto"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+/*
+Design note:
+
+confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams, is a copy that is free to be manipulated here.
+
+Duplicate calls shared between public.Client and this package:
+There are some duplicate call options provided here that are the same as in public.Client. This
+is a design choice. Go proverb (https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s):
+"a little copying is better than a little dependency". Yes, we could have another package with
+shared options (fail). That would divide a couple of options from all the others, which makes the
+user look through more docs. We can have all clients in one package, but I think separate packages
+here make for better naming (public.Client vs client.PublicClient). So I chose a little
+duplication.
+
+.Net People, Take note on X509:
+This uses x509.Certificates and private keys. x509 does not store private keys. .Net
+has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net
+added; it doesn't exist in real life. Seriously, "x509.Certificate2", bahahahaha. As such I've
+put a PEM decoder into here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// base usage details should be included in the package documentation.
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// CertFromPEM converts a PEM file (.pem or .key) for use with NewCredFromCert(). The file
+// must contain the public certificate and the private key. If a PEM block is encrypted and
+// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
+// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf.
+func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) {
+	var certs []*x509.Certificate
+	var priv crypto.PrivateKey
+	for {
+		block, rest := pem.Decode(pemData)
+		if block == nil {
+			break
+		}
+
+		//nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a use case.
+		if x509.IsEncryptedPEMBlock(block) {
+			b, err := x509.DecryptPEMBlock(block, []byte(password))
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err)
+			}
+			block, _ = pem.Decode(b)
+			if block == nil {
+				return nil, nil, fmt.Errorf("encountered an encrypted PEM block that did not decode")
+			}
+		}
+
+		switch block.Type {
+		case "CERTIFICATE":
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err)
+			}
+			certs = append(certs, cert)
+		case "PRIVATE KEY":
+			if priv != nil {
+				return nil, nil, errors.New("found multiple private key blocks")
+			}
+
+			var err error
+			priv, err = x509.ParsePKCS8PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+			}
+		case "RSA PRIVATE KEY":
+			if priv != nil {
+				return nil, nil, errors.New("found multiple private key blocks")
+			}
+			var err error
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+			}
+		}
+		pemData = rest
+	}
+
+	if len(certs) == 0 {
+		return nil, nil, fmt.Errorf("no certificates found")
+	}
+
+	if priv == nil {
+		return nil, nil, fmt.Errorf("no private key found")
+	}
+
+	return certs, priv, nil
+}
+
+// AssertionRequestOptions has the required information for client assertion claims.
+type AssertionRequestOptions = exported.AssertionRequestOptions
+
+// Credential represents the credential used in confidential client flows.
+type Credential struct {
+	secret string
+
+	cert *x509.Certificate
+	key  crypto.PrivateKey
+	x5c  []string
+
+	assertionCallback func(context.Context, AssertionRequestOptions) (string, error)
+}
+
+// toInternal returns the accesstokens.Credential that is used internally. The current structure of the
+// code requires that client.go, requests.go and confidential.go share a credential type without
+// having import recursion. That requires the shared type to live in a shared package. Therefore
+// we have this.
+func (c Credential) toInternal() *accesstokens.Credential {
+	return &accesstokens.Credential{Secret: c.secret, Cert: c.cert, Key: c.key, AssertionCallback: c.assertionCallback, X5c: c.x5c}
+}
+
+// NewCredFromSecret creates a Credential from a secret.
+func NewCredFromSecret(secret string) (Credential, error) {
+	if secret == "" {
+		return Credential{}, errors.New("secret can't be empty string")
+	}
+	return Credential{secret: secret}, nil
+}
+
+// NewCredFromAssertion creates a Credential from a signed assertion.
+//
+// Deprecated: a Credential created by this function can't refresh the
+// assertion when it expires. Use NewCredFromAssertionCallback instead.
+func NewCredFromAssertion(assertion string) (Credential, error) {
+	if assertion == "" {
+		return Credential{}, errors.New("assertion can't be empty string")
+	}
+	return NewCredFromAssertionCallback(func(context.Context, AssertionRequestOptions) (string, error) { return assertion, nil }), nil
+}
+
+// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions
+// authenticating the application. The callback must be thread safe.
+func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential {
+	return Credential{assertionCallback: callback}
+}
+
+// NewCredFromCert creates a Credential from an x509.Certificate and an RSA private key.
+// CertFromPEM() can be used to get these values from a PEM file.
+func NewCredFromCert(cert *x509.Certificate, key crypto.PrivateKey) Credential {
+	cred, _ := NewCredFromCertChain([]*x509.Certificate{cert}, key)
+	return cred
+}
+
+// NewCredFromCertChain creates a Credential from a chain of x509.Certificates and an RSA private key
+// as returned by CertFromPEM().
+func NewCredFromCertChain(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) {
+	cred := Credential{key: key}
+	k, ok := key.(*rsa.PrivateKey)
+	if !ok {
+		return cred, errors.New("key must be an RSA key")
+	}
+	for _, cert := range certs {
+		certKey, ok := cert.PublicKey.(*rsa.PublicKey)
+		if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 {
+			// We know this is the signing cert because its public key matches the given private key.
+			// This cert must be first in x5c.
+			cred.cert = cert
+			cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...)
+		} else {
+			cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw))
+		}
+	}
+	if cred.cert == nil {
+		return cred, errors.New("key doesn't match any certificate")
+	}
+	return cred, nil
+}
+
+// AutoDetectRegion instructs MSAL Go to auto detect the region for the Azure regional token service.
+func AutoDetectRegion() string {
+	return "TryAutoDetect"
+}
+
+// Client is a representation of an authentication client for confidential applications as defined in the
+// package doc. A new Client should be created PER SERVICE USER.
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications
+type Client struct {
+	base base.Client
+
+	cred *accesstokens.Credential
+
+	// userID is some unique identifier for a user. It actually isn't used by us at all; it
+	// simply acts as another hint that a confidential.Client is for a single user.
+	userID string
+}
+
+// Options are optional settings for New(). These options are set using various functions
+// returning Option calls.
+type Options struct {
+	// Accessor controls cache persistence.
+	// By default there is no cache persistence. This can be set using the WithAccessor() option.
+	Accessor cache.ExportReplace
+
+	// The host of the Azure Active Directory authority.
+	// The default is https://login.microsoftonline.com/common. This can be changed using the
+	// WithAuthority() option.
+	Authority string
+
+	// The HTTP client used for making requests.
+	// It defaults to a shared http.Client.
+	HTTPClient ops.HTTPClient
+
+	// SendX5C specifies if the x5c claim (public key of the certificate) should be sent to STS.
+	SendX5C bool
+
+	// Instructs MSAL Go to use an Azure regional token service with the specified AzureRegion.
+	AzureRegion string
+}
+
+func (o Options) validate() error {
+	u, err := url.Parse(o.Authority)
+	if err != nil {
+		return fmt.Errorf("the Authority(%s) does not parse as a valid URL", o.Authority)
+	}
+	if u.Scheme != "https" {
+		return fmt.Errorf("the Authority(%s) does not appear to use https", o.Authority)
+	}
+	return nil
+}
+
+// Option is an optional argument to New().
+type Option func(o *Options)
+
+// WithAuthority allows you to provide a custom authority for use in the client.
+func WithAuthority(authority string) Option {
+	return func(o *Options) {
+		o.Authority = authority
+	}
+}
+
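Putting CertFromPEM and NewCredFromCert together, building a certificate credential looks roughly like this sketch; the file path is a placeholder and the PEM is assumed to hold an unencrypted private key:

```go
// A sketch of turning a PEM file into a confidential-client Credential.
package main

import (
	"log"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	pemData, err := os.ReadFile("service-principal.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// Empty password: no PEM decryption is attempted.
	certs, key, err := confidential.CertFromPEM(pemData, "")
	if err != nil {
		log.Fatal(err)
	}
	cred := confidential.NewCredFromCert(certs[0], key)
	_ = cred // pass to confidential.New(...)
}
```

When the PEM holds a full chain, NewCredFromCertChain keeps every certificate for the x5c claim while still requiring that the private key match the signing certificate.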
+// WithAccessor provides a cache accessor that will read and write to some externally managed cache
+// that may or may not be shared with other applications.
+func WithAccessor(accessor cache.ExportReplace) Option {
+	return func(o *Options) {
+		o.Accessor = accessor
+	}
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) Option {
+	return func(o *Options) {
+		o.HTTPClient = httpClient
+	}
+}
+
+// WithX5C specifies if the x5c claim (public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
+func WithX5C() Option {
+	return func(o *Options) {
+		o.SendX5C = true
+	}
+}
+
+// WithAzureRegion sets the region (preferred) or confidential.AutoDetectRegion() for auto detecting the region.
+// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/.
+// See https://aka.ms/region-map for more details on region names.
+// The region value should be the short region name for the region where the service is deployed.
+// For example "centralus" is the short name for the region Central US.
+// Not all auth flows can use the regional token service.
+// Service To Service (client credential flow) tokens can be obtained from the regional service.
+// Requires configuration at the tenant level.
+// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions).
+// If auto-detection fails, the non-regional endpoint will be used.
+// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
+func WithAzureRegion(val string) Option {
+	return func(o *Options) {
+		o.AzureRegion = val
+	}
+}
+
+// New is the constructor for Client (a Client is per user). clientID is the Azure clientID and cred is
+// the type of credential to use.
+func New(clientID string, cred Credential, options ...Option) (Client, error) {
+	opts := Options{
+		Authority:  base.AuthorityPublicCloud,
+		HTTPClient: shared.DefaultClient,
+	}
+
+	for _, o := range options {
+		o(&opts)
+	}
+	if err := opts.validate(); err != nil {
+		return Client{}, err
+	}
+
+	base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithX5C(opts.SendX5C), base.WithCacheAccessor(opts.Accessor), base.WithRegionDetection(opts.AzureRegion))
+	if err != nil {
+		return Client{}, err
+	}
+
+	return Client{
+		base: base,
+		cred: cred.toInternal(),
+	}, nil
+}
+
+// UserID is the unique user identifier this client is for.
+func (cca Client) UserID() string {
+	return cca.userID
+}
+
+// AuthCodeURL creates a URL used to acquire an authorization code.
+func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) {
+	return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, cca.base.AuthParams)
+}
+
+// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call.
+// These are set by using various AcquireTokenSilentOption functions.
+type AcquireTokenSilentOptions struct {
+	// Account represents the account to use. To set, use the WithSilentAccount() option.
+	Account Account
+}
+
+// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent().
+type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions)
+
+// WithSilentAccount uses the passed account during an AcquireTokenSilent() call.
+func WithSilentAccount(account Account) AcquireTokenSilentOption {
+	return func(a *AcquireTokenSilentOptions) {
+		a.Account = account
+	}
+}
+
+// AcquireTokenSilent acquires a token from either the cache or using a refresh token.
+func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) {
+	opts := AcquireTokenSilentOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+	var isAppCache bool
+	if opts.Account.IsZero() {
+		isAppCache = true
+	}
+
+	silentParameters := base.AcquireTokenSilentParameters{
+		Scopes:      scopes,
+		Account:     opts.Account,
+		RequestType: accesstokens.ATConfidential,
+		Credential:  cca.cred,
+		IsAppCache:  isAppCache,
+	}
+
+	return cca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type AcquireTokenByAuthCodeOptions struct {
+	Challenge string
+}
+
+// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode().
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions)
+
+// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) AcquireTokenByAuthCodeOption {
+	return func(a *AcquireTokenByAuthCodeOptions) {
+		a.Challenge = challenge
+	}
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) {
+	opts := AcquireTokenByAuthCodeOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+
+	params := base.AcquireTokenAuthCodeParameters{
+		Scopes:      scopes,
+		Code:        code,
+		Challenge:   opts.Challenge,
+		AppType:     accesstokens.ATConfidential,
+		Credential:  cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode
+		RedirectURI: redirectURI,
+	}
+
+	return cca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant.
+func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string) (AuthResult, error) {
+	authParams := cca.base.AuthParams
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATClientCredentials
+
+	token, err := cca.base.Token.Credential(ctx, authParams, cca.cred)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+// AcquireTokenOnBehalfOf acquires a security token for an app using a middle tier app's access token.
+// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow.
+func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string) (AuthResult, error) {
+	params := base.AcquireTokenOnBehalfOfParameters{
+		Scopes:        scopes,
+		UserAssertion: userAssertion,
+		Credential:    cca.cred,
+	}
+	return cca.base.AcquireTokenOnBehalfOf(ctx, params)
+}
+
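Taken together, the methods above give the usual client credentials pattern: check the cache first, then fall back to the credential grant. A hedged end-to-end sketch; the client ID, tenant authority, secret and scope values are placeholders:

```go
// A sketch of the client credentials flow using the API defined in this file.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	cred, err := confidential.NewCredFromSecret("client-secret") // placeholder secret
	if err != nil {
		log.Fatal(err)
	}
	client, err := confidential.New("client-id", cred, // placeholder client ID
		confidential.WithAuthority("https://login.microsoftonline.com/your-tenant")) // placeholder tenant
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	scopes := []string{"https://graph.microsoft.com/.default"}

	// Try the cache first; with no account set this uses the app cache.
	result, err := client.AcquireTokenSilent(ctx, scopes)
	if err != nil {
		// Cache miss or expired token: hit the STS with the client credential.
		result, err = client.AcquireTokenByCredential(ctx, scopes)
		if err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("token expires:", result.ExpiresOn)
}
```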
+// Account gets the account in the token cache with the specified homeAccountID.
+func (cca Client) Account(homeAccountID string) Account {
+	return cca.base.Account(homeAccountID)
+}
+
+// RemoveAccount signs the account out and forgets the account from the token cache.
+func (cca Client) RemoveAccount(account Account) error {
+	cca.base.RemoveAccount(account)
+	return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
new file mode 100644
index 000000000000..34a699f48018
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
@@ -0,0 +1,111 @@
+# MSAL Error Design
+
+Author: Abhidnya Patil (abhidnya.patil@microsoft.com)
+
+Contributors:
+
+- John Doak (jdoak@microsoft.com)
+- Keegan Caruso (Keegan.Caruso@microsoft.com)
+- Joel Hendrix (jhendrix@microsoft.com)
+
+## Background
+
+Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users.
+
+### Go error handling vs other MSAL languages
+
+Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program.
+
+Go doesn't use exceptions; instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do.
+
+### Go custom error types
+
+Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error".
+
+Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface:
+
+```go
+type MyCustomErr struct {
+	Msg string
+}
+func (m MyCustomErr) Error() string { // This implements "error"
+	return m.Msg
+}
+```
+
+### MSAL Error Goals
+
+- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations
+- Detect errors that are transitory and can be retried
+- Allow the user to identify certain errors that the program can respond to, such as informing the user of the need to do an enrollment
+
+## Implementing Client Side Errors
+
+Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible.
+
+These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was.
+
+## Implementing Service Side Errors
+
+Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error.
+
+These errors can be transitory (please slow down) or permanent (HTTP 404). To meet our diagnostic goals, we require the ability to differentiate these errors from other errors.
+
+The current implementation includes a specialized type that captures any error from the server:
+
+```go
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+	Req  *http.Request
+	Resp *http.Response
+	Err  error
+}
+
+// Error implements error.Error().
+func (e CallErr) Error() string {
+	return e.Err.Error()
+}
+
+// Verbose prints a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+	e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
+	e.Resp.TLS = nil     // Same
+	return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+```
+
+A user will always receive the most concise error we provide. They can tell if it is a server side error using the Go errors package:
+
+```go
+var callErr CallErr
+if errors.As(err, &callErr) {
+	...
+}
+```
+
+We provide a Verbose() function that can retrieve the most verbose message from any error we provide:
+
+```go
+fmt.Println(errors.Verbose(err))
+```
+
+If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors).
+
+CallErr is always thrown from the comm package (which handles all http requests) and looks similar to:
+
+```go
+return nil, errors.CallErr{
+	Req:  req,
+	Resp: reply,
+	Err:  fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response
+}
+```
+
+## Future Decisions
+
+The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it.
+
+If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait.
+
+Otherwise we will do this internally and retries will be left to us.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
new file mode 100644
index 000000000000..c8adf3da2393
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -0,0 +1,90 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package errors
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/kylelemons/godebug/pretty"
+)
+
+var prettyConf = &pretty.Config{
+	IncludeUnexported: false,
+	SkipZeroFields:    true,
+	TrackCycles:       true,
+	Formatter: map[reflect.Type]interface{}{
+		reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string {
+			b, err := ioutil.ReadAll(r)
+			if err != nil {
+				return "could not read io.Reader content"
+			}
+			return string(b)
+		},
+	},
+}
+
+type verboser interface {
+	Verbose() string
+}
+
+// Verbose returns the most verbose error message that the error chain has.
+func Verbose(err error) string {
+	build := strings.Builder{}
+	for {
+		if err == nil {
+			break
+		}
+		if v, ok := err.(verboser); ok {
+			build.WriteString(v.Verbose())
+		} else {
+			build.WriteString(err.Error())
+		}
+		err = errors.Unwrap(err)
+	}
+	return build.String()
+}
+
+// New is equivalent to errors.New().
+func New(text string) error {
+	return errors.New(text)
+}
+
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+	Req *http.Request
+	// Resp contains response body
+	Resp *http.Response
+	Err  error
+}
+
+// Error implements error.Error().
+func (e CallErr) Error() string {
+	return e.Err.Error()
+}
+
+// Verbose prints a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+	e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
+	e.Resp.TLS = nil     // Same
+	return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+
+// Is reports whether any error in err's chain matches target.
+func Is(err, target error) bool {
+	return errors.Is(err, target)
+}
+
+// As finds the first error in err's chain that matches target,
+// and if so, sets target to that error value and returns true.
+// Otherwise, it returns false.
+func As(err error, target interface{}) bool {
+	return errors.As(err, target)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
new file mode 100644
index 000000000000..e9b56f2ba336
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -0,0 +1,397 @@
+// Package base contains a "Base" client that is used by the external public.Client and confidential.Client.
+// Base holds shared attributes that must be available to both clients and methods that act as
+// shared calls.
+package base
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+const (
+	// AuthorityPublicCloud is the default AAD authority host
+	AuthorityPublicCloud = "https://login.microsoftonline.com/common"
+	scopeSeparator       = " "
+)
+
+// manager provides an internal cache. It is defined to allow faking the cache in tests.
+// In all production use it is a *storage.Manager.
+type manager interface {
+	Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (storage.TokenResponse, error)
+	Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error)
+	AllAccounts() []shared.Account
+	Account(homeAccountID string) shared.Account
+	RemoveAccount(account shared.Account, clientID string)
+}
+
+// partitionedManager provides an internal cache. It is defined to allow faking the cache in tests.
+// In all production use it is a *storage.PartitionedManager.
+type partitionedManager interface {
+	Read(ctx context.Context, authParameters authority.AuthParams) (storage.TokenResponse, error)
+	Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error)
+}
+
+type noopCacheAccessor struct{}
+
+func (n noopCacheAccessor) Replace(cache cache.Unmarshaler, key string) {}
+func (n noopCacheAccessor) Export(cache cache.Marshaler, key string)   {}
+
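Before base.Client's parameter types, it is worth illustrating how a caller would consume the errors contract vendored just above: errors.As distinguishes server-side failures, and errors.Verbose recovers the full request/response diagnostics. A hedged sketch in which doRequest is a hypothetical stand-in for a call that fails through the comm package:

```go
// A sketch of handling MSAL's CallErr per error_design.md.
package main

import (
	"log"
	"net/http"

	msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)

// doRequest is a hypothetical stand-in for a failing HTTP call.
func doRequest() error {
	return msalerrors.CallErr{
		Req:  &http.Request{},
		Resp: &http.Response{},
		Err:  msalerrors.New("reply status code was 429"),
	}
}

func main() {
	err := doRequest()

	var callErr msalerrors.CallErr
	if msalerrors.As(err, &callErr) {
		// Server-side error: the verbose form includes the request and response.
		log.Println(msalerrors.Verbose(callErr))
		return
	}
	// Otherwise treat it as a client-side (non-retryable) error.
	log.Println(err)
}
```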
+// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache).
+type AcquireTokenSilentParameters struct {
+	Scopes            []string
+	Account           shared.Account
+	RequestType       accesstokens.AppType
+	Credential        *accesstokens.Credential
+	IsAppCache        bool
+	UserAssertion     string
+	AuthorizationType authority.AuthorizeType
+}
+
+// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow.
+// To use PKCE, set the Challenge field.
+// Code challenges are used to secure authorization code grants; for more information, visit
+// https://tools.ietf.org/html/rfc7636.
+type AcquireTokenAuthCodeParameters struct {
+	Scopes      []string
+	Code        string
+	Challenge   string
+	RedirectURI string
+	AppType     accesstokens.AppType
+	Credential  *accesstokens.Credential
+}
+
+type AcquireTokenOnBehalfOfParameters struct {
+	Scopes        []string
+	Credential    *accesstokens.Credential
+	UserAssertion string
+}
+
+// AuthResult contains the results of one token acquisition operation in PublicClientApplication
+// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult struct {
+	Account        shared.Account
+	IDToken        accesstokens.IDToken
+	AccessToken    string
+	ExpiresOn      time.Time
+	GrantedScopes  []string
+	DeclinedScopes []string
+}
+
+// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
+	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
+		return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
+	}
+
+	account := storageTokenResponse.Account
+	accessToken := storageTokenResponse.AccessToken.Secret
+	grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
+
+	// Check if there was an ID token in the cache; it may be absent for confidential client (app-only) tokens.
+	var idToken accesstokens.IDToken
+	if !storageTokenResponse.IDToken.IsZero() {
+		err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret))
+		if err != nil {
+			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
+		}
+	}
+	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+}
+
+// NewAuthResult creates an AuthResult.
+func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) {
+	if len(tokenResponse.DeclinedScopes) > 0 {
+		return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ","))
+	}
+	return AuthResult{
+		Account:       account,
+		IDToken:       tokenResponse.IDToken,
+		AccessToken:   tokenResponse.AccessToken,
+		ExpiresOn:     tokenResponse.ExpiresOn.T,
+		GrantedScopes: tokenResponse.GrantedScopes.Slice,
+	}, nil
+}
+
+// Client is a base client that provides access to common methods and primitives that
+// can be used by multiple clients.
+type Client struct {
+	Token    *oauth.Client
+	manager  manager            // *storage.Manager or fakeManager in tests
+	pmanager partitionedManager // *storage.PartitionedManager or fakeManager in tests
+
+	AuthParams    authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
+	cacheAccessor cache.ExportReplace
+}
+
+// Option is an optional argument to the New constructor.
+type Option func(c *Client)
+
+// WithCacheAccessor allows you to set some type of cache for storing authentication tokens.
+func WithCacheAccessor(ca cache.ExportReplace) Option {
+	return func(c *Client) {
+		if ca != nil {
+			c.cacheAccessor = ca
+		}
+	}
+}
+
+// WithX5C specifies if the x5c claim (public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
+func WithX5C(sendX5C bool) Option {
+	return func(c *Client) {
+		c.AuthParams.SendX5C = sendX5C
+	}
+}
+
+func WithRegionDetection(region string) Option {
+	return func(c *Client) {
+		c.AuthParams.AuthorityInfo.Region = region
+	}
+}
+
+// New is the constructor for Client.
+func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) {
+	authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true)
+	if err != nil {
+		return Client{}, err
+	}
+	authParams := authority.NewAuthParams(clientID, authInfo)
+	client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go
+		Token:         token,
+		AuthParams:    authParams,
+		cacheAccessor: noopCacheAccessor{},
+		manager:       storage.New(token),
+		pmanager:      storage.NewPartitionedManager(token),
+	}
+	for _, o := range options {
+		o(&client)
+	}
+	return client, nil
+}
+
+// AuthCodeURL creates a URL used to acquire an authorization code.
+func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) {
+	endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "")
+	if err != nil {
+		return "", err
+	}
+
+	baseURL, err := url.Parse(endpoints.AuthorizationEndpoint)
+	if err != nil {
+		return "", err
+	}
+
+	v := url.Values{}
+	v.Add("client_id", clientID)
+	v.Add("response_type", "code")
+	v.Add("redirect_uri", redirectURI)
+	v.Add("scope", strings.Join(scopes, scopeSeparator))
+	if authParams.State != "" {
+		v.Add("state", authParams.State)
+	}
+	if authParams.CodeChallenge != "" {
+		v.Add("code_challenge", authParams.CodeChallenge)
+	}
+	if authParams.CodeChallengeMethod != "" {
+		v.Add("code_challenge_method", authParams.CodeChallengeMethod)
+	}
+	if authParams.Prompt != "" {
+		v.Add("prompt", authParams.Prompt)
+	}
+	// These were left over from an implementation that didn't use any of them. We may
+	// need to add them later, but as of now they aren't needed.
+	/*
+		if p.ResponseMode != "" {
+			urlParams.Add("response_mode", p.ResponseMode)
+		}
+		if p.LoginHint != "" {
+			urlParams.Add("login_hint", p.LoginHint)
+		}
+		if p.DomainHint != "" {
+			urlParams.Add("domain_hint", p.DomainHint)
+		}
+	*/
+	baseURL.RawQuery = v.Encode()
+	return baseURL.String(), nil
+}
+
+func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and authParams is not a pointer.
+	authParams.Scopes = silent.Scopes
+	authParams.HomeaccountID = silent.Account.HomeAccountID
+	authParams.AuthorizationType = silent.AuthorizationType
+	authParams.UserAssertion = silent.UserAssertion
+
+	var storageTokenResponse storage.TokenResponse
+	var err error
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		if s, ok := b.pmanager.(cache.Serializer); ok {
+			suggestedCacheKey := authParams.CacheKey(silent.IsAppCache)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		storageTokenResponse, err = b.pmanager.Read(ctx, authParams)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	} else {
+		if s, ok := b.manager.(cache.Serializer); ok {
+			suggestedCacheKey := authParams.CacheKey(silent.IsAppCache)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		authParams.AuthorizationType = authority.ATRefreshToken
+		storageTokenResponse, err = b.manager.Read(ctx, authParams, silent.Account)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+
+	result, err := AuthResultFromStorage(storageTokenResponse)
+	if err != nil {
+		if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() {
+			return AuthResult{}, errors.New("no token found")
+		}
+
+		var cc *accesstokens.Credential
+		if silent.RequestType == accesstokens.ATConfidential {
+			cc = silent.Credential
+		}
+
+		token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken)
+		if err != nil {
+			return AuthResult{}, err
+		}
+
+		return b.AuthResultFromToken(ctx, authParams, token, true)
+	}
+	return result, nil
+}
+
+func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.Scopes = authCodeParams.Scopes
+	authParams.Redirecturi = authCodeParams.RedirectURI
+	authParams.AuthorizationType = authority.ATAuthCode
+
+	var cc *accesstokens.Credential
+	if authCodeParams.AppType == accesstokens.ATConfidential {
+		cc = authCodeParams.Credential
+		authParams.IsConfidentialClient = true
+	}
+
+	req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge)
+	if err != nil {
+		return AuthResult{}, err
+	}
+
+	token, err := b.Token.AuthCode(ctx, req)
+	if err != nil {
+		return AuthResult{}, err
+	}
+
+	return b.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+// AcquireTokenOnBehalfOf acquires a security token for an app using a middle tier app's access token.
+func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.Scopes = onBehalfOfParams.Scopes
+	authParams.AuthorizationType = authority.ATOnBehalfOf
+	authParams.UserAssertion = onBehalfOfParams.UserAssertion
+
+	silentParameters := AcquireTokenSilentParameters{
+		Scopes:            onBehalfOfParams.Scopes,
+		RequestType:       accesstokens.ATConfidential,
+		Credential:        onBehalfOfParams.Credential,
+		UserAssertion:     onBehalfOfParams.UserAssertion,
+		AuthorizationType: authority.ATOnBehalfOf,
+	}
+	token, err := b.AcquireTokenSilent(ctx, silentParameters)
+	if err != nil {
+		fmt.Println("Acquire Token Silent failed ")
+		token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
+		if err != nil {
+			return AuthResult{}, err
+		}
+		return b.AuthResultFromToken(ctx, authParams, token, true)
+	}
+	return token, err
+}
+
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
+	if !cacheWrite {
+		return NewAuthResult(token, shared.Account{})
+	}
+
+	var account shared.Account
+	var err error
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		if s, ok := b.pmanager.(cache.Serializer); ok {
+			suggestedCacheKey := token.CacheKey(authParams)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		account, err = b.pmanager.Write(authParams, token)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	} else {
+		if s, ok := b.manager.(cache.Serializer); ok {
+			suggestedCacheKey := token.CacheKey(authParams)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		account, err = b.manager.Write(authParams, token)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+	return NewAuthResult(token, account)
+}
+
+func (b Client) AllAccounts() []shared.Account {
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+
+	accounts := b.manager.AllAccounts()
+	return accounts
+}
+
+func (b Client) Account(homeAccountID string) shared.Account {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.AuthorizationType = authority.AccountByID
+	authParams.HomeaccountID = homeAccountID
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+	account := b.manager.Account(homeAccountID)
+	return account
+}
+
+// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account.
+func (b Client) RemoveAccount(account shared.Account) {
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+	b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
new file mode 100644
index 000000000000..548c2faebf96
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// Contract is the JSON structure that is written to any storage medium when serializing
+// the internal cache. This design is shared between MSAL versions in many languages.
+// This cannot be changed without design that includes other SDKs.
+type Contract struct {
+	AccessTokens  map[string]AccessToken               `json:"AccessToken,omitempty"`
+	RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
+	IDTokens      map[string]IDToken                   `json:"IdToken,omitempty"`
+	Accounts      map[string]shared.Account            `json:"Account,omitempty"`
+	AppMetaData   map[string]AppMetaData               `json:"AppMetadata,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// InMemoryContract is the in-memory representation of the cache contract above,
+// with each item map further partitioned by a partition key.
+type InMemoryContract struct {
+	AccessTokensPartition  map[string]map[string]AccessToken
+	RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
+	IDTokensPartition      map[string]map[string]IDToken
+	AccountsPartition      map[string]map[string]shared.Account
+	AppMetaData            map[string]AppMetaData
+}
+
+// NewInMemoryContract is the constructor for InMemoryContract.
+func NewInMemoryContract() *InMemoryContract {
+	return &InMemoryContract{
+		AccessTokensPartition:  map[string]map[string]AccessToken{},
+		RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
+		IDTokensPartition:      map[string]map[string]IDToken{},
+		AccountsPartition:      map[string]map[string]shared.Account{},
+		AppMetaData:            map[string]AppMetaData{},
+	}
+}
+
+// NewContract is the constructor for Contract.
+func NewContract() *Contract {
+	return &Contract{
+		AccessTokens:     map[string]AccessToken{},
+		RefreshTokens:    map[string]accesstokens.RefreshToken{},
+		IDTokens:         map[string]IDToken{},
+		Accounts:         map[string]shared.Account{},
+		AppMetaData:      map[string]AppMetaData{},
+		AdditionalFields: map[string]interface{}{},
+	}
+}
+
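Each cached item type below derives its map lookup key by joining identity fields with shared.CacheKeySeparator, as the Key() methods show. A standalone illustration of the resulting layout; the "-" separator value and the field values here are assumptions for demonstration, since the real constant is not shown in this diff:

```go
// Replicates the key construction used by AccessToken.Key() below,
// standalone because the storage package is internal and cannot be imported.
package main

import (
	"fmt"
	"strings"
)

func main() {
	cacheKeySeparator := "-" // assumed value of shared.CacheKeySeparator
	key := strings.Join(
		// HomeAccountID, Environment, CredentialType, ClientID, Realm, Scopes
		[]string{"uid.utid", "login.microsoftonline.com", "AccessToken", "my-client-id", "my-tenant", "scope1 scope2"},
		cacheKeySeparator,
	)
	fmt.Println(key)
	// uid.utid-login.microsoftonline.com-AccessToken-my-client-id-my-tenant-scope1 scope2
}
```

+// AccessToken is the JSON representation of an MSAL access token for encoding to storage.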
+type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. +func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. 
+func (id IDToken) Key() string {
+	return strings.Join(
+		[]string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm},
+		shared.CacheKeySeparator,
+	)
+}
+
+// AppMetaData is the JSON representation of application metadata for encoding to storage.
+type AppMetaData struct {
+	FamilyID    string `json:"family_id,omitempty"`
+	ClientID    string `json:"client_id,omitempty"`
+	Environment string `json:"environment,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// NewAppMetaData is the constructor for AppMetaData.
+func NewAppMetaData(familyID, clientID, environment string) AppMetaData {
+	return AppMetaData{
+		FamilyID:    familyID,
+		ClientID:    clientID,
+		Environment: environment,
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (a AppMetaData) Key() string {
+	return strings.Join(
+		[]string{"AppMetaData", a.Environment, a.ClientID},
+		shared.CacheKeySeparator,
+	)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
new file mode 100644
index 000000000000..1b82655d8df4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
@@ -0,0 +1,430 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and metadata.
+type PartitionedManager struct {
+	contract   *InMemoryContract
+	contractMu sync.RWMutex
+	requests   aadInstanceDiscoveryer // *oauth.Token
+
+	aadCacheMu sync.RWMutex
+	aadCache   map[string]authority.InstanceDiscoveryMetadata
+}
+
+// NewPartitionedManager is the constructor for PartitionedManager.
+func NewPartitionedManager(requests *oauth.Client) *PartitionedManager {
+	m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
+	m.contract = NewInMemoryContract()
+	return m
+}
+
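The reader and writer methods that follow all share one locking discipline: a two-level map (partition key, then entry key) guarded by a sync.RWMutex, with reads taking the shared lock and writes the exclusive one. A minimal standalone sketch of that pattern, under the assumption that string values stand in for the real token types:

```go
// A sketch of the partitioned, RWMutex-guarded map pattern used by
// PartitionedManager (types simplified to strings for illustration).
package main

import (
	"fmt"
	"sync"
)

type partitioned struct {
	mu   sync.RWMutex
	data map[string]map[string]string // partition key -> entry key -> value
}

func (p *partitioned) write(partition, key, value string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.data[partition] == nil {
		p.data[partition] = make(map[string]string) // lazily create the partition
	}
	p.data[partition][key] = value
}

func (p *partitioned) read(partition, key string) (string, bool) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	v, ok := p.data[partition][key] // nil inner map reads safely as a miss
	return v, ok
}

func main() {
	p := &partitioned{data: map[string]map[string]string{}}
	p.write("assertion-hash", "token-key", "secret")
	fmt.Println(p.read("assertion-hash", "token-key"))
}
```

+// Read reads a storage token from the cache if it exists.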
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + accessToken, err := m.readAccessToken(metadata.Aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err != nil { + return TokenResponse{}, err + } + + AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) + if err != nil { + return TokenResponse{}, err + } + familyID := AppMetaData.FamilyID + + refreshToken, err := m.readRefreshToken(metadata.Aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err != nil { + return TokenResponse{}, err + } + + idToken, err := m.readIDToken(metadata.Aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err != nil { + return TokenResponse{}, err + } + + account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err != nil { + return TokenResponse{}, err + } + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: refreshToken, + IDToken: idToken, + Account: account, + }, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeaccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + idTokenJwt.PreferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, matcher := range matchers {
+		for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
+			if matcher(rt) {
+				return rt, nil
+			}
+		}
+	}
+
+	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	key := refreshToken.Key()
+	if m.contract.RefreshTokensPartition[partitionKey] == nil {
+		m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
+	}
+	m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
+	return nil
+}
+
+func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, idt := range m.contract.IDTokensPartition[partitionKey] {
+		if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
+			if checkAlias(idt.Environment, envAliases) {
+				return idt, nil
+			}
+		}
+	}
+	return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
+	key := idToken.Key()
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	if m.contract.IDTokensPartition[partitionKey] == nil {
+		m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
+	}
+	m.contract.IDTokensPartition[partitionKey][key] = idToken
+	return nil
+}
+
+func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+
+	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+	// We only use a map because the storage contract shared between all language implementations says use a map.
+	// We can't change that. The other reason is that the keys are made using a specific "env", but here we are allowing
+	// a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
+	// or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
+	// is really low (say 2). Each hash is more expensive than the entire iteration.
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 000000000000..617ba14d5bfd --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,512 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. 
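+// If the supplied account is zero-valued, only the access token is returned;
+// the refresh token, ID token and account lookups are skipped, since they
+// require a known account.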
+func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { + homeAccountID := authParameters.HomeaccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + + accessToken := m.readAccessToken(homeAccountID, metadata.Aliases, realm, clientID, scopes) + + if account.IsZero() { + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: accesstokens.RefreshToken{}, + IDToken: IDToken{}, + Account: shared.Account{}, + }, nil + } + idToken, err := m.readIDToken(homeAccountID, metadata.Aliases, realm, clientID) + if err != nil { + return TokenResponse{}, err + } + + AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) + if err != nil { + return TokenResponse{}, err + } + familyID := AppMetaData.FamilyID + + refreshToken, err := m.readRefreshToken(homeAccountID, metadata.Aliases, familyID, clientID) + if err != nil { + return TokenResponse{}, err + } + account, err = m.readAccount(homeAccountID, metadata.Aliases, realm) + if err != nil { + return TokenResponse{}, err + } + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: refreshToken, + IDToken: idToken, + Account: account, + }, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeaccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeaccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + idTokenJwt.PreferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
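+// The whole in-memory contract is serialized in one piece so that external
+// persistence layers can store and reload the cache as a single blob, as
+// described in the package documentation.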
+func (m *Manager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 000000000000..1d8181924d14 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 000000000000..d7927effa527 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+	TokenEndpoint string
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
new file mode 100644
index 000000000000..09edb01b7e43
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
@@ -0,0 +1,140 @@
+# JSON Package Design
+Author: John Doak (jdoak@microsoft.com)
+
+## Why?
+
+This project needs a special type of marshal/unmarshal not directly supported
+by the encoding/json package.
+
+The need revolves around a few key wants/needs:
+- unmarshal and marshal structs representing JSON messages
+- fields in the message not in the struct must be maintained when unmarshalled
+- those same fields must be marshalled back when encoded again
+
+The initial version used map[string]interface{} to put in the keys that
+were known and then any other keys were put into a field called AdditionalFields.
+
+This has a few negatives:
+- Dual marshalling/unmarshalling is required
+- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
+- Tests can quickly become disconnected if those keys aren't put
+in tests as well. So you think you have support working, but you
+don't. Existing tests were found that didn't test the marshalling output.
+- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
+that don't have custom marshal/unmarshal.
+
+This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
+
+This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
+- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
+
+Go proverb: Reflection is never clear
+Suggested reading: https://blog.golang.org/laws-of-reflection
+
+## Important design decisions
+
+- We don't want to understand all JSON decoding rules
+- We don't want to deal with all the quoting, commas, etc. on decode
+- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
+- If a struct does not implement json.Unmarshaler, it must have AdditionalFields defined
+- We only support root level objects that are \*struct or struct
+
+To facilitate these goals, we will utilize the json.Encoder and json.Decoder.
+They provide streaming processing (efficient) and return errors on bad JSON.
+
+Support for json.Marshaler/Unmarshaler allows us to use non-basic types
+that must be specially encoded/decoded (like time.Time objects).
+
+We don't support types that lack custom unmarshalling and also lack AdditionalFields,
+in order to prevent future devs from forgetting that important field and
+generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed in the introduction.
+Anything outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on json supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
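+
+To make the contract concrete, here is a minimal sketch of the kind of struct
+this design supports (the type and field names are illustrative only, not part
+of this package):
+
+```go
+// Message models a JSON message. Wire-format keys that do not map to a
+// named field are preserved in AdditionalFields by Unmarshal() and are
+// written back out by Marshal(), so unknown fields round-trip.
+type Message struct {
+	Name string `json:"name"`
+
+	// AdditionalFields holds any JSON keys not represented above.
+	AdditionalFields map[string]interface{}
+}
+```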
+
+## Design
+
+### State Machines
+
+This uses state machine designs based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
+
+Our state machines have a few standard calls:
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or error
+* stateFn is called
+  - If the returned stateFn (next state) is non-nil, call it
+  - If the error is non-nil, run() returns the error
+  - If stateFn == nil and err == nil, run() returns nil
+
+### Supporting types
+
+Marshalling/Unmarshalling must support (within the top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+### Marshalling
+
+The marshalling logic is based around a state machine design.
+
+The basic logic is as follows:
+
+* If the struct has a custom marshaller, call it and return
+* If the struct has field "AdditionalFields", it must be a map[string]interface{}
+* If the struct does not have "AdditionalFields", give an error
+* Get the struct tag detailing json names to go names, create a mapping
+* For each public field name
+  - Write the field name out
+  - If the field value is a struct, recursively call our state machine
+  - Otherwise, use the json.Encoder to write out the value
+
+### Unmarshalling
+
+The unmarshalling logic is also based around a state machine design. The
+basic logic is as follows:
+
+* If the struct has a custom unmarshaller, call it
+* If the struct has field "AdditionalFields", it must be a map[string]interface{}
+* Get the struct tag detailing json names to go names, create a mapping
+* For each key found
+  - If the key exists in the struct:
+    - If the value is a basic type, extract the value into the struct field using the Decoder
+    - If the value is a struct type, recursively call the state machine
+  - If the key doesn't exist, add it to AdditionalFields, if it exists, using the Decoder
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
new file mode 100644
index 000000000000..83bd60d419b7
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -0,0 +1,184 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package json provides functions for marshalling and unmarshalling types to JSON. These functions are meant to
+// be utilized inside of structs that implement the json.Unmarshaler and json.Marshaler interfaces.
+// This package provides the additional functionality of writing fields that are not in the struct when marshalling
+// to a field called AdditionalFields, if that field exists and is a map[string]interface{}.
+// When marshalling, if the struct has all the same prerequisites, it will use the keys in AdditionalFields as
+// extra fields. This package uses encoding/json underneath.
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+const addField = "AdditionalFields"
+const (
+	marshalJSON   = "MarshalJSON"
+	unmarshalJSON = "UnmarshalJSON"
+)
+
+var (
+	leftBrace  = []byte("{")[0]
+	rightBrace = []byte("}")[0]
+	comma      = []byte(",")[0]
+	leftParen  = []byte("[")[0]
+	rightParen = []byte("]")[0]
+)
+
+var mapStrInterType = reflect.TypeOf(map[string]interface{}{})
+
+// stateFn defines a state machine function. This will be used in all state
+// machines in this package.
+type stateFn func() (stateFn, error)
+
+// Marshal is used to marshal a type into its JSON representation. It
+// wraps the stdlib calls in order to marshal a struct or *struct so
+// that a field called "AdditionalFields" of type map[string]interface{}
+// with "-" used inside struct tag `json:"-"` can be marshalled as if
+// they were fields within the struct.
+func Marshal(i interface{}) ([]byte, error) {
+	buff := bytes.Buffer{}
+	enc := json.NewEncoder(&buff)
+	enc.SetEscapeHTML(false)
+	enc.SetIndent("", "")
+
+	v := reflect.ValueOf(i)
+	if v.Kind() != reflect.Ptr && v.CanAddr() {
+		v = v.Addr()
+	}
+	err := marshalStruct(v, &buff, enc)
+	if err != nil {
+		return nil, err
+	}
+	return buff.Bytes(), nil
+}
+
+// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has
+// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct
+// will be written as key/value pairs to AdditionalFields.
+func Unmarshal(b []byte, i interface{}) error {
+	if len(b) == 0 {
+		return nil
+	}
+
+	jdec := json.NewDecoder(bytes.NewBuffer(b))
+	jdec.UseNumber()
+	return unmarshalStruct(jdec, i)
+}
+
+// MarshalRaw marshals i into a json.RawMessage. If i cannot be marshalled,
+// this will panic. This is exposed to help test AdditionalField values
+// which are stored as json.RawMessage.
+func MarshalRaw(i interface{}) json.RawMessage {
+	b, err := json.Marshal(i)
+	if err != nil {
+		panic(err)
+	}
+	return json.RawMessage(b)
+}
+
+// isDelim simply tests to see if a json.Token is a delimiter.
+func isDelim(got json.Token) bool {
+	switch got.(type) {
+	case json.Delim:
+		return true
+	}
+	return false
+}
+
+// delimIs tests whether got is the delimiter want.
+func delimIs(got json.Token, want rune) bool {
+	switch v := got.(type) {
+	case json.Delim:
+		if v == json.Delim(want) {
+			return true
+		}
+	}
+	return false
+}
+
+// hasMarshalJSON will determine if the value or a pointer to this value has
+// the MarshalJSON method.
+func hasMarshalJSON(v reflect.Value) bool {
+	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+		_, ok := v.Interface().(json.Marshaler)
+		return ok
+	}
+
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	} else {
+		if !v.CanAddr() {
+			return false
+		}
+		v = v.Addr()
+	}
+
+	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+		_, ok := v.Interface().(json.Marshaler)
+		return ok
+	}
+	return false
+}
+
+// callMarshalJSON will call the MarshalJSON() method on the value or a pointer to this value.
+// This will panic if the method is not defined.
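+// Like hasMarshalJSON, it checks the value itself first and then a pointer
+// (or the pointed-to value), since MarshalJSON may be declared on either
+// receiver.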
+func callMarshalJSON(v reflect.Value) ([]byte, error) {
+	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+		marsh := v.Interface().(json.Marshaler)
+		return marsh.MarshalJSON()
+	}
+
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	} else {
+		if v.CanAddr() {
+			v = v.Addr()
+		}
+	}
+
+	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+		marsh := v.Interface().(json.Marshaler)
+		return marsh.MarshalJSON()
+	}
+
+	panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
+}
+
+// hasUnmarshalJSON will determine if the value or a pointer to this value has
+// the UnmarshalJSON method.
+func hasUnmarshalJSON(v reflect.Value) bool {
+	// You can't unmarshal on a non-pointer type.
+	if v.Kind() != reflect.Ptr {
+		if !v.CanAddr() {
+			return false
+		}
+		v = v.Addr()
+	}
+
+	if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
+		_, ok := v.Interface().(json.Unmarshaler)
+		return ok
+	}
+
+	return false
+}
+
+// hasOmitEmpty indicates if the field has instructed us to not output
+// the field if omitempty is set on the tag. tag is the string
+// returned by reflect.StructField.Tag().Get().
+func hasOmitEmpty(tag string) bool {
+	sl := strings.Split(tag, ",")
+	for _, str := range sl {
+		if str == "omitempty" {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
new file mode 100644
index 000000000000..cef442f25c86
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
@@ -0,0 +1,333 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package json
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+// unmarshalMap unmarshals a map.
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error {
+	if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map {
+		panic("unmarshalMap called on non-*map value")
+	}
+	mapValueType := m.Elem().Type().Elem()
+	walk := mapWalk{dec: dec, m: m, valueType: mapValueType}
+	if err := walk.run(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type mapWalk struct {
+	dec       *json.Decoder
+	key       string
+	m         reflect.Value
+	valueType reflect.Type
+}
+
+// run runs our decoder state machine.
+func (m *mapWalk) run() error {
+	var state = m.start
+	var err error
+	for {
+		state, err = state()
+		if err != nil {
+			return err
+		}
+		if state == nil {
+			return nil
+		}
+	}
+}
+
+func (m *mapWalk) start() (stateFn, error) {
+	// maps can have custom unmarshalers.
+	if hasUnmarshalJSON(m.m) {
+		err := m.dec.Decode(m.m.Interface())
+		if err != nil {
+			return nil, err
+		}
+		return nil, nil
+	}
+
+	// We only want to use this if the map value is:
+	// *struct/struct/map/slice
+	// otherwise use standard decode
+	t, _ := m.valueBaseType()
+	switch t.Kind() {
+	case reflect.Struct, reflect.Map, reflect.Slice:
+		delim, err := m.dec.Token()
+		if err != nil {
+			return nil, err
+		}
+		// This indicates the value was set to JSON null.
+		if delim == nil {
+			return nil, nil
+		}
+		if !delimIs(delim, '{') {
+			return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
+		}
+		return m.next, nil
+	case reflect.Ptr:
+		return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference'")
+	}
+
+	// This is a basic map type, so just use Decode().
+ if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. 
+ if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 000000000000..df5dc6e11b50 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. 
+ if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). + case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
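+// Each stateFn returns the next state to execute; the loop ends when the
+// next state is nil (success) or a state returns an error, following the
+// pattern described in design.md.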
+func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. 
+ if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 000000000000..07751544a282 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. + return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. 
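+// It also builds the JSON-name to Go-name field translator via findFields
+// before any tokens are consumed.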
+func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. + if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. 
+func (d *decoder) storeAdditional() (stateFn, error) {
+	rw := json.RawMessage{}
+	if err := d.dec.Decode(&rw); err != nil {
+		return nil, err
+	}
+	field := d.value.FieldByName(addField)
+	if field.IsNil() {
+		field.Set(reflect.MakeMap(field.Type()))
+	}
+	field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw))
+	return d.next, nil
+}
+
+func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) {
+	sf, ok := v.Type().FieldByName(fieldName)
+	if !ok {
+		return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name())
+	}
+	t = sf.Type
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+		isPtr = true
+	}
+	if t.Kind() == reflect.Ptr {
+		return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported")
+	}
+	return t, isPtr, nil
+}
+
+type translateField struct {
+	jsonName string
+	goName   string
+}
+
+// translateFields is a list of translateFields with a handy lookup method.
+type translateFields []translateField
+
+// goName loops through a list of fields looking for one containing the jsonName and
+// returns the goName. If not found, returns the empty string.
+// Note: not a map because at this size slices are faster even in tight loops.
+func (t translateFields) goName(jsonName string) string {
+	for _, entry := range t {
+		if entry.jsonName == jsonName {
+			return entry.goName
+		}
+	}
+	return ""
+}
+
+// jsonName loops through a list of fields looking for one containing the goName and
+// returns the jsonName. If not found, returns the empty string.
+// Note: not a map because at this size slices are faster even in tight loops.
+func (t translateFields) jsonName(goName string) string {
+	for _, entry := range t {
+		if entry.goName == goName {
+			return entry.jsonName
+		}
+	}
+	return ""
+}
+
+var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+
+// findFields parses a struct and records the field tags for lookup. It will return an error
+// if any field has a type of *struct or struct whose value type implements json.Unmarshaler.
+func findFields(v reflect.Value) (translateFields, error) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name())
+	}
+	tfs := make([]translateField, 0, v.NumField())
+	for i := 0; i < v.NumField(); i++ {
+		tf := translateField{
+			goName:   v.Type().Field(i).Name,
+			jsonName: parseTag(v.Type().Field(i).Tag.Get("json")),
+		}
+		switch tf.jsonName {
+		case "", "-":
+			tf.jsonName = tf.goName
+		}
+		tfs = append(tfs, tf)
+
+		f := v.Field(i)
+		if f.Kind() == reflect.Ptr {
+			f = f.Elem()
+		}
+		if f.Kind() == reflect.Struct {
+			if f.Type().Implements(umarshalerType) {
+				return nil, fmt.Errorf("struct type %q has field %q whose value type implements json.Unmarshaler; this is not supported", v.Type().Name(), v.Type().Field(i).Name)
+			}
+		}
+	}
+	return tfs, nil
+}
+
+// parseTag returns the first entry in the tag. tag is the string
+// returned by reflect.StructField.Tag.Get("json").
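parseTag keeps only the name portion of a `json:"name,omitempty"` tag; together with findFields this gives the JSON-name-to-Go-name mapping, while unknown keys fall through to AdditionalFields. A hedged round-trip sketch, assuming this package's exported Unmarshal entry point (declared elsewhere in the package, not in this diff); the internal import path is only usable from inside this module:

```go
package main

import (
	"fmt"

	// Internal path as vendored here; illustrative only.
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
)

type tokenMeta struct {
	TokenType string `json:"token_type"`
	// Keys with no matching field land here instead of being dropped.
	AdditionalFields map[string]interface{}
}

func main() {
	raw := []byte(`{"token_type":"Bearer","foci":"1"}`)

	var m tokenMeta
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.TokenType) // Bearer
	// Unknown keys are stored as json.RawMessage by storeAdditional().
	fmt.Printf("%s\n", m.AdditionalFields["foci"]) // "1"
}
```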
+func parseTag(tag string) string {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx]
+	}
+	return tag
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
new file mode 100644
index 000000000000..a1c99621e9fc
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
@@ -0,0 +1,70 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package time provides custom types to translate time from JSON and other formats
+// into time.Time objects.
+package time
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Unix provides a type that can marshal and unmarshal a string representation
+// of the unix epoch into a time.Time object.
+type Unix struct {
+	T time.Time
+}
+
+// MarshalJSON implements encoding/json.MarshalJSON().
+func (u Unix) MarshalJSON() ([]byte, error) {
+	if u.T.IsZero() {
+		return []byte(""), nil
+	}
+	return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil
+}
+
+// UnmarshalJSON implements encoding/json.UnmarshalJSON().
+func (u *Unix) UnmarshalJSON(b []byte) error {
+	i, err := strconv.Atoi(strings.Trim(string(b), `"`))
+	if err != nil {
+		return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err)
+	}
+	u.T = time.Unix(int64(i), 0)
+	return nil
+}
+
+// DurationTime provides a type that can marshal and unmarshal a string representation
+// of a duration from now into a time.Time object.
+// Note: I'm not sure this is the best way to do this. What happens is we get a field
+// called "expires_in" that represents the seconds from now that this expires. We
+// turn that into a time we call .ExpiresOn. But maybe we should be recording
+// when the token was received at .TokenReceived and .ExpiresIn should remain as a duration.
+// Then we could have a method called ExpiresOn(). Honestly, the whole thing is
+// bad because the server doesn't return a concrete time. I think this is
+// cleaner, but it's not great either.
+type DurationTime struct {
+	T time.Time
+}
+
+// MarshalJSON implements encoding/json.MarshalJSON().
+func (d DurationTime) MarshalJSON() ([]byte, error) {
+	if d.T.IsZero() {
+		return []byte(""), nil
+	}
+
+	dt := time.Until(d.T)
+	return []byte(fmt.Sprintf("%d", int64(dt/time.Second))), nil // whole seconds until d.T
+}
+
+// UnmarshalJSON implements encoding/json.UnmarshalJSON().
+func (d *DurationTime) UnmarshalJSON(b []byte) error {
+	i, err := strconv.Atoi(strings.Trim(string(b), `"`))
+	if err != nil {
+		return fmt.Errorf("duration(%s) could not be converted from string to int: %w", string(b), err)
+	}
+	d.T = time.Now().Add(time.Duration(i) * time.Second)
+	return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
new file mode 100644
index 000000000000..41f4373fa585
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -0,0 +1,176 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package local contains a local HTTP server used with interactive authentication.
+package local
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+var okPage = []byte(`
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Complete</title>
+</head>
+<body>
+    <p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body>
+</html>
+`)
+
+const failPage = `
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Failed</title>
+</head>
+<body>
+    <p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+    <p>Error details: error %s error_description: %s</p>
+</body>
+</html>
+`
+
+// Result is the result from the redirect.
+type Result struct {
+	// Code is the code sent by the authority server.
+	Code string
+	// Err is set if there was an error.
+	Err error
+}
+
+// Server is an HTTP server.
+type Server struct {
+	// Addr is the address the server is listening on.
+	Addr     string
+	resultCh chan Result
+	s        *http.Server
+	reqState string
+}
+
+// New creates a local HTTP server and starts it.
+func New(reqState string, port int) (*Server, error) {
+	var l net.Listener
+	var err error
+	var portStr string
+	if port > 0 {
+		// use port provided by caller
+		l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
+		portStr = strconv.FormatInt(int64(port), 10)
+	} else {
+		// find a free port
+		for i := 0; i < 10; i++ {
+			l, err = net.Listen("tcp", "localhost:0")
+			if err != nil {
+				continue
+			}
+			addr := l.Addr().String()
+			portStr = addr[strings.LastIndex(addr, ":")+1:]
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	serv := &Server{
+		Addr:     fmt.Sprintf("http://localhost:%s", portStr),
+		s:        &http.Server{Addr: "localhost:0"},
+		reqState: reqState,
+		resultCh: make(chan Result, 1),
+	}
+	serv.s.Handler = http.HandlerFunc(serv.handler)
+
+	if err := serv.start(l); err != nil {
+		return nil, err
+	}
+
+	return serv, nil
+}
+
+func (s *Server) start(l net.Listener) error {
+	go func() {
+		err := s.s.Serve(l)
+		if err != nil {
+			select {
+			case s.resultCh <- Result{Err: err}:
+			default:
+			}
+		}
+	}()
+
+	return nil
+}
+
+// Result gets the result of the redirect operation. Once a single result is returned, the server
+// is shut down. ctx deadline will be honored.
+func (s *Server) Result(ctx context.Context) Result {
+	select {
+	case <-ctx.Done():
+		return Result{Err: ctx.Err()}
+	case r := <-s.resultCh:
+		return r
+	}
+}
+
+// Shutdown shuts down the server.
+func (s *Server) Shutdown() {
+	// Note: You might get clever and think you can do this in handler() as a defer, you can't.
+	_ = s.s.Shutdown(context.Background())
+}
+
+func (s *Server) putResult(r Result) {
+	select {
+	case s.resultCh <- r:
+	default:
+	}
+}
+
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+	q := r.URL.Query()
+
+	headerErr := q.Get("error")
+	if headerErr != "" {
+		desc := q.Get("error_description")
+		// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
+		// change this to s.error() and make s.error() write the failPage instead of an error code.
+		_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
+		s.putResult(Result{Err: errors.New(desc)})
+		return
+	}
+
+	respState := q.Get("state")
+	switch respState {
+	case s.reqState:
+	case "":
+		s.error(w, http.StatusInternalServerError, "server didn't send OAuth state")
+		return
+	default:
+		s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState)
+		return
+	}
+
+	code := q.Get("code")
+	if code == "" {
+		s.error(w, http.StatusInternalServerError, "authorization code missing in query string")
+		return
+	}
+
+	_, _ = w.Write(okPage)
+	s.putResult(Result{Code: code})
+}
+
+func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) {
+	err := fmt.Errorf(str, i...)
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 000000000000..b36943ce6e47 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,272 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. +type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. 
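Because Client's collaborators below are declared as interfaces (ResolveEndpointer, AccessTokens, FetchAuthority, FetchWSTrust), the token flows can be unit-tested without network I/O by swapping in stubs. A minimal sketch under that assumption; fakeResolver and the test are illustrative, not part of the library, and the internal import path only resolves inside this module:

```go
package oauth_test

import (
	"context"
	"testing"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)

// fakeResolver satisfies ResolveEndpointer with canned endpoints and no I/O.
type fakeResolver struct{}

func (fakeResolver) ResolveEndpoints(ctx context.Context, info authority.Info, upn string) (authority.Endpoints, error) {
	return authority.NewEndpoints(
		"https://example.test/authorize",
		"https://example.test/token",
		"https://example.test",
		"example.test",
	), nil
}

func TestResolveEndpoints(t *testing.T) {
	c := &oauth.Client{Resolver: fakeResolver{}}

	eps, err := c.ResolveEndpoints(context.Background(), authority.Info{}, "")
	if err != nil {
		t.Fatal(err)
	}
	if eps.TokenEndpoint != "https://example.test/token" {
		t.Fatalf("unexpected token endpoint: %s", eps.TokenEndpoint)
	}
}
```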
+type FetchWSTrust interface {
+	Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error)
+	SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error)
+}
+
+// Client provides tokens for various types of token requests.
+type Client struct {
+	Resolver     ResolveEndpointer
+	AccessTokens AccessTokens
+	Authority    FetchAuthority
+	WSTrust      FetchWSTrust
+}
+
+// New is the constructor for Client.
+func New(httpClient ops.HTTPClient) *Client {
+	r := ops.New(httpClient)
+	return &Client{
+		Resolver:     newAuthorityEndpoint(r),
+		AccessTokens: r.AccessTokens(),
+		Authority:    r.Authority(),
+		WSTrust:      r.WSTrust(),
+	}
+}
+
+// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance.
+func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
+	return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName)
+}
+
+// AADInstanceDiscovery performs instance discovery against the AAD authority.
+func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) {
+	return t.Authority.AADInstanceDiscovery(ctx, authorityInfo)
+}
+
+// AuthCode returns a token based on an authorization code.
+func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) {
+	if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	tResp, err := t.AccessTokens.FromAuthCode(ctx, req)
+	if err != nil {
+		return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err)
+	}
+	return tResp, nil
+}
+
+// Credential acquires a token from the authority using a client credentials grant.
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
+	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+
+	if cred.Secret != "" {
+		return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret)
+	}
+	jwt, err := cred.JWT(ctx, authParams)
+	if err != nil {
+		return accesstokens.TokenResponse{}, err
+	}
+	return t.AccessTokens.FromAssertion(ctx, authParams, jwt)
+}
+
+// OnBehalfOf acquires a token from the authority on behalf of a user, using either a client
+// secret or a certificate assertion as the client credential.
+func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + return t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. +func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm(user: %s) from authority: %w", authParams.Username, err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting SAML token info: %w", err) + } + return t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + case authority.Managed: + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. 
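The doc comment above is the whole contract: DeviceCode() returns the user-facing code, then Token() blocks and polls until one of the four outcomes. A hedged usage sketch before the implementation below; runDeviceCode is illustrative, the client ID and scope are placeholders, and *http.Client is assumed to satisfy ops.HTTPClient's Do-only interface:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)

// runDeviceCode sketches the two-step contract: surface the user code, then
// block in Token() until auth completes, the code expires, or ctx is done.
func runDeviceCode(ctx context.Context, authParams authority.AuthParams) (string, error) {
	client := oauth.New(http.DefaultClient)

	dc, err := client.DeviceCode(ctx, authParams)
	if err != nil {
		return "", err
	}
	// E.g. "To sign in, use a web browser to open ... and enter the code ... to authenticate."
	fmt.Println(dc.Result.Message)

	resp, err := dc.Token(ctx) // polls until success, expiry, or ctx.Done()
	if err != nil {
		return "", err
	}
	return resp.AccessToken, nil
}

func main() {
	info, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/common", true)
	if err != nil {
		panic(err)
	}
	params := authority.NewAuthParams("client-id-here", info) // placeholder client ID
	params.Scopes = []string{"user.read"}

	token, err := runDeviceCode(context.Background(), params)
	if err != nil {
		panic(err)
	}
	fmt.Println("got token of length", len(token))
}
```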
+func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) {
+	if d.accessTokens == nil {
+		return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. DeviceCode is not valid")
+	}
+
+	// Cap the context's lifetime at the device code's expiration.
+	var cancel context.CancelFunc
+	if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) {
+		ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn)
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+	defer cancel()
+
+	var interval = 50 * time.Millisecond
+	timer := time.NewTimer(interval)
+	defer timer.Stop()
+
+	for {
+		timer.Reset(interval)
+		select {
+		case <-ctx.Done():
+			return accesstokens.TokenResponse{}, ctx.Err()
+		case <-timer.C:
+			// Back off the polling interval, capped at 5 seconds.
+			interval += interval * 2
+			if interval > 5*time.Second {
+				interval = 5 * time.Second
+			}
+		}
+
+		token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result)
+		if err != nil && isWaitDeviceCodeErr(err) {
+			continue
+		}
+		return token, err // This handles if it was a non-wait error or success
+	}
+}
+
+type deviceCodeError struct {
+	Error string `json:"error"`
+}
+
+func isWaitDeviceCodeErr(err error) bool {
+	var c errors.CallErr
+	if !errors.As(err, &c) {
+		return false
+	}
+	if c.Resp.StatusCode != 400 {
+		return false
+	}
+	var dCErr deviceCodeError
+	defer c.Resp.Body.Close()
+	body, err := ioutil.ReadAll(c.Resp.Body)
+	if err != nil {
+		return false
+	}
+	err = json.Unmarshal(body, &dCErr)
+	if err != nil {
+		return false
+	}
+	if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" {
+		return true
+	}
+	return false
+}
+
+// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second
+// device and optionally the token once the code has been entered on the second device.
+func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) {
+	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+		return DeviceCode{}, err
+	}
+
+	dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams)
+	if err != nil {
+		return DeviceCode{}, err
+	}
+
+	return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil
+}
+
+func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
+	endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
+	if err != nil {
+		return fmt.Errorf("unable to resolve an endpoint: %w", err)
+	}
+	authParams.Endpoints = endpoints
+	return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
new file mode 100644
index 000000000000..ae9ae0aebd20
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -0,0 +1,408 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package accesstokens exposes a REST client for querying backend systems to get various types of
+access tokens (oauth) for use in authentication.
+
+These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to
+represent arguments and then encode them into the POST body message.
We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. 
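JWT(), implemented next, signs an assertion with the Cert/Key pair, so a Credential for certificate authentication just needs those two fields populated. A hedged loading sketch; FromPEM is illustrative (not part of the library) and assumes a certificate PEM block plus an unencrypted PKCS#8 key:

```go
// Package credexample is an illustrative sketch, not part of the library.
package credexample

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
)

// FromPEM builds the certificate flavor of accesstokens.Credential. It assumes
// certPath holds a certificate PEM block and keyPath an unencrypted PKCS#8 key.
func FromPEM(certPath, keyPath string) (*accesstokens.Credential, error) {
	certPEM, err := os.ReadFile(certPath)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return nil, fmt.Errorf("no PEM block found in %s", certPath)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, err
	}

	keyPEM, err := os.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	block, _ = pem.Decode(keyPEM)
	if block == nil {
		return nil, fmt.Errorf("no PEM block found in %s", keyPath)
	}
	key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}

	// Credential.Key is crypto.PrivateKey (an empty interface), so the parsed
	// key can be assigned directly; JWT() signs the assertion with it.
	return &accesstokens.Credential{Cert: cert, Key: key}, nil
}
```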
+func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. +func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. 
+func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) {
+	var qv url.Values
+
+	switch req.AppType {
+	case ATUnknown:
+		return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown")
+	case ATConfidential:
+		var err error
+		if req.Credential == nil {
+			return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app")
+		}
+		qv, err = prepURLVals(ctx, req.Credential, req.AuthParams)
+		if err != nil {
+			return TokenResponse{}, err
+		}
+	case ATPublic:
+		qv = url.Values{}
+	default:
+		return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recognize", req.AppType)
+	}
+
+	qv.Set(grantType, grant.AuthCode)
+	qv.Set("code", req.Code)
+	qv.Set("code_verifier", req.CodeChallenge)
+	qv.Set("redirect_uri", req.AuthParams.Redirecturi)
+	qv.Set(clientID, req.AuthParams.ClientID)
+	qv.Set(clientInfo, clientInfoVal)
+	addScopeQueryParam(qv, req.AuthParams)
+
+	return c.doTokenResp(ctx, req.AuthParams, qv)
+}
+
+// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token.
+func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) {
+	qv := url.Values{}
+	if appType == ATConfidential {
+		var err error
+		qv, err = prepURLVals(ctx, cc, authParams)
+		if err != nil {
+			return TokenResponse{}, err
+		}
+	}
+	qv.Set(grantType, grant.RefreshToken)
+	qv.Set(clientID, authParams.ClientID)
+	qv.Set(clientInfo, clientInfoVal)
+	qv.Set("refresh_token", refreshToken)
+	addScopeQueryParam(qv, authParams)
+
+	return c.doTokenResp(ctx, authParams, qv)
+}
+
+// FromClientSecret uses a client's secret (aka password) to get a new token.
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", 
base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) + return scopes +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 000000000000..3bec4a67cf10 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 000000000000..cc847001979d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,332 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . +type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. 
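IDToken.UnmarshalJSON above relies on the JWT compact form: three dot-separated, unpadded base64url segments, with the claims in the middle. The decode step in isolation, as a runnable sketch:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Header.Payload.Signature; the payload is {"sub":"123"} base64url-encoded
	// without padding, as required by RFC 7515 §2 (hence RawURLEncoding).
	jwt := "eyJhbGciOiJub25lIn0.eyJzdWIiOiIxMjMifQ."

	parts := strings.Split(jwt, ".")
	claims, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", claims) // {"sub":"123"}
}
```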
+func (i IDToken) IsZero() bool {
+	v := reflect.ValueOf(i)
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		if !field.IsZero() {
+			switch field.Kind() {
+			case reflect.Map, reflect.Slice:
+				if field.Len() == 0 {
+					continue
+				}
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// LocalAccountID extracts an account's local account ID from an ID token.
+func (i IDToken) LocalAccountID() string {
+	if i.Oid != "" {
+		return i.Oid
+	}
+	return i.Subject
+}
+
+// jwtDecoder is provided to allow tests to provide their own.
+var jwtDecoder = decodeJWT
+
+// ClientInfo is used to create a Home Account ID for an account.
+type ClientInfo struct {
+	UID  string `json:"uid"`
+	UTID string `json:"utid"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (c *ClientInfo) UnmarshalJSON(b []byte) error {
+	s := strings.Trim(string(b), `"`)
+	// Client info may be empty in some flows, e.g. certificate exchange.
+	if len(s) == 0 {
+		return nil
+	}
+
+	// Because we have a custom unmarshaler, you
+	// cannot directly call json.Unmarshal here. If you do, it will call this function
+	// recursively until we reach our recursion limit. We have to create a new type
+	// that doesn't have this method in order to use json.Unmarshal.
+	type clientInfo2 ClientInfo
+
+	raw, err := jwtDecoder(s)
+	if err != nil {
+		return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err)
+	}
+
+	var c2 clientInfo2
+
+	err = json.Unmarshal(raw, &c2)
+	if err != nil {
+		return fmt.Errorf("was unable to unmarshal decoded JWT in TokenResponse to ClientInfo: %w", err)
+	}
+
+	*c = ClientInfo(c2)
+	return nil
+}
+
+// HomeAccountID creates the home account ID.
+func (c ClientInfo) HomeAccountID() string {
+	if c.UID == "" || c.UTID == "" {
+		return ""
+	}
+	return fmt.Sprintf("%s.%s", c.UID, c.UTID)
+}
+
+// Scopes represents scopes in a TokenResponse.
+type Scopes struct {
+	Slice []string
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scopes) UnmarshalJSON(b []byte) error {
+	str := strings.Trim(string(b), `"`)
+	if len(str) == 0 {
+		return nil
+	}
+	sl := strings.Split(str, " ")
+	s.Slice = sl
+	return nil
+}
+
+// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow.
+type TokenResponse struct {
+	authority.OAuthResponseBase
+
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	FamilyID       string                    `json:"foci"`
+	IDToken        IDToken                   `json:"id_token"`
+	ClientInfo     ClientInfo                `json:"client_info"`
+	ExpiresOn      internalTime.DurationTime `json:"expires_in"`
+	ExtExpiresOn   internalTime.DurationTime `json:"ext_expires_in"`
+	GrantedScopes  Scopes                    `json:"scope"`
+	DeclinedScopes []string                  // This is derived
+
+	AdditionalFields map[string]interface{}
+
+	scopesComputed bool
+}
+
+// ComputeScope computes the final scopes based on what was granted by the server and
+// what our AuthParams were from the authority server.
+// Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted.
+// This behavior can be observed in client assertion flows, but can happen at any time; this check ensures we
+// treat those special responses properly. Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3
+func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) {
+	if len(tr.GrantedScopes.Slice) == 0 {
+		tr.GrantedScopes = Scopes{Slice: authParams.Scopes}
+	} else {
+		tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice)
+	}
+	tr.scopesComputed = true
+}
+
+// Validate validates the TokenResponse has basic valid values. It must be called
+// after ComputeScope() is called.
+func (tr *TokenResponse) Validate() error {
+	if tr.Error != "" {
+		return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription)
+	}
+
+	if tr.AccessToken == "" {
+		return errors.New("response is missing access_token")
+	}
+
+	if !tr.scopesComputed {
+		return fmt.Errorf("TokenResponse hasn't had ComputeScope() called")
+	}
+	return nil
+}
+
+func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string {
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		return authParams.AssertionHash()
+	}
+	if authParams.AuthorizationType == authority.ATClientCredentials {
+		return authParams.AppKey()
+	}
+	if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken {
+		return tr.ClientInfo.HomeAccountID()
+	}
+	return ""
+}
+
+func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string {
+	declined := []string{}
+	grantedMap := map[string]bool{}
+	for _, s := range grantedScopes {
+		grantedMap[strings.ToLower(s)] = true
+	}
+	// Compare the requested scopes with the granted scopes to see if any were declined.
+	for _, r := range requestedScopes {
+		if !grantedMap[strings.ToLower(r)] {
+			declined = append(declined, r)
+		}
+	}
+	return declined
+}
+
+// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object.
+// A JWT's header and payload are base64url encoded without padding:
+// https://tools.ietf.org/html/rfc7519#section-3 and
+// https://tools.ietf.org/html/rfc7515#section-2
+func decodeJWT(data string) ([]byte, error) {
+	// https://tools.ietf.org/html/rfc7515#appendix-C
+	return base64.RawURLEncoding.DecodeString(data)
+}
+
+// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage.
+type RefreshToken struct {
+	HomeAccountID     string `json:"home_account_id,omitempty"`
+	Environment       string `json:"environment,omitempty"`
+	CredentialType    string `json:"credential_type,omitempty"`
+	ClientID          string `json:"client_id,omitempty"`
+	FamilyID          string `json:"family_id,omitempty"`
+	Secret            string `json:"secret,omitempty"`
+	Realm             string `json:"realm,omitempty"`
+	Target            string `json:"target,omitempty"`
+	UserAssertionHash string `json:"user_assertion_hash,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// NewRefreshToken is the constructor for RefreshToken.
+func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken {
+	return RefreshToken{
+		HomeAccountID:  homeID,
+		Environment:    env,
+		CredentialType: "RefreshToken",
+		ClientID:       clientID,
+		FamilyID:       familyID,
+		Secret:         refreshToken,
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
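Key(), shown below, joins the identity fields with the shared cache separator, falling back from family ID to client ID. A tiny sketch of the resulting shape, assuming shared.CacheKeySeparator is "-" (an assumption; the constant lives in the shared package, outside this diff):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const sep = "-" // assumed value of shared.CacheKeySeparator
	fields := []string{
		"uid.utid",                  // HomeAccountID
		"login.microsoftonline.com", // Environment
		"RefreshToken",              // CredentialType
		"1",                         // FamilyID, falling back to ClientID when empty
	}
	fmt.Println(strings.Join(fields, sep))
	// Output: uid.utid-login.microsoftonline.com-RefreshToken-1
}
```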
+func (rt RefreshToken) Key() string {
+	var fourth = rt.FamilyID
+	if fourth == "" {
+		fourth = rt.ClientID
+	}
+
+	return strings.Join(
+		[]string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth},
+		shared.CacheKeySeparator,
+	)
+}
+
+func (rt RefreshToken) GetSecret() string {
+	return rt.Secret
+}
+
+// DeviceCodeResult stores the response from the STS device code endpoint.
+type DeviceCodeResult struct {
+	// UserCode is the code the user needs to provide when authenticating at the verification URI.
+	UserCode string
+	// DeviceCode is the code used in the access token request.
+	DeviceCode string
+	// VerificationURL is the URL where the user can authenticate.
+	VerificationURL string
+	// ExpiresOn is when the device code expires.
+	ExpiresOn time.Time
+	// Interval is the interval, in seconds, at which the STS should be polled.
+	Interval int
+	// Message is the message which should be displayed to the user.
+	Message string
+	// ClientID is the UUID issued by the authorization server for your application.
+	ClientID string
+	// Scopes is the OpenID scopes used to request access to a protected API.
+	Scopes []string
+}
+
+// NewDeviceCodeResult creates a DeviceCodeResult instance.
+func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult {
+	return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes}
+}
+
+func (dcr DeviceCodeResult) String() string {
+	return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
new file mode 100644
index 000000000000..6004f4a26072
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -0,0 +1,418 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+ +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + defaultHost = "login.microsoftonline.com" + autoDetectRegion = "TryAutoDetect" +) + +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. 
+const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeaccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + } +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New("authority does not have two segments") +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. +func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool) (Info, error) { + authorityURI = strings.ToLower(authorityURI) + var authorityType string + u, err := url.Parse(authorityURI) + if err != nil { + return Info{}, fmt.Errorf("authorityURI passed could not be parsed: %w", err) + } + if u.Scheme != "https" { + return Info{}, fmt.Errorf("authorityURI(%s) must have scheme https", authorityURI) + } + + tenant, err := firstPathSegment(u) + if tenant == "adfs" { + authorityType = ADFS + } else { + authorityType = AAD + } + + if err != nil { + return Info{}, err + } + + return Info{ + Host: u.Hostname(), + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Hostname(), tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. 
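NewInfoFromAuthorityURI above derives everything from the URL's host and first path segment: "adfs" selects the ADFS authority type, anything else selects AAD. A usage sketch (the internal import path is shown for illustration only; expected outputs follow from the code above):

```go
package main

import (
	"fmt"

	// Internal path as vendored here; illustrative only.
	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)

func main() {
	info, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/contoso.onmicrosoft.com", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.AuthorityType)         // MSSTS (the AAD constant)
	fmt.Println(info.Tenant)                // contoso.onmicrosoft.com
	fmt.Println(info.CanonicalAuthorityURI) // https://login.microsoftonline.com/contoso.onmicrosoft.com/
}
```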
+type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case "login.microsoft.com", "login.windows.net", "sts.windows.net", defaultHost: + environment = "r." 
+ defaultHost + } + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeaccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 000000000000..10039773b067 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 000000000000..0620b3e41344 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,321 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. + CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. +func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. 
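+ // (Sketch of the intent: with the custom engine, a response key that has no matching
+ // struct field, say a hypothetical "foo":"bar", is kept in resp.AdditionalFields["foo"]
+ // instead of being ignored the way encoding/json would ignore it.)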
+ var marshal = json.Marshal
+ var unmarshal = json.Unmarshal
+ if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
+ marshal = customJSON.Marshal
+ unmarshal = customJSON.Unmarshal
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ addStdHeaders(headers)
+
+ req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+
+ if body != nil {
+ // Note: in case you're wondering why we are not gzip encoding...
+ // I'm not sure if these various services support gzip on send.
+ headers.Add("Content-Type", "application/json; charset=utf-8")
+ data, err := marshal(body)
+ if err != nil {
+ return fmt.Errorf("bug: Client.JSONCall(): could not marshal the body object: %w", err)
+ }
+ req.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+ req.Method = http.MethodPost
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if err := unmarshal(data, resp); err != nil {
+ return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+ }
+ }
+ return nil
+}
+
+// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when
+// sending application/xml. If sending XML via SOAP, use SOAPCall().
+func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error {
+ if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+ return err
+ }
+
+ if qv == nil {
+ qv = url.Values{}
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in the original Mex(), but...
+ addStdHeaders(headers)
+
+ return c.xmlCall(ctx, u, headers, "", resp)
+}
+
+// SOAPCall makes a SOAP call to the given endpoint with the given action and body, decoding the response into resp.
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error {
+ if body == "" {
+ return fmt.Errorf("cannot make a SOAP call with body set to empty string")
+ }
+
+ if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+ return err
+ }
+
+ if qv == nil {
+ qv = url.Values{}
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ headers.Set("Content-Type", "application/soap+xml; charset=utf-8")
+ headers.Set("SOAPAction", action)
+ addStdHeaders(headers)
+
+ return c.xmlCall(ctx, u, headers, body, resp)
+}
+
+// xmlCall sends the XML in body and decodes into resp. This simply does the transport and relies on
+// an upper level call to set things such as SOAP parameters and Content-Type, if required.
+func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error {
+ req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+
+ if len(body) > 0 {
+ req.Method = http.MethodPost
+ req.Body = ioutil.NopCloser(strings.NewReader(body))
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ return xml.Unmarshal(data, resp)
+}
+
+// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data
+// to the backend and receive JSON back. qv will be encoded into the request body.
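+//
+// A minimal sketch of a call site (the endpoint, form values, and tokenResponse type
+// are hypothetical):
+//
+//	qv := url.Values{}
+//	qv.Set("grant_type", "client_credentials")
+//	var resp tokenResponse // must be passed below as a pointer to struct
+//	err := client.URLFormCall(ctx, "https://login.microsoftonline.com/contoso/oauth2/v2.0/token", qv, &resp)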
+func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error {
+ if len(qv) == 0 {
+ return fmt.Errorf("URLFormCall() requires qv to have non-zero length")
+ }
+
+ v := reflect.ValueOf(resp)
+ if err := c.checkResp(v); err != nil {
+ return err
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+
+ headers := http.Header{}
+ headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ addStdHeaders(headers)
+
+ enc := qv.Encode()
+
+ req := &http.Request{
+ Method: http.MethodPost,
+ URL: u,
+ Header: headers,
+ ContentLength: int64(len(enc)),
+ Body: ioutil.NopCloser(strings.NewReader(enc)),
+ GetBody: func() (io.ReadCloser, error) {
+ return ioutil.NopCloser(strings.NewReader(enc)), nil
+ },
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ // checkResp above already guaranteed resp is a non-nil *struct, so decode directly.
+ var unmarshal = json.Unmarshal
+ if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
+ unmarshal = customJSON.Unmarshal
+ }
+ if err := unmarshal(data, resp); err != nil {
+ return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
+ }
+ return nil
+}
+
+// do makes the HTTP call to the server and returns the contents of the body.
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) {
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+ }
+ req = req.WithContext(ctx)
+
+ reply, err := c.client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("server response error:\n %w", err)
+ }
+ defer reply.Body.Close()
+
+ data, err := c.readBody(reply)
+ if err != nil {
+ return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err)
+ }
+ reply.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+
+ // NOTE: the status code is deliberately checked after reading the body so that any
+ // error message from the server can be included in our error.
+ switch reply.StatusCode {
+ case 200, 201:
+ default:
+ sd := strings.TrimSpace(string(data))
+ if sd != "" {
+ // We probably have the error in the body.
+ return nil, errors.CallErr{
+ Req: req,
+ Resp: reply,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd),
+ }
+ }
+ return nil, errors.CallErr{
+ Req: req,
+ Resp: reply,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode),
+ }
+ }
+
+ return data, nil
+}
+
+// checkResp checks a response object to make sure it is a pointer to a struct.
+func (c *Client) checkResp(v reflect.Value) error {
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
+ }
+ return nil
+}
+
+// readBody reads the body out of an *http.Response. It supports gzip encoded responses.
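+// Only the empty encoding and "gzip" (the one addStdHeaders advertises via
+// Accept-Encoding) are accepted; any other Content-Encoding is reported as a bug.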
+func (c *Client) readBody(resp *http.Response) ([]byte, error) {
+ var reader io.Reader = resp.Body
+ switch resp.Header.Get("Content-Encoding") {
+ case "":
+ // Do nothing
+ case "gzip":
+ reader = gzipDecompress(resp.Body)
+ default:
+ return nil, fmt.Errorf("bug: comm.Client.readBody(): content was sent with unsupported Content-Encoding %q", resp.Header.Get("Content-Encoding"))
+ }
+ return ioutil.ReadAll(reader)
+}
+
+var testID string
+
+// addStdHeaders adds the standard headers we use on all calls.
+func addStdHeaders(headers http.Header) http.Header {
+ headers.Set("Accept-Encoding", "gzip")
+ // Tests can set testID to get a static, reproducible request ID.
+ id := testID
+ if id == "" {
+ id = uuid.New().String()
+ }
+ headers.Set("client-request-id", id)
+ headers.Set("Return-Client-Request-Id", "false")
+ headers.Set("x-client-sku", "MSAL.Go")
+ headers.Set("x-client-os", runtime.GOOS)
+ headers.Set("x-client-cpu", runtime.GOARCH)
+ headers.Set("x-client-ver", version.Version)
+ return headers
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
new file mode 100644
index 000000000000..4d3dbfcf0a6b
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
@@ -0,0 +1,33 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package comm
+
+import (
+ "compress/gzip"
+ "io"
+)
+
+func gzipDecompress(r io.Reader) io.Reader {
+ pipeOut, pipeIn := io.Pipe()
+
+ gzipReader, err := gzip.NewReader(r)
+ if err != nil {
+ // Previously this error was discarded, which would have left the goroutine below
+ // reading from a nil *gzip.Reader; surface it to the caller through the pipe instead.
+ pipeIn.CloseWithError(err) //nolint
+ return pipeOut
+ }
+
+ go func() {
+ // decompression bomb would have to come from Azure services.
+ // If we want to limit, we should do that in comm.do().
+ _, err := io.Copy(pipeIn, gzipReader) //nolint
+ if err != nil {
+ // The copy error reaches the reader via the pipe.
+ pipeIn.CloseWithError(err) //nolint
+ gzipReader.Close()
+ return
+ }
+ if err := gzipReader.Close(); err != nil {
+ // The close error reaches the reader via the pipe.
+ pipeIn.CloseWithError(err) //nolint
+ return
+ }
+ pipeIn.Close()
+ }()
+ return pipeOut
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
new file mode 100644
index 000000000000..b628f61ac081
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package grant holds types of grants issued by authorization services.
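+//
+// These constants are the values sent in token request form fields such as "grant_type".
+// A hedged sketch (qv and refreshToken are hypothetical variables):
+//
+//	qv.Set("grant_type", grant.RefreshToken)
+//	qv.Set("refresh_token", refreshToken)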
+package grant
+
+const (
+ Password = "password"
+ JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer"
+ SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer"
+ DeviceCode = "device_code"
+ AuthCode = "authorization_code"
+ RefreshToken = "refresh_token"
+ ClientCredential = "client_credentials"
+ ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
+)
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
new file mode 100644
index 000000000000..1f9c543fa3b2
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
@@ -0,0 +1,56 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package ops provides operations to various backend services using REST clients.
+
+The REST type provides several clients that can be used to communicate with backends.
+Usage is simple:
+
+	rest := ops.New(httpClient)
+
+	// Creates an authority client and calls the UserRealm() method.
+	userRealm, err := rest.Authority().UserRealm(ctx, authParameters)
+	if err != nil {
+		// Do something
+	}
+*/
+package ops
+
+import (
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
+)
+
+// HTTPClient represents an HTTP client.
+// It's usually an *http.Client from the standard library.
+type HTTPClient = comm.HTTPClient
+
+// REST provides REST clients for communicating with various backends used by MSAL.
+type REST struct {
+ client *comm.Client
+}
+
+// New is the constructor for REST.
+func New(httpClient HTTPClient) *REST {
+ return &REST{client: comm.New(httpClient)}
+}
+
+// Authority returns a client for querying information about various authorities.
+func (r *REST) Authority() authority.Client {
+ return authority.Client{Comm: r.client}
+}
+
+// AccessTokens returns a client that can be used to get various access tokens for
+// authorization purposes.
+func (r *REST) AccessTokens() accesstokens.Client {
+ return accesstokens.Client{Comm: r.client}
+}
+
+// WSTrust provides access to various metadata in a WSTrust service. This data can
+// be used to gain tokens based on SAML data using the client provided by AccessTokens().
+func (r *REST) WSTrust() wstrust.Client {
+ return wstrust.Client{Comm: r.client}
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
new file mode 100644
index 000000000000..a2bb6278ae5f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=endpointType"; DO NOT EDIT.
+
+package defs
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 000000000000..6497270028d8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy 
struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken `xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy 
Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + +type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} 
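+
+// For orientation, the binding types above decode WSDL fragments shaped roughly like
+// the following (a hypothetical, heavily trimmed example; element and attribute names
+// follow WS-Trust MEX documents):
+//
+//	<wsdl:binding name="UserNameWSTrustBinding" type="tns:IWSTrust13Sync">
+//	  <wsp:PolicyReference URI="#UserNameWSTrustBinding_policy"/>
+//	  <soap12:binding transport="http://schemas.xmlsoap.org/soap/http"/>
+//	  <wsdl:operation name="Trust13Issue">
+//	    <soap12:operation soapAction="http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" style="document"/>
+//	  </wsdl:operation>
+//	</wsdl:binding>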
+ +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 000000000000..7d0725565777 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string 
`xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string 
`xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 000000000000..6fe5efa8a9ab --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 000000000000..8fad5efb5de5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 000000000000..e3d19886ebc5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 000000000000..47cd4c692d62 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code
+	// but only used in a switch where it errored. Since there was only one value, a default
+	// worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually
+	// sure what's going on here. It seems like we have half support. For now this is here just
+	// for documentation purposes in case we are going to add support.
+	//
+	// SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue"
+)
+
+// SAMLTokenInfo provides SAML information that is used to generate a SAML token.
+func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) {
+	var wsTrustRequestMessage string
+	var err error
+
+	switch authParameters.AuthorizationType {
+	case authority.ATWindowsIntegrated:
+		wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN)
+		if err != nil {
+			return SamlTokenInfo{}, err
+		}
+	case authority.ATUsernamePassword:
+		wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword(
+			cloudAudienceURN, authParameters.Username, authParameters.Password)
+		if err != nil {
+			return SamlTokenInfo{}, err
+		}
+	default:
+		return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType)
+	}
+
+	var soapAction string
+	switch endpoint.Version {
+	case defs.Trust13:
+		soapAction = SoapActionDefault
+	case defs.Trust2005:
+		return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented")
+	default:
+		return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version)
+	}
+
+	resp := defs.SAMLDefinitions{}
+	err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp)
+	if err != nil {
+		return SamlTokenInfo{}, err
+	}
+
+	return c.samlAssertion(resp)
+}
+
+const (
+	samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion"
+	samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion"
+)
+
+func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) {
+	for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse {
+		token := tokenResponse.RequestedSecurityToken
+		if token.Assertion.XMLName.Local != "" {
+			assertion := token.AssertionRawXML
+
+			samlVersion := token.Assertion.Saml
+			switch samlVersion {
+			case samlv1Assertion:
+				return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil
+			case samlv2Assertion:
+				return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil
+			}
+			return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion)
+		}
+	}
+	return SamlTokenInfo{}, errors.New("unknown WS-Trust version")
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
new file mode 100644
index 000000000000..893ef4814f78
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -0,0 +1,152 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too
+// tired at this point to do it. It, like much of the other *Manager code I found, was broken
+// because it lacked mutex protection.
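+// All shared state below (the endpoint cache) is therefore guarded by
+// authorityEndpoint.mu, so concurrent ResolveEndpoints calls are safe.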
+
+package oauth
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+)
+
+// ADFS is an active directory federation service authority type.
+const ADFS = "ADFS"
+
+type cacheEntry struct {
+	Endpoints             authority.Endpoints
+	ValidForDomainsInList map[string]bool
+}
+
+func createcacheEntry(endpoints authority.Endpoints) cacheEntry {
+	return cacheEntry{endpoints, map[string]bool{}}
+}
+
+// authorityEndpoint retrieves endpoints from an authority for auth and token acquisition.
+type authorityEndpoint struct {
+	rest *ops.REST
+
+	mu    sync.Mutex
+	cache map[string]cacheEntry
+}
+
+// newAuthorityEndpoint is the constructor for authorityEndpoint.
+func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint {
+	m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}}
+	return m
+}
+
+// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance.
+func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
+	if authorityInfo.AuthorityType == ADFS && len(userPrincipalName) == 0 {
+		return authority.Endpoints{}, errors.New("UPN required for authority validation for ADFS")
+	}
+
+	if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found {
+		return endpoints, nil
+	}
+
+	endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
+	if err != nil {
+		return authority.Endpoints{}, err
+	}
+
+	resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint)
+	if err != nil {
+		return authority.Endpoints{}, err
+	}
+	if err := resp.Validate(); err != nil {
+		return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err)
+	}
+
+	tenant := authorityInfo.Tenant
+
+	endpoints := authority.NewEndpoints(
+		strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1),
+		strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1),
+		strings.Replace(resp.Issuer, "{tenant}", tenant, -1),
+		authorityInfo.Host)
+
+	m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints)
+
+	return endpoints, nil
+}
+
+// cachedEndpoints returns the cached endpoints if they exist. If not, it returns false.
+func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
+		if authorityInfo.AuthorityType == ADFS {
+			domain, err := adfsDomainFromUpn(userPrincipalName)
+			if err == nil {
+				if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
+					return cacheEntry.Endpoints, true
+				}
+			}
+		}
+		return cacheEntry.Endpoints, true
+	}
+	return authority.Endpoints{}, false
+}
+
+func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	updatedCacheEntry := createcacheEntry(endpoints)
+
+	if authorityInfo.AuthorityType == ADFS {
+		// Since we're here, we've made a call to the backend. We want to ensure we're caching
+		// the latest values from the server.
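+		// Carry any previously validated domains over to the refreshed entry so they
+		// remain valid alongside the new endpoints.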
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 000000000000..f7e12a71bf31 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. 
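+// A non-nil but empty map or slice field still counts as zero here, which is why the
+// reflection loop below special-cases those kinds rather than relying on IsZero alone.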
+func (acc Account) IsZero() bool {
+	v := reflect.ValueOf(acc)
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		if !field.IsZero() {
+			switch field.Kind() {
+			case reflect.Map, reflect.Slice:
+				if field.Len() == 0 {
+					continue
+				}
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// DefaultClient is our default shared HTTP client.
+var DefaultClient = &http.Client{}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
new file mode 100644
index 000000000000..2ffdf0305da6
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -0,0 +1,8 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package version keeps the version number of the client package.
+package version
+
+// Version is the version of this client package that is communicated to the server.
+const Version = "0.6.0"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
new file mode 100644
index 000000000000..19118c25a2c9
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -0,0 +1,398 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package public provides a client for authentication of "public" applications. A "public"
+application is defined as an app that runs on client devices (android, ios, windows, linux, ...).
+These devices are "untrusted" and access resources via web APIs that must authenticate.
+*/
+package public
+
+/*
+Design note:
+
+public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams, is a copy that is free to be manipulated here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// Base usage details should be included in the package documentation.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"strconv"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+	"github.com/google/uuid"
+	"github.com/pkg/browser"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// Options configures the Client's behavior.
+type Options struct {
+	// Accessor controls cache persistence. By default there is no cache persistence.
+	// This can be set with the WithCache() option.
+ Accessor cache.ExportReplace + + // The host of the Azure Active Directory authority. The default is https://login.microsoftonline.com/common. + // This can be changed with the WithAuthority() option. + Authority string + + // The HTTP client used for making requests. + // It defaults to a shared http.Client. + HTTPClient ops.HTTPClient +} + +func (p *Options) validate() error { + u, err := url.Parse(p.Authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *Options) + +// WithAuthority allows for a custom authority to be set. This must be a valid https url. +func WithAuthority(authority string) Option { + return func(o *Options) { + o.Authority = authority + } +} + +// WithCache allows you to set some type of cache for storing authentication tokens. +func WithCache(accessor cache.ExportReplace) Option { + return func(o *Options) { + o.Accessor = accessor + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *Options) { + o.HTTPClient = httpClient + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := Options{ + Authority: base.AuthorityPublicCloud, + HTTPClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithCacheAccessor(opts.Accessor)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// CreateAuthCodeURL creates a URL used to acquire an authorization code. +func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) { + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, pca.base.AuthParams) +} + +// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type AcquireTokenSilentOptions struct { + // Account represents the account to use. To set, use the WithSilentAccount() option. + Account Account +} + +// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent(). +type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions) + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) AcquireTokenSilentOption { + return func(a *AcquireTokenSilentOptions) { + a.Account = account + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
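+// If the cached access token has expired, a cached refresh token (when present) is
+// redeemed transparently to mint a new one.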
+func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) {
+	opts := AcquireTokenSilentOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+
+	silentParameters := base.AcquireTokenSilentParameters{
+		Scopes:      scopes,
+		Account:     opts.Account,
+		RequestType: accesstokens.ATPublic,
+		IsAppCache:  false,
+	}
+
+	return pca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (AuthResult, error) {
+	authParams := pca.base.AuthParams
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATUsernamePassword
+	authParams.Username = username
+	authParams.Password = password
+
+	token, err := pca.base.Token.UsernamePassword(ctx, authParams)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+type DeviceCodeResult = accesstokens.DeviceCodeResult
+
+// DeviceCode provides the results of the device code flow's first stage (containing the code)
+// that must be entered on the second device and provides a method to retrieve the AuthenticationResult
+// once that code has been entered and verified.
+type DeviceCode struct {
+	// Result holds the information about the device code (such as the code).
+	Result DeviceCodeResult
+
+	authParams authority.AuthParams
+	client     Client
+	dc         oauth.DeviceCode
+}
+
+// AuthenticationResult retrieves the AuthenticationResult once the user enters the code
+// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context
+// is cancelled or the token expires.
+func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) {
+	token, err := d.dc.Token(ctx)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+}
+
+// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token.
+// The returned DeviceCode holds the code the user must enter on the second device and a method that blocks until the token arrives.
+func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (DeviceCode, error) {
+	authParams := pca.base.AuthParams
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATDeviceCode
+
+	dc, err := pca.base.Token.DeviceCode(ctx, authParams)
+	if err != nil {
+		return DeviceCode{}, err
+	}
+
+	return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil
+}
+
+// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type AcquireTokenByAuthCodeOptions struct {
+	Challenge string
+}
+
+// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode().
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions)
+
+// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) AcquireTokenByAuthCodeOption {
+	return func(a *AcquireTokenByAuthCodeOptions) {
+		a.Challenge = challenge
+	}
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) {
+	opts := AcquireTokenByAuthCodeOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+
+	params := base.AcquireTokenAuthCodeParameters{
+		Scopes:      scopes,
+		Code:        code,
+		Challenge:   opts.Challenge,
+		AppType:     accesstokens.ATPublic,
+		RedirectURI: redirectURI,
+	}
+
+	return pca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// Accounts gets all the accounts in the token cache.
+// If there are no accounts in the cache the returned slice is empty.
+func (pca Client) Accounts() []Account {
+	return pca.base.AllAccounts()
+}
+
+// RemoveAccount signs the account out and forgets the account from the token cache.
+func (pca Client) RemoveAccount(account Account) error {
+	pca.base.RemoveAccount(account)
+	return nil
+}
+
+// InteractiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow.
+type InteractiveAuthOptions struct {
+	// Used to specify a custom port for the local server, e.g. http://localhost:portnumber.
+	// All other URI components are ignored.
+	RedirectURI string
+}
+
+// InteractiveAuthOption changes options inside InteractiveAuthOptions used in .AcquireTokenInteractive().
+type InteractiveAuthOption func(*InteractiveAuthOptions)
+
+// WithRedirectURI uses the specified redirect URI for interactive auth.
+func WithRedirectURI(redirectURI string) InteractiveAuthOption {
+	return func(o *InteractiveAuthOptions) {
+		o.RedirectURI = redirectURI
+	}
+}
+
+// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication
+func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, options ...InteractiveAuthOption) (AuthResult, error) {
+	opts := InteractiveAuthOptions{}
+	for _, opt := range options {
+		opt(&opts)
+	}
+	// The code verifier is a random 32-byte sequence that's been base-64 encoded without padding.
+	// It's used to prevent MitM attacks during the auth code flow, see https://tools.ietf.org/html/rfc7636.
+	cv, challenge, err := codeVerifier()
+	if err != nil {
+		return AuthResult{}, err
+	}
+	var redirectURL *url.URL
+	if opts.RedirectURI != "" {
+		redirectURL, err = url.Parse(opts.RedirectURI)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+	authParams := pca.base.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
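+	// The S256 challenge set below is paired with the verifier cv generated above; cv is
+	// redeemed later via NewCodeChallengeRequest to complete the PKCE exchange (RFC 7636).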
+ authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. +func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 689e4da6bdac..2342a7fcd6fb 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -113,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta return hdr } +// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file +// from the tar header and returns the security descriptor into a byte slice. 
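+// Both the legacy SDDL record and the raw binary record are honored; if both are
+// present, the raw record is decoded last and wins.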
+func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { + // Maintaining old SDDL-based behavior for backward + // compatibility. All new tar headers written by this library + // will have raw binary for the security descriptor. + var sd []byte + var err error + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + return sd, nil +} + +// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the +// current file from the tar header and returns it as a byte slice. +func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { + var eas []winio.ExtendedAttribute + var eadata []byte + var err error + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err = winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + } + return eadata, nil +} + +// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header +// and encodes it into a byte slice. The file for which this function is called must be a +// symlink. +func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + return winio.EncodeReparsePoint(&rp) +} + // WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. // // This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. @@ -358,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win // tar file that was not processed, or io.EOF is there are no more. func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) - var sd []byte - var err error - // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written - // by this library will have raw binary for the security descriptor. 
- if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } + + sd, err := SecurityDescriptorFromTarHeader(hdr) + if err != nil { + return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ @@ -388,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } - var eas []winio.ExtendedAttribute - for k, v := range hdr.PAXRecords { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) + + eadata, err := ExtendedAttributesFromTarHeader(hdr) + if err != nil { + return nil, err } - if len(eas) != 0 { - eadata, err := winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } + if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), @@ -420,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } + if hdr.Typeflag == tar.TypeSymlink { - _, isMountPoint := hdr.PAXRecords[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - reparse := winio.EncodeReparsePoint(&rp) + reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), @@ -439,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( if err != nil { return nil, err } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { bhdr := winio.BackupHeader{ Id: winio.BackupData, diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 0385e4108129..293ab54c80c3 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -143,6 +144,11 @@ func (f *win32File) Close() error { return nil } +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + // prepareIo prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIo() (*ioOperation, error) { diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b632f8f8bb98..b2b644d002aa 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error { return conn.sock.Close() } +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) if err != nil { return os.NewSyscallError("shutdown", err) } return nil } -// CloseRead shuts down the read end of the socket. 
+// CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { @@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error { return nil } -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index f497c0e39178..2d9161e2deee 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -14,8 +14,6 @@ import ( "encoding/binary" "fmt" "strconv" - - "golang.org/x/sys/windows" ) // Variant specifies which GUID variant (or "type") of the GUID. It determines @@ -41,13 +39,6 @@ type Version uint8 var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. func NewV4() (GUID, error) { var b [16]byte diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 000000000000..f64d828c0ba4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 000000000000..83617f4eee9a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. 
+type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go index fca241590cca..602920786c90 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -3,11 +3,10 @@ package security import ( + "fmt" "os" "syscall" "unsafe" - - "github.com/pkg/errors" ) type ( @@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error { // Stat (to determine if `name` is a directory). s, err := os.Stat(name) if err != nil { - return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) } // Get a handle to the file/directory. Must defer Close on success. @@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error { sd := uintptr(0) origDACL := uintptr(0) if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) } defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) @@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error { // And finally use SetSecurityInfo to apply the updated DACL. if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) } return nil @@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) { } fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) } return fd, nil } @@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp // Generate pointers to the SIDs based on the string SIDs sid, err := syscall.StringToSid(sidVmGroup) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) } inheritance := inheritModeNoInheritance @@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp modifiedDACL := uintptr(0) if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) } return modifiedDACL, nil diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index a33a36c0ffba..f7f78fc23048 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package vhd @@ -7,14 +8,13 @@ import ( "syscall" "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) //go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go //sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, 
parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk //sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk //sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk //sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath @@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } +// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + type AttachVersion2 struct { RestrictedOffset uint64 RestrictedLength uint64 } type AttachVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 + Version uint32 Version2 AttachVersion2 } @@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { return err } - if err := syscall.CloseHandle(handle); err != nil { - return err - } - return nil + return syscall.CloseHandle(handle) } // DetachVirtualDisk detaches a virtual hard disk by handle. 
func DetachVirtualDisk(handle syscall.Handle) (err error) { if err := detachVirtualDisk(handle, 0, 0); err != nil { - return errors.Wrap(err, "failed to detach virtual disk") + return fmt.Errorf("failed to detach virtual disk: %w", err) } return nil } @@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua parameters, nil, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) { AttachVirtualDiskFlagNone, ¶ms, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual var ( handle syscall.Handle defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 ) if parameters.Version != 2 { return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } if err := openVirtualDisk( &defaultType, vhdPath, uint32(virtualDiskAccessMask), uint32(openVirtualDiskFlags), - parameters, + params, &handle, ); err != nil { - return 0, errors.Wrap(err, "failed to open virtual disk") + return 0, fmt.Errorf("failed to open virtual disk: %w", err) } return handle, nil } @@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, nil, &handle, ); err != nil { - return handle, errors.Wrap(err, "failed to create virtual disk") + return handle, fmt.Errorf("failed to create virtual disk: %w", err) } return handle, nil } @@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { &diskPathSizeInBytes, &diskPhysicalPathBuf[0], ); err != nil { - return "", errors.Wrap(err, "failed to get disk physical path") + return "", fmt.Errorf("failed to get disk physical path: %w", err) } return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil } @@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error createParams, ) if err != nil { - return fmt.Errorf("failed to create differencing vhd: %s", err) + return fmt.Errorf("failed to create differencing vhd: %w", err) } if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %s", err) + return fmt.Errorf("failed to close differencing vhd handle: %w", err) } return nil } diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 7fb5f3651b95..1d7498db3bee 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint return } -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, 
openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { var _p0 *uint16 _p0, win32err = syscall.UTF16PtrFromString(path) if win32err != nil { @@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) } -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) if r0 != 0 { win32err = syscall.Errno(r0) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go index 27a62a723861..f46af33bb650 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -86,6 +86,12 @@ type Container interface { // container to be terminated by some error condition (including calling // Close). Wait() error + // WaitChannel returns the wait channel of the container + WaitChannel() <-chan struct{} + // WaitError returns the container termination error. + // This function should only be called after the channel in WaitChannel() + // is closed. Otherwise it is not thread safe. 
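+	// Typical usage is to block on (or select over) WaitChannel() and then read WaitError().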
+ WaitError() error // Modify sends a request to modify container resources Modify(ctx context.Context, config interface{}) error } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index e21354ffd66a..295d4b849c3e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -154,7 +154,7 @@ func (e *HcsError) Error() string { func (e *HcsError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *HcsError) Timeout() bool { @@ -193,7 +193,7 @@ func (e *SystemError) Error() string { func (e *SystemError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *SystemError) Timeout() bool { @@ -224,7 +224,7 @@ func (e *ProcessError) Error() string { func (e *ProcessError) Temporary() bool { err, ok := e.Err.(net.Error) - return ok && err.Temporary() + return ok && err.Temporary() //nolint:staticcheck } func (e *ProcessError) Timeout() bool { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index 75499c967f0f..a76f6b253e2d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -4,17 +4,22 @@ import ( "context" "encoding/json" "errors" + "fmt" "strings" "sync" "syscall" + "time" "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/hcs/schema1" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/jobobject" "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/timeout" "github.com/Microsoft/hcsshim/internal/vmcompute" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) @@ -28,7 +33,8 @@ type System struct { waitBlock chan struct{} waitError error exitError error - os, typ string + os, typ, owner string + startTime time.Time } func newSystem(id string) *System { @@ -38,6 +44,11 @@ func newSystem(id string) *System { } } +// Implementation detail for silo naming, this should NOT be relied upon very heavily. +func siloNameFmt(containerID string) string { + return fmt.Sprintf(`\Container_%s`, containerID) +} + // CreateComputeSystem creates a new compute system with the given configuration but does not start it. func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { operation := "hcs::CreateComputeSystem" @@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error { } computeSystem.typ = strings.ToLower(props.SystemType) computeSystem.os = strings.ToLower(props.RuntimeOSType) + computeSystem.owner = strings.ToLower(props.Owner) if computeSystem.os == "" && computeSystem.typ == "container" { // Pre-RS5 HCS did not return the OS, but it only supported containers // that ran Windows. 
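The silo name format above is what later lets the shim find a container's job object without asking HCS. A minimal sketch of that lookup, not part of this diff, assuming the internal `jobobject` package vendored below, that `Open` returns `(*jobobject.JobObject, error)` as its usage later in this file suggests, and a hypothetical `containerID`; opening a silo generally requires running as SYSTEM:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

// openSilo opens the job object (silo) backing a running container using the
// siloNameFmt convention above, i.e. `\Container_<id>`.
func openSilo(ctx context.Context, containerID string) (*jobobject.JobObject, error) {
	return jobobject.Open(ctx, &jobobject.Options{
		UseNTVariant: true, // open via the NT object path variant
		Name:         fmt.Sprintf(`\Container_%s`, containerID),
	})
}
```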
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
 	if err != nil {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
-
+	computeSystem.startTime = time.Now()
 	return nil
 }
 
@@ -275,11 +287,19 @@ func (computeSystem *System) waitBackground() {
 	oc.SetSpanStatus(span, err)
 }
 
+func (computeSystem *System) WaitChannel() <-chan struct{} {
+	return computeSystem.waitBlock
+}
+
+func (computeSystem *System) WaitError() error {
+	return computeSystem.waitError
+}
+
 // Wait synchronously waits for the compute system to shutdown or terminate. If
 // the compute system has already exited returns the previous error (if any).
 func (computeSystem *System) Wait() error {
-	<-computeSystem.waitBlock
-	return computeSystem.waitError
+	<-computeSystem.WaitChannel()
+	return computeSystem.WaitError()
 }
 
 // ExitError returns an error describing the reason the compute system terminated.
@@ -324,11 +344,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
 	return properties, nil
 }
 
-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
-	computeSystem.handleLock.RLock()
-	defer computeSystem.handleLock.RUnlock()
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// fail to be queried, they are tallied up and returned as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the container's
+// job object cannot be opened.
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
+	// In the future we can make use of some new functionality in the HCS that allows you
+	// to pass a job object for HCS to use for the container. Currently, the only way we'll
+	// be able to open the job/silo is if we're running as SYSTEM.
+	jobOptions := &jobobject.Options{
+		UseNTVariant: true,
+		Name:         siloNameFmt(computeSystem.id),
+	}
+	job, err := jobobject.Open(ctx, jobOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer job.Close()
+
+	var fallbackQueryTypes []hcsschema.PropertyType
+	for _, propType := range types {
+		switch propType {
+		case hcsschema.PTStatistics:
+			// Handle a bad caller asking for the same type twice. No use in re-querying if this is
+			// filled in already.
+			if props.Statistics == nil {
+				props.Statistics, err = computeSystem.statisticsInProc(job)
+				if err != nil {
+					log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+					fallbackQueryTypes = append(fallbackQueryTypes, propType)
+				}
+			}
+		default:
+			fallbackQueryTypes = append(fallbackQueryTypes, propType)
+		}
+	}
+
+	return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
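+// All counters are read from the container's silo job object rather than from HCS.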
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+	// Start timestamp for these stats before we grab them to match HCS.
+	timestamp := time.Now()
+
+	memInfo, err := job.QueryMemoryStats()
+	if err != nil {
+		return nil, err
+	}
+
+	processorInfo, err := job.QueryProcessorStats()
+	if err != nil {
+		return nil, err
+	}
+
+	storageInfo, err := job.QueryStorageStats()
+	if err != nil {
+		return nil, err
+	}
+
+	// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+	// with the class SystemProcessInformation which returns an array containing system information for *every*
+	// process running on the machine. They then grab the pids that are running in the container and filter down
+	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+	// work well as performance should get worse if more processes are running on the machine in general and not
+	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+	// as well, which isn't great and is wasted work to fetch.
+	//
+	// HCS only lets you grab statistics in an all-or-nothing fashion, so we can't just grab the private
+	// working set ourselves and ask for everything else separately. The optimization we can make here is
+	// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+	// the private working set in a more efficient manner by:
+	//
+	// 1. Find the pids running in the silo
+	// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+	// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+	// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+	privateWorkingSet, err := job.QueryPrivateWorkingSet()
+	if err != nil {
+		return nil, err
+	}
+
+	return &hcsschema.Statistics{
+		Timestamp:          timestamp,
+		ContainerStartTime: computeSystem.startTime,
+		Uptime100ns:        uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+		Memory: &hcsschema.MemoryStats{
+			MemoryUsageCommitBytes:            memInfo.JobMemory,
+			MemoryUsageCommitPeakBytes:        memInfo.PeakJobMemoryUsed,
+			MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+		},
+		Processor: &hcsschema.ProcessorStats{
+			RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+			RuntimeUser100ns:   uint64(processorInfo.TotalUserTime),
+			TotalRuntime100ns:  uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+		},
+		Storage: &hcsschema.StorageStats{
+			ReadCountNormalized:  uint64(storageInfo.ReadStats.IoCount),
+			ReadSizeBytes:        storageInfo.ReadStats.TotalSize,
+			WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+			WriteSizeBytes:       storageInfo.WriteStats.TotalSize,
+		},
+	}, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
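+// It is also the fallback path PropertiesV2 takes when an in-proc query fails.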
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
 	operation := "hcs::System::PropertiesV2"
 
 	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +469,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
 	if propertiesJSON == "" {
 		return nil, ErrUnexpectedValue
 	}
-	properties := &hcsschema.Properties{}
-	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+	props := &hcsschema.Properties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
 		return nil, makeSystemError(computeSystem, operation, err, nil)
 	}
 
-	return properties, nil
+	return props, nil
+}
+
+// PropertiesV2 returns the requested compute system's properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	// Let HCS tally up the total for VM-based queries instead of querying ourselves.
+	if computeSystem.typ != "container" {
+		return computeSystem.hcsPropertiesV2Query(ctx, types)
+	}
+
+	// Define a starter Properties struct with the default fields returned from every
+	// query. Owner is only returned from Statistics but it's harmless to include.
+	properties := &hcsschema.Properties{
+		Id:            computeSystem.id,
+		SystemType:    computeSystem.typ,
+		RuntimeOsType: computeSystem.os,
+		Owner:         computeSystem.owner,
+	}
+
+	logEntry := log.G(ctx)
+	// First, let's try to query ourselves without reaching out to HCS. If any of the queries
+	// fail, we'll take note and fall back to querying HCS for the failed types.
+	fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+	if err == nil && len(fallbackTypes) == 0 {
+		return properties, nil
+	} else if err != nil {
+		logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+		fallbackTypes = types
+	}
+
+	logEntry.WithFields(logrus.Fields{
+		logfields.ContainerID: computeSystem.id,
+		"propertyTypes":       fallbackTypes,
+	}).Info("falling back to HCS for property type queries")
+
+	hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now add in anything that we might have successfully queried in process.
+	if properties.Statistics != nil {
+		hcsProperties.Statistics = properties.Statistics
+		hcsProperties.Owner = properties.Owner
+	}
+
+	// For future support for querying processlist in-proc as well.
+	if properties.ProcessList != nil {
+		hcsProperties.ProcessList = properties.ProcessList
+	}
+
+	return hcsProperties, nil
 }
 
 // Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
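Taken together, PropertiesV2 now tries the in-proc job-object path first and only asks HCS for whatever could not be answered locally. A hedged usage sketch, not part of this diff, assuming a `*hcs.System` created through this package, the vendored `hcsschema` types, and that the private working set field is a `uint64` as its use in `statisticsInProc` suggests:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// privateWorkingSetOf requests statistics for a compute system. For containers
// the call is served from the silo job object when possible, with HCS as the
// fallback for anything that failed in-proc.
func privateWorkingSetOf(ctx context.Context, system *hcs.System) (uint64, error) {
	props, err := system.PropertiesV2(ctx, hcsschema.PTStatistics)
	if err != nil {
		return 0, err
	}
	if props.Statistics == nil || props.Statistics.Memory == nil {
		return 0, fmt.Errorf("no memory statistics returned")
	}
	return props.Statistics.Memory.MemoryUsagePrivateWorkingSetBytes, nil
}
```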
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
index 591a2631e45f..84b368218474 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go
@@ -21,10 +21,11 @@ const (
 )
 
 type NatPolicy struct {
-	Type         PolicyType `json:"Type"`
-	Protocol     string     `json:",omitempty"`
-	InternalPort uint16     `json:",omitempty"`
-	ExternalPort uint16     `json:",omitempty"`
+	Type                 PolicyType `json:"Type"`
+	Protocol             string     `json:",omitempty"`
+	InternalPort         uint16     `json:",omitempty"`
+	ExternalPort         uint16     `json:",omitempty"`
+	ExternalPortReserved bool       `json:",omitempty"`
 }
 
 type QosPolicy struct {
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
new file mode 100644
index 000000000000..5d6acd69e618
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go
@@ -0,0 +1,111 @@
+package jobobject
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"unsafe"
+
+	"github.com/Microsoft/hcsshim/internal/log"
+	"github.com/Microsoft/hcsshim/internal/queue"
+	"github.com/Microsoft/hcsshim/internal/winapi"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/windows"
+)
+
+var (
+	ioInitOnce sync.Once
+	initIOErr  error
+	// Global IOCP handle that will be re-used for every job object
+	ioCompletionPort windows.Handle
+	// Mapping of job handle to queue to place notifications in.
+	jobMap sync.Map
+)
+
+// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
+type MsgAllProcessesExited struct{}
+
+// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
+// This should not be treated as an error.
+type MsgUnimplemented struct{}
+
+// pollIOCP polls the io completion port forever.
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
+	var (
+		overlapped uintptr
+		code       uint32
+		key        uintptr
+	)
+
+	for {
+		err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
+		if err != nil {
+			log.G(ctx).WithError(err).Error("failed to poll for job object message")
+			continue
+		}
+		if val, ok := jobMap.Load(key); ok {
+			msq, ok := val.(*queue.MessageQueue)
+			if !ok {
+				log.G(ctx).WithField("value", msq).Warn("encountered non-queue type in job map")
+				continue
+			}
+			notification, err := parseMessage(code, overlapped)
+			if err != nil {
+				log.G(ctx).WithFields(logrus.Fields{
+					"code":       code,
+					"overlapped": overlapped,
+				}).Warn("failed to parse job object message")
+				continue
+			}
+			if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
+				// Enqueue will only return an error when the queue is closed.
+				// The only time a queue would ever be closed is when we call `Close` on
+				// the job it belongs to which also removes it from the jobMap, so something
+				// went wrong here. We can't return as this is reading messages for all jobs
+				// so just log it and move on.
+				log.G(ctx).WithFields(logrus.Fields{
+					"code":       code,
+					"overlapped": overlapped,
+				}).Warn("tried to write to a closed queue")
+				continue
+			}
+		} else {
+			log.G(ctx).Warn("received a message for a job not present in the mapping")
+		}
+	}
+}
+
+func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
+	// Check code and parse out relevant information related to that notification
+	// that we care about. For now all we handle is the message that all processes
+	// in the job have exited.
+	switch code {
+	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+		return MsgAllProcessesExited{}, nil
+	// The remaining messages are listed for completeness; they also make sure that
+	// falling into the default case really means a code we don't know how to handle.
+	case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
+	case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
+	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
+	case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
+	case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
+	case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
+	case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
+	case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
+	case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
+	default:
+		return nil, fmt.Errorf("unknown job notification type: %d", code)
+	}
+	return MsgUnimplemented{}, nil
+}
+
+// attachIOCP assigns an IO completion port to get notified of events for the registered job
+// object.
+func attachIOCP(job windows.Handle, iocp windows.Handle) error {
+	info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
+		CompletionKey:  job,
+		CompletionPort: iocp,
+	}
+	_, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+	return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
new file mode 100644
index 000000000000..c9fdd921a7f8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
@@ -0,0 +1,538 @@
+package jobobject
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"unsafe"
+
+	"github.com/Microsoft/hcsshim/internal/queue"
+	"github.com/Microsoft/hcsshim/internal/winapi"
+	"golang.org/x/sys/windows"
+)
+
+// This file provides higher level constructs for the win32 job object API.
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
+// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
+// structs and associated limit flags. Whatever is not present from the job object API
+// in golang.org/x/sys/windows is located in /internal/winapi.
+//
+// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
+
+// JobObject is a high level wrapper around a Windows job object. It holds a handle to
+// the job, a queue to receive IOCP notifications about the lifecycle
+// of the job, and a mutex for synchronized handle access.
+type JobObject struct {
+	handle     windows.Handle
+	mq         *queue.MessageQueue
+	handleLock sync.RWMutex
+}
+
+// JobLimits represents the resource constraints that can be applied to a job object.
+type JobLimits struct {
+	CPULimit           uint32
+	CPUWeight          uint32
+	MemoryLimitInBytes uint64
+	MaxIOPS            int64
+	MaxBandwidth       int64
+}
+
+type CPURateControlType uint32
+
+const (
+	WeightBased CPURateControlType = iota
+	RateBased
+)
+
+// Processor resource controls
+const (
+	cpuLimitMin  = 1
+	cpuLimitMax  = 10000
+	cpuWeightMin = 1
+	cpuWeightMax = 9
+)
+
+var (
+	ErrAlreadyClosed = errors.New("the handle has already been closed")
+	ErrNotRegistered = errors.New("job is not registered to receive notifications")
+)
+
+// Options represents the set of configurable options when making or opening a job object.
+type Options struct {
+	// `Name` specifies the name of the job object if a named job object is desired.
+	Name string
+	// `Notifications` specifies if the job will be registered to receive notifications.
+	// Defaults to false.
+	Notifications bool
+	// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
+	// Defaults to false.
+	UseNTVariant bool
+	// `EnableIOTracking` enables tracking I/O statistics on the job object. More specifically this
+	// calls SetInformationJobObject with the JobObjectIoAttribution class.
+	EnableIOTracking bool
+}
+
+// Create creates a job object.
+//
+// If options.Name is an empty string, the job will not be assigned a name.
+//
+// If options.Notifications is not enabled, `PollNotification` will return immediately with error `ErrNotRegistered`.
+//
+// If `options` is nil, use default option values.
+//
+// Returns a JobObject structure and an error if there is one.
+func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
+	if options == nil {
+		options = &Options{}
+	}
+
+	var jobName *winapi.UnicodeString
+	if options.Name != "" {
+		jobName, err = winapi.NewUnicodeString(options.Name)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var jobHandle windows.Handle
+	if options.UseNTVariant {
+		oa := winapi.ObjectAttributes{
+			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
+			ObjectName: jobName,
+			Attributes: 0,
+		}
+		status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+		if status != 0 {
+			return nil, winapi.RtlNtStatusToDosError(status)
+		}
+	} else {
+		var jobNameBuf *uint16
+		if jobName != nil && jobName.Buffer != nil {
+			jobNameBuf = jobName.Buffer
+		}
+		jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	defer func() {
+		if err != nil {
+			windows.Close(jobHandle)
+		}
+	}()
+
+	job := &JobObject{
+		handle: jobHandle,
+	}
+
+	// If the IOCP we'll be using to receive messages for all jobs hasn't been
+	// created, create it and start polling.
+	if options.Notifications {
+		mq, err := setupNotifications(ctx, job)
+		if err != nil {
+			return nil, err
+		}
+		job.mq = mq
+	}
+
+	if options.EnableIOTracking {
+		if err := enableIOTracking(jobHandle); err != nil {
+			return nil, err
+		}
+	}
+
+	return job, nil
+}
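A hedged usage sketch for `Create`: the job name is made up, error handling is minimal, and `Close` is defined further below in this file.

```go
// Assumed imports: "context", "github.com/Microsoft/hcsshim/internal/jobobject".
func withJob(ctx context.Context) error {
	job, err := jobobject.Create(ctx, &jobobject.Options{
		Name:             "example-job", // hypothetical name
		Notifications:    true,
		EnableIOTracking: true,
	})
	if err != nil {
		return err
	}
	defer job.Close()
	// ... assign processes, apply limits, query stats ...
	return nil
}
```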
+// Open opens an existing job object with name provided in `options`. If no name is provided
+// return an error since we need to know what job object to open.
+//
+// If options.Notifications is false, `PollNotification` will return immediately with error `ErrNotRegistered`.
+//
+// Returns a JobObject structure and an error if there is one.
+func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
+	if options == nil || options.Name == "" {
+		return nil, errors.New("no job object name specified to open")
+	}
+
+	unicodeJobName, err := winapi.NewUnicodeString(options.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	var jobHandle windows.Handle
+	if options.UseNTVariant {
+		oa := winapi.ObjectAttributes{
+			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
+			ObjectName: unicodeJobName,
+			Attributes: 0,
+		}
+		status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
+		if status != 0 {
+			return nil, winapi.RtlNtStatusToDosError(status)
+		}
+	} else {
+		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	defer func() {
+		if err != nil {
+			windows.Close(jobHandle)
+		}
+	}()
+
+	job := &JobObject{
+		handle: jobHandle,
+	}
+
+	// If the IOCP we'll be using to receive messages for all jobs hasn't been
+	// created, create it and start polling.
+	if options.Notifications {
+		mq, err := setupNotifications(ctx, job)
+		if err != nil {
+			return nil, err
+		}
+		job.mq = mq
+	}
+
+	return job, nil
+}
+
+// setupNotifications is a helper to set up notifications when creating/opening a job object.
+func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	ioInitOnce.Do(func() {
+		h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+		if err != nil {
+			initIOErr = err
+			return
+		}
+		ioCompletionPort = h
+		go pollIOCP(ctx, h)
+	})
+
+	if initIOErr != nil {
+		return nil, initIOErr
+	}
+
+	mq := queue.NewMessageQueue()
+	jobMap.Store(uintptr(job.handle), mq)
+	if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
+		jobMap.Delete(uintptr(job.handle))
+		return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
+	}
+	return mq, nil
+}
+
+// PollNotification will poll for a job object notification. This call should only be called once
+// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
+// This call will return immediately with error `ErrNotRegistered` if the job was not registered
+// to receive notifications during `Create`. Internally, messages will be queued and there
+// is no worry of messages being dropped.
+func (job *JobObject) PollNotification() (interface{}, error) {
+	if job.mq == nil {
+		return nil, ErrNotRegistered
+	}
+	return job.mq.Dequeue()
+}
+
+// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
+// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
+// has already started running.
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	if err := attrList.Update(
+		winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
+		unsafe.Pointer(&job.handle),
+		unsafe.Sizeof(job.handle),
+	); err != nil {
+		return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
+	}
+
+	return nil
+}
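The notification plumbing is easiest to see from the consumer side. A sketch, assuming the job was created with `Notifications: true`:

```go
// Assumed import: "github.com/Microsoft/hcsshim/internal/jobobject".
//
// waitForEmptyJob blocks until every process in the job has exited.
func waitForEmptyJob(job *jobobject.JobObject) error {
	for {
		notif, err := job.PollNotification()
		if err != nil {
			// ErrNotRegistered: the job wasn't created with Notifications set;
			// the queue-closed error surfaces once the job has been closed.
			return err
		}
		switch notif.(type) {
		case jobobject.MsgAllProcessesExited:
			return nil
		case jobobject.MsgUnimplemented:
			// Known but currently unhandled message; keep polling.
		}
	}
}
```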
+// Close closes the job object handle.
+func (job *JobObject) Close() error {
+	job.handleLock.Lock()
+	defer job.handleLock.Unlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	if err := windows.Close(job.handle); err != nil {
+		return err
+	}
+
+	if job.mq != nil {
+		job.mq.Close()
+	}
+	// The handle is now invalid, so if the map entry to receive notifications for this job
+	// still exists, remove it so we stop receiving notifications.
+	if _, ok := jobMap.Load(uintptr(job.handle)); ok {
+		jobMap.Delete(uintptr(job.handle))
+	}
+
+	job.handle = 0
+	return nil
+}
+
+// Assign assigns a process to the job object.
+func (job *JobObject) Assign(pid uint32) error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	if pid == 0 {
+		return errors.New("invalid pid: 0")
+	}
+	hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
+	if err != nil {
+		return err
+	}
+	defer windows.Close(hProc)
+	return windows.AssignProcessToJobObject(job.handle, hProc)
+}
+
+// Terminate terminates the job, essentially calling TerminateProcess on every process in the
+// job.
+func (job *JobObject) Terminate(exitCode uint32) error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+	return windows.TerminateJobObject(job.handle, exitCode)
+}
+
+// Pids returns all of the process IDs in the job object.
+func (job *JobObject) Pids() ([]uint32, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
+	err := winapi.QueryInformationJobObject(
+		job.handle,
+		winapi.JobObjectBasicProcessIdList,
+		unsafe.Pointer(&info),
+		uint32(unsafe.Sizeof(info)),
+		nil,
+	)
+
+	// This is either the case where there is only one process or no processes in
+	// the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList
+	// is 1 and just return this, otherwise return an empty slice.
+	if err == nil {
+		if info.NumberOfProcessIdsInList == 1 {
+			return []uint32{uint32(info.ProcessIdList[0])}, nil
+		}
+		// Return empty slice instead of nil to play well with the caller of this.
+		// Do not return an error if no processes are running inside the job
+		return []uint32{}, nil
+	}
+
+	if err != winapi.ERROR_MORE_DATA {
+		return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
+	}
+
+	jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
+	buf := make([]byte, jobBasicProcessIDListSize)
+	if err = winapi.QueryInformationJobObject(
+		job.handle,
+		winapi.JobObjectBasicProcessIdList,
+		unsafe.Pointer(&buf[0]),
+		uint32(len(buf)),
+		nil,
+	); err != nil {
+		return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
+	}
+
+	bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
+	pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
+	for i, bufPid := range bufInfo.AllPids() {
+		pids[i] = uint32(bufPid)
+	}
+	return pids, nil
+}
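Tying the lifecycle methods together, a hypothetical sketch that adopts a running process into the job, lists the members, and tears everything down (the pid is illustrative and must belong to a process the caller can open with PROCESS_ALL_ACCESS):

```go
// Assumed imports: "fmt", "github.com/Microsoft/hcsshim/internal/jobobject".
func adoptAndKill(job *jobobject.JobObject, pid uint32) error {
	if err := job.Assign(pid); err != nil {
		return err
	}
	pids, err := job.Pids()
	if err != nil {
		return err
	}
	fmt.Printf("job now contains %d process(es)\n", len(pids))
	// Terminate delivers exit code 1 to every process still in the job.
	return job.Terminate(1)
}
```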
+// QueryMemoryStats gets the memory stats for the job object.
+func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
+	if err := winapi.QueryInformationJobObject(
+		job.handle,
+		winapi.JobObjectMemoryUsageInformation,
+		unsafe.Pointer(&info),
+		uint32(unsafe.Sizeof(info)),
+		nil,
+	); err != nil {
+		return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
+	}
+	return &info, nil
+}
+
+// QueryProcessorStats gets the processor stats for the job object.
+func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
+	if err := winapi.QueryInformationJobObject(
+		job.handle,
+		winapi.JobObjectBasicAccountingInformation,
+		unsafe.Pointer(&info),
+		uint32(unsafe.Sizeof(info)),
+		nil,
+	); err != nil {
+		return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
+	}
+	return &info, nil
+}
+
+// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
+// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
+// hasn't been called since creation of the job.
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+		ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+	}
+	if err := winapi.QueryInformationJobObject(
+		job.handle,
+		winapi.JobObjectIoAttribution,
+		unsafe.Pointer(&info),
+		uint32(unsafe.Sizeof(info)),
+		nil,
+	); err != nil {
+		return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
+	}
+	return &info, nil
+}
+
+// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
+// private working set for every process running in the job.
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
+	pids, err := job.Pids()
+	if err != nil {
+		return 0, err
+	}
+
+	openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
+		h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
+		if err != nil {
+			// Continue to the next pid if OpenProcess fails; this handles the case where
+			// one of the pids in the job exited before we could open a handle to it.
+			return 0, nil
+		}
+		defer func() {
+			_ = windows.Close(h)
+		}()
+		// Check if the process is actually running in the job still. There's a small chance
+		// that the process could have exited and had its pid re-used between grabbing the pids
+		// in the job and opening the handle to it above.
+		var inJob int32
+		if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
+			// This shouldn't fail unless we have incorrect access rights, which we control
+			// here, so it's best to error out if it does.
+			return 0, err
+		}
+		// Don't report stats for this process as it's not running in the job. This shouldn't be
+		// an error condition though.
+		if inJob == 0 {
+			return 0, nil
+		}
+
+		var vmCounters winapi.VM_COUNTERS_EX2
+		status := winapi.NtQueryInformationProcess(
+			h,
+			winapi.ProcessVmCounters,
+			unsafe.Pointer(&vmCounters),
+			uint32(unsafe.Sizeof(vmCounters)),
+			nil,
+		)
+		if !winapi.NTSuccess(status) {
+			return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
+		}
+		return uint64(vmCounters.PrivateWorkingSetSize), nil
+	}
+
+	var jobWorkingSetSize uint64
+	for _, pid := range pids {
+		workingSet, err := openAndQueryWorkingSet(pid)
+		if err != nil {
+			return 0, err
+		}
+		jobWorkingSetSize += workingSet
+	}
+
+	return jobWorkingSetSize, nil
+}
+
+// SetIOTracking enables IO tracking for processes in the job object.
+// This enables use of the QueryStorageStats method.
+func (job *JobObject) SetIOTracking() error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	return enableIOTracking(job.handle)
+}
+
+func enableIOTracking(job windows.Handle) error {
+	info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+		ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+	}
+	if _, err := windows.SetInformationJobObject(
+		job,
+		winapi.JobObjectIoAttribution,
+		uintptr(unsafe.Pointer(&info)),
+		uint32(unsafe.Sizeof(info)),
+	); err != nil {
+		return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
new file mode 100644
index 000000000000..4efde292c49d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
@@ -0,0 +1,315 @@
+package jobobject
+
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+
+	"github.com/Microsoft/hcsshim/internal/winapi"
+	"golang.org/x/sys/windows"
+)
+
+const (
+	memoryLimitMax uint64 = 0xffffffffffffffff
+)
+
+func isFlagSet(flag, controlFlags uint32) bool {
+	return (flag & controlFlags) == flag
+}
+
+// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
+func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
+	// Go through and check what limits were specified and apply them to the job.
+	if limits.MemoryLimitInBytes != 0 {
+		if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
+			return fmt.Errorf("failed to set job object memory limit: %w", err)
+		}
+	}
+
+	if limits.CPULimit != 0 {
+		if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
+			return fmt.Errorf("failed to set job object cpu limit: %w", err)
+		}
+	} else if limits.CPUWeight != 0 {
+		if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
+			return fmt.Errorf("failed to set job object cpu limit: %w", err)
+		}
+	}
+
+	if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
+		if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
+			return fmt.Errorf("failed to set io limit on job object: %w", err)
+		}
+	}
+	return nil
+}
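A hedged example of the aggregate helper above; the numbers are illustrative (the rate-based CPU cap is on a 1-10000 scale, so 5000 is roughly half the machine):

```go
// Assumed import: "github.com/Microsoft/hcsshim/internal/jobobject".
//
// applyLimits caps a job at roughly half the machine's CPU time, 256MiB of
// memory, and 1000 IOPS. Zero-valued fields in JobLimits are simply skipped.
func applyLimits(job *jobobject.JobObject) error {
	return job.SetResourceLimits(&jobobject.JobLimits{
		CPULimit:           5000,
		MemoryLimitInBytes: 256 * 1024 * 1024,
		MaxIOPS:            1000,
	})
}
```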
+// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
+// all processes in the job on the last open handle being closed.
+func (job *JobObject) SetTerminateOnLastHandleClose() error {
+	info, err := job.getExtendedInformation()
+	if err != nil {
+		return err
+	}
+	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+	return job.setExtendedInformation(info)
+}
+
+// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
+func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
+	if memoryLimitInBytes >= memoryLimitMax {
+		return errors.New("memory limit specified exceeds the max size")
+	}
+
+	info, err := job.getExtendedInformation()
+	if err != nil {
+		return err
+	}
+
+	info.JobMemoryLimit = uintptr(memoryLimitInBytes)
+	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
+	return job.setExtendedInformation(info)
+}
+
+// GetMemoryLimit gets the memory limit in bytes of the job object.
+func (job *JobObject) GetMemoryLimit() (uint64, error) {
+	info, err := job.getExtendedInformation()
+	if err != nil {
+		return 0, err
+	}
+	return uint64(info.JobMemoryLimit), nil
+}
+
+// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
+// `rateControlValue` for the job object.
+func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
+	cpuInfo, err := job.getCPURateControlInformation()
+	if err != nil {
+		return err
+	}
+	switch rateControlType {
+	case WeightBased:
+		if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
+			return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
+		}
+		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
+		cpuInfo.Value = rateControlValue
+	case RateBased:
+		if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
+			return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
+		}
+		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
+		cpuInfo.Value = rateControlValue
+	default:
+		return errors.New("invalid job object cpu rate control type")
+	}
+	return job.setCPURateControlInfo(cpuInfo)
+}
+
+// GetCPULimit gets the cpu limits for the job object.
+// `rateControlType` is used to indicate what type of cpu limit to query for.
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
+	info, err := job.getCPURateControlInformation()
+	if err != nil {
+		return 0, err
+	}
+
+	if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
+		return 0, errors.New("the job does not have cpu rate control enabled")
+	}
+
+	switch rateControlType {
+	case WeightBased:
+		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
+			return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
+		}
+	case RateBased:
+		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
+			return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
+		}
+	default:
+		return 0, errors.New("invalid job object cpu rate control type")
+	}
+	return info.Value, nil
+}
+
+// SetCPUAffinity sets the processor affinity for the job object.
+// The affinity is passed in as a bitmask.
+func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { + info, err := job.getExtendedInformation() + if err != nil { + return err + } + info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) + return job.setExtendedInformation(info) +} + +// GetCPUAffinity gets the processor affinity for the job object. +// The returned affinity is a bitmask. +func (job *JobObject) GetCPUAffinity() (uint64, error) { + info, err := job.getExtendedInformation() + if err != nil { + return 0, err + } + return uint64(info.BasicLimitInformation.Affinity), nil +} + +// SetIOLimit sets the IO limits specified on the job object. +func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { + ioInfo, err := job.getIOLimit() + if err != nil { + return err + } + ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE + if maxBandwidth != 0 { + ioInfo.MaxBandwidth = maxBandwidth + } + if maxIOPS != 0 { + ioInfo.MaxIops = maxIOPS + } + return job.setIORateControlInfo(ioInfo) +} + +// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. +func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxBandwidth, nil +} + +// GetIOMaxIopsLimit gets the max iops for the job object. +func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { + info, err := job.getIOLimit() + if err != nil { + return 0, err + } + return info.MaxIops, nil +} + +// Helper function for getting a job object's extended information. +func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for getting a job object's CPU rate control information. +func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return nil, ErrAlreadyClosed + } + + info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} + if err := winapi.QueryInformationJobObject( + job.handle, + windows.JobObjectCpuRateControlInformation, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + nil, + ); err != nil { + return nil, fmt.Errorf("query %v returned error: %w", info, err) + } + return &info, nil +} + +// Helper function for setting a job object's extended information. +func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if _, err := windows.SetInformationJobObject( + job.handle, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(info)), + uint32(unsafe.Sizeof(*info)), + ); err != nil { + return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) + } + return nil +} + +// Helper function for querying job handle for IO limit information. 
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return nil, ErrAlreadyClosed
+	}
+
+	ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
+	var blockCount uint32 = 1
+
+	if _, err := winapi.QueryIoRateControlInformationJobObject(
+		job.handle,
+		nil,
+		&ioInfo,
+		&blockCount,
+	); err != nil {
+		return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
+	}
+
+	if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
+		return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
+	}
+	return ioInfo, nil
+}
+
+// Helper function for setting a job object's IO rate control information.
+func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
+		return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
+	}
+	return nil
+}
+
+// Helper function for setting a job object's CPU rate control information.
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+	if _, err := windows.SetInformationJobObject(
+		job.handle,
+		windows.JobObjectCpuRateControlInformation,
+		uintptr(unsafe.Pointer(cpuInfo)),
+		uint32(unsafe.Sizeof(*cpuInfo)),
+	); err != nil {
+		return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
new file mode 100644
index 000000000000..4eb9bb9f1f39
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
@@ -0,0 +1,92 @@
+package queue
+
+import (
+	"errors"
+	"sync"
+)
+
+var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+
+// MessageQueue represents a threadsafe message queue to be used to retrieve or
+// write messages to.
+type MessageQueue struct {
+	m        *sync.RWMutex
+	c        *sync.Cond
+	messages []interface{}
+	closed   bool
+}
+
+// NewMessageQueue returns a new MessageQueue.
+func NewMessageQueue() *MessageQueue {
+	m := &sync.RWMutex{}
+	return &MessageQueue{
+		m:        m,
+		c:        sync.NewCond(m),
+		messages: []interface{}{},
+	}
+}
+
+// Enqueue writes `msg` to the queue.
+func (mq *MessageQueue) Enqueue(msg interface{}) error {
+	mq.m.Lock()
+	defer mq.m.Unlock()
+
+	if mq.closed {
+		return ErrQueueClosed
+	}
+	mq.messages = append(mq.messages, msg)
+	// Signal a waiter that there is now a value available in the queue.
+	mq.c.Signal()
+	return nil
+}
+
+// Dequeue will read a value from the queue and remove it. If the queue
+// is empty, this will block until the queue is closed or a value gets enqueued.
+func (mq *MessageQueue) Dequeue() (interface{}, error) {
+	mq.m.Lock()
+	defer mq.m.Unlock()
+
+	for !mq.closed && mq.size() == 0 {
+		mq.c.Wait()
+	}
+
+	// We got woken up, check if it's because the queue got closed.
+	if mq.closed {
+		return nil, ErrQueueClosed
+	}
+
+	val := mq.messages[0]
+	mq.messages[0] = nil
+	mq.messages = mq.messages[1:]
+	return val, nil
+}
+
+// Size returns the size of the queue.
+func (mq *MessageQueue) Size() int {
+	mq.m.RLock()
+	defer mq.m.RUnlock()
+	return mq.size()
+}
+
+// size is the non-exported length check, for use inside methods that already hold the lock.
+func (mq *MessageQueue) size() int {
+	return len(mq.messages)
+}
+
+// Close closes the queue for future writes or reads. Any attempts to read or write from the
+// queue after close will return ErrQueueClosed. This is safe to call multiple times.
+func (mq *MessageQueue) Close() {
+	mq.m.Lock()
+	defer mq.m.Unlock()
+
+	// Already closed, noop
+	if mq.closed {
+		return
+	}
+
+	mq.messages = nil
+	mq.closed = true
+	// If there's anybody currently waiting on a value from Dequeue, we need to
+	// broadcast so the read(s) can return ErrQueueClosed.
+	mq.c.Broadcast()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
deleted file mode 100644
index 4e609cbf1cdb..000000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winapi
-
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
index ba12b1ad92e3..7eb13f8f0a83 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
@@ -24,7 +24,10 @@ const (
 // Access rights for creating or opening job objects.
 //
 // https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+	JOB_OBJECT_QUERY      = 0x0004
+	JOB_OBJECT_ALL_ACCESS = 0x1F001F
+)
 
 // IO limit flags
 //
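Stepping back to the queue package just added: a small, deterministic round-trip showing the blocking read and the close semantics (as an `internal` package this only builds inside the hcsshim module):

```go
// Assumed imports: "fmt", "github.com/Microsoft/hcsshim/internal/queue".
mq := queue.NewMessageQueue()
_ = mq.Enqueue("hello") // only errors once the queue is closed
msg, _ := mq.Dequeue()  // returns "hello"; would block if the queue were empty
fmt.Println(msg)

mq.Close() // wakes any blocked Dequeue callers
if _, err := mq.Dequeue(); err == queue.ErrQueueClosed {
	fmt.Println("queue is closed for reading and writing")
}
```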
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
 // AllPids returns all the process Ids in the job object.
 func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
-	return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
+	return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
 }
 
 // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
 //   PBOOL  Result
 // );
 //
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
 
 // BOOL QueryInformationJobObject(
 //   HANDLE hJob,
@@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
 //   LPDWORD lpReturnLength
 // );
 //
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
 
 // HANDLE OpenJobObjectW(
 //   DWORD dwDesiredAccess,
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index 37839435b939..222529f433a5 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -6,3 +6,60 @@ const (
 	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
 	PROC_THREAD_ATTRIBUTE_JOB_LIST      = 0x2000D
 )
+
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
+const ProcessVmCounters = 3 + +// __kernel_entry NTSTATUS NtQueryInformationProcess( +// [in] HANDLE ProcessHandle, +// [in] PROCESSINFOCLASS ProcessInformationClass, +// [out] PVOID ProcessInformation, +// [in] ULONG ProcessInformationLength, +// [out, optional] PULONG ReturnLength +// ); +// +//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess + +// typedef struct _VM_COUNTERS_EX +// { +// SIZE_T PeakVirtualSize; +// SIZE_T VirtualSize; +// ULONG PageFaultCount; +// SIZE_T PeakWorkingSetSize; +// SIZE_T WorkingSetSize; +// SIZE_T QuotaPeakPagedPoolUsage; +// SIZE_T QuotaPagedPoolUsage; +// SIZE_T QuotaPeakNonPagedPoolUsage; +// SIZE_T QuotaNonPagedPoolUsage; +// SIZE_T PagefileUsage; +// SIZE_T PeakPagefileUsage; +// SIZE_T PrivateUsage; +// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; +// +type VM_COUNTERS_EX struct { + PeakVirtualSize uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +// typedef struct _VM_COUNTERS_EX2 +// { +// VM_COUNTERS_EX CountersEx; +// SIZE_T PrivateWorkingSetSize; +// SIZE_T SharedCommitUsage; +// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; +// +type VM_COUNTERS_EX2 struct { + CountersEx VM_COUNTERS_EX + PrivateWorkingSetSize uintptr + SharedCommitUsage uintptr +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go index 327f57d7c296..78fe01a4b412 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 // ULONG SystemInformationLength, // PULONG ReturnLength // ); -//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation +// +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation type SYSTEM_PROCESS_INFORMATION struct { NextEntryOffset uint32 // ULONG diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go index 1d4ba3c4f8ed..d2cc9d9fba6d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -2,4 +2,4 @@ // be thought of as an extension to golang.org/x/sys/windows. 
package winapi -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index 4eb64b4c0c46..1f16cf0b8e15 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -50,7 +50,6 @@ var ( procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procSearchPathW = modkernel32.NewProc("SearchPathW") procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") @@ -61,6 +60,7 @@ var ( procLogonUserW = modadvapi32.NewProc("LogonUserW") procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalFree = modkernel32.NewProc("LocalFree") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") @@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { return } -func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { +func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) status = uint32(r0) return @@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, return } -func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) if r1 == 0 { if e1 != 0 { @@ -164,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result return } -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo 
uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) if r1 == 0 { if e1 != 0 { @@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) { return } +func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + status = uint32(r0) + return +} + func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) amount = uint32(r0) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore new file mode 100644 index 000000000000..18719daa8a76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore @@ -0,0 +1,11 @@ +dist +/doc +/doc-staging +.yardoc +Gemfile.lock +/internal/awstesting/integration/smoke/**/importmarker__.go +/internal/awstesting/integration/smoke/_test/ +/vendor +/private/model/cli/gen-api/gen-api +.gradle/ +build/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml new file mode 100644 index 000000000000..75e338858d46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml @@ -0,0 +1,27 @@ +[run] +concurrency = 4 +timeout = "1m" +issues-exit-code = 0 +modules-download-mode = "readonly" +allow-parallel-runners = true +skip-dirs = ["internal/repotools"] +skip-dirs-use-default = true + +[output] +format = "github-actions" + +[linters-settings.cyclop] +skip-tests = false + +[linters-settings.errcheck] +check-blank = true + +[linters] +disable-all = true +enable = ["errcheck"] +fast = false + +[issues] +exclude-use-default = false + +# Refer config definitions at https://golangci-lint.run/usage/configuration/#config-file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml new file mode 100644 index 000000000000..4b498a7a2bb7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml @@ -0,0 +1,31 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +env: + - EACHMODULE_CONCURRENCY=4 + +script: + - make ci-test-no-generate; + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md new file mode 100644 index 000000000000..a4e6d6c9695e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -0,0 +1,4293 @@ +# Release (2022-04-25) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: 
v1.16.3
+  * **Dependency Update**: Update SDK's internal copy of golang.org/x/sync/singleflight to address issue with test failing due to timing issues
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.12.0](credentials/CHANGELOG.md#v1120-2022-04-25)
+  * **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.23.0](service/connect/CHANGELOG.md#v1230-2022-04-25)
+  * **Feature**: This release adds SearchUsers API which can be used to search for users with a Connect Instance
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.4](service/gamelift/CHANGELOG.md#v1144-2022-04-25)
+  * **Documentation**: Documentation updates for Amazon GameLift.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.13.0](service/mq/CHANGELOG.md#v1130-2022-04-25)
+  * **Feature**: This release adds the CRITICAL_ACTION_REQUIRED broker state and the ActionRequired API property. CRITICAL_ACTION_REQUIRED informs you when your broker is degraded. ActionRequired provides you with a code which you can use to find instructions in the Developer Guide on how to resolve the issue.
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.0](service/rdsdata/CHANGELOG.md#v1120-2022-04-25)
+  * **Feature**: Support to receive SQL query results in the form of a simplified JSON string. This enables developers using the new JSON string format to more easily convert it to an object using popular JSON string parsing libraries.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.0](service/securityhub/CHANGELOG.md#v1210-2022-04-25)
+  * **Feature**: Security Hub now lets you opt-out of auto-enabling the defaults standards (CIS and FSBP) in accounts that are auto-enabled with Security Hub via Security Hub's integration with AWS Organizations.
+
+# Release (2022-04-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.9.0](service/chimesdkmeetings/CHANGELOG.md#v190-2022-04-22)
+  * **Feature**: Include additional exceptions types.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.36.0](service/ec2/CHANGELOG.md#v1360-2022-04-22)
+  * **Feature**: Adds support for waiters that automatically poll for a deleted NAT Gateway until it reaches the deleted state.
+
+# Release (2022-04-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.5](service/elasticache/CHANGELOG.md#v1205-2022-04-21)
+  * **Documentation**: Doc only update for ElastiCache
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.0](service/glue/CHANGELOG.md#v1240-2022-04-21)
+  * **Feature**: This release adds APIs to create, read, delete, list, and batch read of Glue custom entity types
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.21.0](service/iotsitewise/CHANGELOG.md#v1210-2022-04-21)
+  * **Feature**: This release adds 3 new batch data query APIs: BatchGetAssetPropertyValue, BatchGetAssetPropertyValueHistory and BatchGetAssetPropertyAggregates
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.7.0](service/iottwinmaker/CHANGELOG.md#v170-2022-04-21)
+  * **Feature**: General availability (GA) for AWS IoT TwinMaker.
For more information, see https://docs.aws.amazon.com/iot-twinmaker/latest/apireference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.12.0](service/lookoutmetrics/CHANGELOG.md#v1120-2022-04-21) + * **Feature**: Added DetectMetricSetConfig API for detecting configuration required for creating metric set from provided S3 data source. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.17.0](service/mediatailor/CHANGELOG.md#v1170-2022-04-21) + * **Feature**: This release introduces tiered channels and adds support for live sources. Customers using a STANDARD channel can now create programs using live sources. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.5](service/secretsmanager/CHANGELOG.md#v1155-2022-04-21) + * **Documentation**: Documentation updates for Secrets Manager +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.17.0](service/storagegateway/CHANGELOG.md#v1170-2022-04-21) + * **Feature**: This release adds support for minimum of 5 character length virtual tape barcodes. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.8.0](service/wisdom/CHANGELOG.md#v180-2022-04-21) + * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations. + +# Release (2022-04-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.22.0](service/connect/CHANGELOG.md#v1220-2022-04-20) + * **Feature**: This release adds APIs to search, claim, release, list, update, and describe phone numbers. You can also use them to associate and disassociate contact flows to phone numbers. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.21.0](service/macie2/CHANGELOG.md#v1210-2022-04-20) + * **Feature**: Sensitive data findings in Amazon Macie now indicate how Macie found the sensitive data that produced a finding (originType). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.14.0](service/mgn/CHANGELOG.md#v1140-2022-04-20) + * **Feature**: Removed required annotation from input fields in Describe operations requests. Added quotaValue to ServiceQuotaExceededException +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.20.0](service/rds/CHANGELOG.md#v1200-2022-04-20) + * **Feature**: Added a new cluster-level attribute to set the capacity range for Aurora Serverless v2 instances. + +# Release (2022-04-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.0](service/autoscaling/CHANGELOG.md#v1230-2022-04-19) + * **Feature**: EC2 Auto Scaling now adds default instance warm-up times for all scaling activities, health check replacements, and other replacement events in the Auto Scaling instance lifecycle. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.25.0](service/kendra/CHANGELOG.md#v1250-2022-04-19) + * **Feature**: Amazon Kendra now provides a data source connector for Quip. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-quip.html +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.0](service/kms/CHANGELOG.md#v1170-2022-04-19) + * **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.19.0](service/personalize/CHANGELOG.md#v1190-2022-04-19) + * **Feature**: Adding StartRecommender and StopRecommender APIs for Personalize. 
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.15.0](service/polly/CHANGELOG.md#v1150-2022-04-19)
+  * **Feature**: Amazon Polly adds new Austrian German voice - Hannah. Hannah is available as Neural voice only.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.23.0](service/redshift/CHANGELOG.md#v1230-2022-04-19)
+  * **Feature**: Introduces new fields for LogDestinationType and LogExports on EnableLogging requests and Enable/Disable/DescribeLogging responses. Customers can now select CloudWatch Logs as a destination for their Audit Logs.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.25.0](service/ssm/CHANGELOG.md#v1250-2022-04-19)
+  * **Feature**: Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression when creating SSM association.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.15.0](service/textract/CHANGELOG.md#v1150-2022-04-19)
+  * **Feature**: This release adds support for specifying and extracting information from documents using the Queries feature within Analyze Document API
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.4](service/transfer/CHANGELOG.md#v1184-2022-04-19)
+  * **Documentation**: This release contains corrected HomeDirectoryMappings examples for several API functions: CreateAccess, UpdateAccess, CreateUser, and UpdateUser.
+* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.12.0](service/worklink/CHANGELOG.md#v1120-2022-04-19)
+  * **Feature**: Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.
+
+# Release (2022-04-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.9.0](feature/dynamodb/attributevalue/CHANGELOG.md#v190-2022-04-15)
+  * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution.
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.9.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v190-2022-04-15)
+  * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.15.0](service/athena/CHANGELOG.md#v1150-2022-04-15)
+  * **Feature**: This release adds subfields, ErrorMessage, Retryable, to the AthenaError response object in the GetQueryExecution API when a query fails.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.19.0](service/lightsail/CHANGELOG.md#v1190-2022-04-15)
+  * **Feature**: This release adds support to describe the synchronization status of the account-level block public access feature for your Amazon Lightsail buckets.
+
+# Release (2022-04-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.15.0](service/appflow/CHANGELOG.md#v1150-2022-04-14)
+  * **Feature**: Enables users to pass custom token URL parameters for OAuth2 authentication during create connector profile.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.16.0](service/appstream/CHANGELOG.md#v1160-2022-04-14)
+  * **Feature**: Includes updates for create and update fleet APIs to manage the session scripts locations for Elastic fleets.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.0](service/batch/CHANGELOG.md#v1180-2022-04-14)
+  * **Feature**: Enables configuration updates for compute environments with BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.1](service/cloudwatch/CHANGELOG.md#v1181-2022-04-14)
+  * **Documentation**: Updates documentation for additional statistics in CloudWatch Metric Streams.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.1](service/ec2/CHANGELOG.md#v1351-2022-04-14)
+  * **Documentation**: Documentation updates for Amazon EC2.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.23.0](service/glue/CHANGELOG.md#v1230-2022-04-14)
+  * **Feature**: Auto Scaling for Glue version 3.0 and later jobs to dynamically scale compute resources. This SDK change provides customers with the auto-scaled DPU usage.
+
+# Release (2022-04-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.0](service/cloudwatch/CHANGELOG.md#v1180-2022-04-13)
+  * **Feature**: Adds support for additional statistics in CloudWatch Metric Streams.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.23.0](service/fsx/CHANGELOG.md#v1230-2022-04-13)
+  * **Feature**: This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone.
+
+# Release (2022-04-12)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.17.0](service/devopsguru/CHANGELOG.md#v1170-2022-04-12)
+  * **Feature**: This release adds a new API, DeleteInsight, to delete an insight along with its associated anomalies, events, and recommendations.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.0](service/ec2/CHANGELOG.md#v1350-2022-04-12)
+  * **Feature**: X2idn and X2iedn instances are powered by 3rd generation Intel Xeon Scalable processors with an all-core turbo frequency up to 3.5 GHz. C6a instances are powered by 3rd generation AMD EPYC processors.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.17.0](service/efs/CHANGELOG.md#v1170-2022-04-12)
+  * **Feature**: Amazon EFS adds support for a ThrottlingException when using the CreateAccessPoint API if the account is nearing the AccessPoint limit (120); a handling sketch follows this list.
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.6.0](service/iottwinmaker/CHANGELOG.md#v160-2022-04-12)
+  * **Feature**: This release adds the following new features: 1) ListEntities API now supports search using ExternalId. 2) BatchPutPropertyValue and GetPropertyValueHistory API now allow users to represent time in sub-second level precisions.
+* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.15.4](service/kinesis/CHANGELOG.md#v1154-2022-04-12)
+  * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.14.4](service/lexruntimev2/CHANGELOG.md#v1144-2022-04-12)
+  * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.5](service/s3/CHANGELOG.md#v1265-2022-04-12)
+  * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.4](service/transcribestreaming/CHANGELOG.md#v164-2022-04-12)
+  * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+
+# Release (2022-04-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.6.0](service/amplifyuibuilder/CHANGELOG.md#v160-2022-04-11)
+  * **Feature**: In this release, we have added the ability to bind events to component-level actions.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.0](service/apprunner/CHANGELOG.md#v1120-2022-04-11)
+  * **Feature**: This release adds tracing for App Runner services with X-Ray using AWS Distro for OpenTelemetry. New APIs: CreateObservabilityConfiguration, DescribeObservabilityConfiguration, ListObservabilityConfigurations, and DeleteObservabilityConfiguration. Updated APIs: CreateService and UpdateService.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.18.0](service/workspaces/CHANGELOG.md#v1180-2022-04-11)
+  * **Feature**: Added API support that allows customers to create GPU-enabled WorkSpaces using EC2 G4dn instances.
+
+# Release (2022-04-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.22.0](service/mediaconvert/CHANGELOG.md#v1220-2022-04-08)
+  * **Feature**: AWS Elemental MediaConvert SDK has added support for the pass-through of WebVTT styling to WebVTT outputs, pass-through of KLV metadata to supported formats, and improved filter support for processing 444/RGB content.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.17.0](service/mediapackagevod/CHANGELOG.md#v1170-2022-04-08)
+  * **Feature**: This release adds ScteMarkersSource as an available field for Dash Packaging Configurations. When set to MANIFEST, MediaPackage will source the SCTE-35 markers from the manifest. When set to SEGMENTS, MediaPackage will source the SCTE-35 markers from the segments.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.19.0](service/wafv2/CHANGELOG.md#v1190-2022-04-08)
+  * **Feature**: Add a new CurrentDefaultVersion field to the ListAvailableManagedRuleGroupVersions API response; add a new VersioningSupported boolean to each ManagedRuleGroup returned from the ListAvailableManagedRuleGroups API response.
+
+# Release (2022-04-07)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.0](internal/v4a/CHANGELOG.md#v100-2022-04-07)
+  * **Release**: New internal v4a signing module location.
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.18.0](service/docdb/CHANGELOG.md#v1180-2022-04-07)
+  * **Feature**: Added support to enable/disable performance insights when creating or modifying db instances.
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.0](service/eventbridge/CHANGELOG.md#v1160-2022-04-07)
+  * **Feature**: Adds new EventBridge Endpoint resources for disaster recovery, multi-region failover, and cross-region replication capabilities to help you build resilient event-driven applications.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.18.0](service/personalize/CHANGELOG.md#v1180-2022-04-07)
+  * **Feature**: This release provides tagging support in AWS Personalize.
+* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.14.4](service/pi/CHANGELOG.md#v1144-2022-04-07)
+  * **Documentation**: Adds support for DocumentDB to the Performance Insights API.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.27.0](service/sagemaker/CHANGELOG.md#v1270-2022-04-07)
+  * **Feature**: Amazon SageMaker Notebook Instances now support G5 instance types.
+
+# Release (2022-04-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.0](service/configservice/CHANGELOG.md#v1210-2022-04-06)
+  * **Feature**: Add resourceType enums for AWS::EMR::SecurityConfiguration and AWS::SageMaker::CodeRepository.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.24.0](service/kendra/CHANGELOG.md#v1240-2022-04-06)
+  * **Feature**: Amazon Kendra now provides a data source connector for Box. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-box.html
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.22.0](service/lambda/CHANGELOG.md#v1220-2022-04-06)
+  * **Feature**: This release adds new APIs for creating and managing Lambda Function URLs and adds a new FunctionUrlAuthType parameter to the AddPermission API. Customers can use Function URLs to create built-in HTTPS endpoints on their functions (see the sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.7.0](service/panorama/CHANGELOG.md#v170-2022-04-06)
+  * **Feature**: Added Brand field to device listings.
+
+# Release (2022-04-05)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.15.0](service/datasync/CHANGELOG.md#v1150-2022-04-05)
+  * **Feature**: AWS DataSync now supports Amazon FSx for OpenZFS locations.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.22.0](service/fsx/CHANGELOG.md#v1220-2022-04-05)
+  * **Feature**: Provide customers more visibility into file system status by adding a new "Misconfigured Unavailable" status for Amazon FSx for Windows File Server.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.21.4](service/s3control/CHANGELOG.md#v1214-2022-04-05)
+  * **Documentation**: Documentation-only update for doc bug fixes for the S3 Control API docs.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.20.0](service/securityhub/CHANGELOG.md#v1200-2022-04-05)
+  * **Feature**: Added additional ASFF details for RdsSecurityGroup, AutoScalingGroup, ElbLoadBalancer, CodeBuildProject and RedshiftCluster.
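+
+A minimal sketch of the Lambda Function URLs feature from the 2022-04-06 entry above, creating an IAM-authenticated HTTPS endpoint for a hypothetical function:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/lambda"
+	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := lambda.NewFromConfig(cfg)
+
+	// Create a built-in HTTPS endpoint gated by IAM auth.
+	out, err := client.CreateFunctionUrlConfig(context.TODO(), &lambda.CreateFunctionUrlConfigInput{
+		FunctionName: aws.String("my-function"), // hypothetical function name
+		AuthType:     types.FunctionUrlAuthTypeAwsIam,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("function URL:", aws.ToString(out.FunctionUrl))
+}
+```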
+
+# Release (2022-04-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.24.0](service/iot/CHANGELOG.md#v1240-2022-04-04)
+  * **Feature**: AWS IoT - AWS IoT Device Defender adds support to list metric datapoints collected for IoT devices through the ListMetricValues API.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.13.0](service/proton/CHANGELOG.md#v1130-2022-04-04)
+  * **Feature**: SDK release to support tagging for the AWS Proton Repository resource.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.0](service/servicecatalog/CHANGELOG.md#v1140-2022-04-04)
+  * **Feature**: This release adds ProvisioningArtifactOutputKeys to DescribeProvisioningParameters to reference the outputs of a Provisioned Product and deprecates ProvisioningArtifactOutputs.
+* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.12.4](service/sms/CHANGELOG.md#v1124-2022-04-04)
+  * **Documentation**: Revised product update notice for SMS console deprecation.
+
+# Release (2022-04-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.21.0](service/connect/CHANGELOG.md#v1210-2022-04-01)
+  * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable multi-party conferencing using attribute type MULTI_PARTY_CONFERENCING on the specified Amazon Connect instance.
+
+# Release (2022-03-31)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.8.4](feature/dynamodb/attributevalue/CHANGELOG.md#v184-2022-03-31)
+  * **Documentation**: Fixes documentation typos in Number type's helper methods
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.8.4](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v184-2022-03-31)
+  * **Documentation**: Fixes documentation typos in Number type's helper methods
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.3](service/auditmanager/CHANGELOG.md#v1183-2022-03-31)
+  * **Documentation**: This release adds documentation updates for Audit Manager. The updates provide data deletion guidance when a customer deregisters Audit Manager or deregisters a delegated administrator.
+* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.9.0](service/cloudcontrol/CHANGELOG.md#v190-2022-03-31)
+  * **Feature**: SDK release for Cloud Control API in Amazon Web Services China (Beijing) Region, operated by Sinnet, and Amazon Web Services China (Ningxia) Region, operated by NWCD.
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.20.0](service/databrew/CHANGELOG.md#v1200-2022-03-31)
+  * **Feature**: This AWS Glue DataBrew release adds a feature to support ORC as an input format.
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.8.0](service/grafana/CHANGELOG.md#v180-2022-03-31)
+  * **Feature**: This release adds tagging support to the Managed Grafana service. New APIs: TagResource, UntagResource and ListTagsForResource. Updates: adds an optional field, tags, to support tagging while calling CreateWorkspace.
+* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.0.0](service/pinpointsmsvoicev2/CHANGELOG.md#v100-2022-03-31)
+  * **Release**: New AWS service client module
+  * **Feature**: Amazon Pinpoint now offers a version 2.0 suite of SMS and voice APIs, providing increased control over sending and configuration. 
This release is a new SDK for sending SMS and voice messages called PinpointSMSVoiceV2. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.9.0](service/route53recoverycluster/CHANGELOG.md#v190-2022-03-31) + * **Feature**: This release adds a new API "ListRoutingControls" to list routing control states using the highly reliable Route 53 ARC data plane endpoints. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.17.0](service/workspaces/CHANGELOG.md#v1170-2022-03-31) + * **Feature**: Added APIs that allow you to customize the logo, login message, and help links in the WorkSpaces client login page. To learn more, visit https://docs.aws.amazon.com/workspaces/latest/adminguide/customize-branding.html + +# Release (2022-03-30) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.34.0](service/ec2/CHANGELOG.md#v1340-2022-03-30) + * **Feature**: This release simplifies the auto-recovery configuration process enabling customers to set the recovery behavior to disabled or default +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.17.0](service/fms/CHANGELOG.md#v1170-2022-03-30) + * **Feature**: AWS Firewall Manager now supports the configuration of third-party policies that can use either the centralized or distributed deployment models. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.21.0](service/fsx/CHANGELOG.md#v1210-2022-03-30) + * **Feature**: This release adds support for modifying throughput capacity for FSx for ONTAP file systems. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.23.3](service/iot/CHANGELOG.md#v1233-2022-03-30) + * **Documentation**: Doc only update for IoT that fixes customer-reported issues. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.12.0](service/iotdataplane/CHANGELOG.md#v1120-2022-03-30) + * **Feature**: Update the default AWS IoT Core Data Plane endpoint from VeriSign signed to ATS signed. If you have firewalls with strict egress rules, configure the rules to grant you access to data-ats.iot.[region].amazonaws.com or data-ats.iot.[region].amazonaws.com.cn. + +# Release (2022-03-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.15.0](service/organizations/CHANGELOG.md#v1150-2022-03-29) + * **Feature**: This release provides the new CloseAccount API that enables principals in the management account to close any member account within an organization. + +# Release (2022-03-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.3](service/acmpca/CHANGELOG.md#v1173-2022-03-28) + * **Documentation**: Updating service name entities +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.20.0](service/medialive/CHANGELOG.md#v1200-2022-03-28) + * **Feature**: This release adds support for selecting a maintenance window. + +# Release (2022-03-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.17.0](service/batch/CHANGELOG.md#v1170-2022-03-25) + * **Feature**: Bug Fix: Fixed a bug where shapes were marked as unboxed and were not serialized and sent over the wire, causing an API error from the service. + * This is a breaking change, and has been accepted due to the API operation not being usable due to the members modeled as unboxed (aka value) types. The update changes the members to boxed (aka pointer) types so that the zero value of the members can be handled correctly by the SDK and service. 
Your application will fail to compile with the updated module. To work around this, you'll need to update your application to use pointer types for the impacted members.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.33.0](service/ec2/CHANGELOG.md#v1330-2022-03-25)
+  * **Feature**: This release adds support for Amazon VPC Reachability Analyzer to analyze a path through a Transit Gateway.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.24.0](service/ssm/CHANGELOG.md#v1240-2022-03-25)
+  * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for Rocky Linux OS.
+
+# Release (2022-03-24)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.20.0](service/configservice/CHANGELOG.md#v1200-2022-03-24)
+  * **Feature**: Added new APIs GetCustomRulePolicy and GetOrganizationCustomRulePolicy, and updated existing APIs PutConfigRule, DescribeConfigRule, DescribeConfigRuleEvaluationStatus, PutOrganizationConfigRule, DescribeConfigRule to support a new feature for building AWS Config rules with AWS CloudFormation Guard.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.21.0](service/lambda/CHANGELOG.md#v1210-2022-03-24)
+  * **Feature**: Adds support for increased ephemeral storage (/tmp) up to 10 GB for Lambda functions. Customers can now provision up to 10 GB of ephemeral storage per function instance, a 20x increase over the previous limit of 512 MB (see the sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.19.0](service/transcribe/CHANGELOG.md#v1190-2022-03-24)
+  * **Feature**: This release adds an additional parameter for subtitling with Amazon Transcribe batch jobs: outputStartIndex.
+
+# Release (2022-03-23)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.0
+  * **Feature**: Update CredentialsCache to make use of two new optional CredentialsProvider interfaces that give the cache per-provider control over how credentials that fail to refresh are handled and how expiry times are adjusted. See [aws.CredentialsCache](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#CredentialsCache) for more details.
+  * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for the CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.11.0](credentials/CHANGELOG.md#v1110-2022-03-23)
+  * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for the CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.0](service/auditmanager/CHANGELOG.md#v1180-2022-03-23)
+  * **Feature**: This release updates 1 API parameter, the SnsArn attribute. The character length and regex pattern for the SnsArn attribute have been updated, which enables you to deselect an SNS topic when using the UpdateSettings operation.
+* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.15.0](service/ebs/CHANGELOG.md#v1150-2022-03-23)
+  * **Feature**: Increased the maximum supported value for the Timeout parameter of the StartSnapshot API from 60 minutes to 4320 minutes. Changed the HTTP error code for ConflictException from 503 to 409.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.2](service/elasticache/CHANGELOG.md#v1202-2022-03-23)
+  * **Documentation**: Doc only update for ElastiCache
+* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.0.0](service/gamesparks/CHANGELOG.md#v100-2022-03-23)
+  * **Release**: New AWS service client module
+  * **Feature**: Released the preview of Amazon GameSparks, a fully managed AWS service that provides a multi-service backend for game developers.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.22.0](service/redshift/CHANGELOG.md#v1220-2022-03-23)
+  * **Feature**: This release adds a new [--encrypted | --no-encrypted] field in the restore-from-cluster-snapshot API. Customers can now restore an unencrypted snapshot to a cluster encrypted with an AWS managed key or their own KMS key.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.23.0](service/ssm/CHANGELOG.md#v1230-2022-03-23)
+  * **Feature**: Update AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource APIs to reflect the support for tagging Automation resources. Includes other minor documentation updates.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.1](service/transfer/CHANGELOG.md#v1181-2022-03-23)
+  * **Documentation**: Documentation updates for AWS Transfer Family to describe how to remove an associated workflow from a server.
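+
+A minimal sketch of the 10 GB ephemeral storage setting from the Lambda entry in the 2022-03-24 release above. The size is expressed in MB, and the function name is hypothetical:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/lambda"
+	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := lambda.NewFromConfig(cfg)
+
+	// Raise /tmp from the 512 MB default to the new 10 GB maximum (10240 MB).
+	_, err = client.UpdateFunctionConfiguration(context.TODO(), &lambda.UpdateFunctionConfigurationInput{
+		FunctionName:     aws.String("my-function"), // hypothetical function name
+		EphemeralStorage: &types.EphemeralStorage{Size: aws.Int32(10240)},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```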
+
+# Release (2022-03-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.18.0](service/costexplorer/CHANGELOG.md#v1180-2022-03-22)
+  * **Feature**: Added three new APIs to support tagging and resource-level authorization on Cost Explorer resources: TagResource, UntagResource, ListTagsForResource (see the sketch below). Added optional parameters to CreateCostCategoryDefinition, CreateAnomalySubscription and CreateAnomalyMonitor APIs to support Tag On Create.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.2](service/ecs/CHANGELOG.md#v1182-2022-03-22)
+  * **Documentation**: Documentation only update to address tickets
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.16.0](service/lakeformation/CHANGELOG.md#v1160-2022-03-22)
+  * **Feature**: This release fixes the incorrect permissions called out in the documentation - DESCRIBE_TAG, ASSOCIATE_TAG, DELETE_TAG, ALTER_TAG - in both the SDK and the documentation.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.16.0](service/location/CHANGELOG.md#v1160-2022-03-22)
+  * **Feature**: Amazon Location Service now includes a MaxResults parameter for GetDevicePositionHistory requests.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.14.0](service/polly/CHANGELOG.md#v1140-2022-03-22)
+  * **Feature**: Amazon Polly adds a new Catalan voice, Arlet. Arlet is available as a Neural voice only.
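+
+A minimal sketch of the new Cost Explorer tagging APIs from the entry above, assuming the input takes a `ResourceTags` slice of `types.ResourceTag`; the ARN and tag values are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/costexplorer"
+	"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := costexplorer.NewFromConfig(cfg)
+
+	// Tag a Cost Explorer resource (placeholder anomaly monitor ARN).
+	_, err = client.TagResource(context.TODO(), &costexplorer.TagResourceInput{
+		ResourceArn: aws.String("arn:aws:ce::123456789012:anomalymonitor/example"),
+		ResourceTags: []types.ResourceTag{
+			{Key: aws.String("team"), Value: aws.String("billing")},
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```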
+
+# Release (2022-03-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.8.0](service/chimesdkmeetings/CHANGELOG.md#v180-2022-03-21)
+  * **Feature**: Add support for media replication to link multiple WebRTC media sessions together to reach larger and global audiences. Participants connected to a replica session can be granted access to join the primary session and can switch sessions with their existing WebRTC connection.
+* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.17.0](service/ecr/CHANGELOG.md#v1170-2022-03-21)
+  * **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.16.0](service/mediaconnect/CHANGELOG.md#v1160-2022-03-21)
+  * **Feature**: This release adds support for selecting a maintenance window.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.21.0](service/quicksight/CHANGELOG.md#v1210-2022-03-21)
+  * **Feature**: AWS QuickSight Service Features - Expand public API support for group management.
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.16.1](service/ram/CHANGELOG.md#v1161-2022-03-21)
+  * **Documentation**: Documentation improvements to the RAM API operations and parameter descriptions.
+
+# Release (2022-03-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.22.0](service/glue/CHANGELOG.md#v1220-2022-03-18)
+  * **Feature**: Added 9 new APIs for AWS Glue Interactive Sessions: ListSessions, StopSession, CreateSession, GetSession, DeleteSession, RunStatement, GetStatement, ListStatements, CancelStatement
+
+# Release (2022-03-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.0](service/acmpca/CHANGELOG.md#v1170-2022-03-16)
+  * **Feature**: AWS Certificate Manager (ACM) Private Certificate Authority (CA) now supports customizable certificate subject names and extensions.
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.13.0](service/amplifybackend/CHANGELOG.md#v1130-2022-03-16)
+  * **Feature**: Adding the ability to customize Cognito verification messages for email and SMS in CreateBackendAuth and UpdateBackendAuth. Adding deprecation documentation for ForgotPassword in CreateBackendAuth and UpdateBackendAuth.
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.0.0](service/billingconductor/CHANGELOG.md#v100-2022-03-16)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the initial SDK release for AWS Billing Conductor. The AWS Billing Conductor is a customizable billing service, allowing you to customize your billing data to match your desired business structure.
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.13.0](service/s3outposts/CHANGELOG.md#v1130-2022-03-16)
+  * **Feature**: S3 on Outposts is releasing a new API, ListSharedEndpoints, that lists all endpoints associated with S3 on Outposts that have been shared via Resource Access Manager (RAM).
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.13.0](service/ssmincidents/CHANGELOG.md#v1130-2022-03-16)
+  * **Feature**: Removed incorrect validation pattern for IncidentRecordSource.invokedBy
+
+# Release (2022-03-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.15.0](service/cognitoidentityprovider/CHANGELOG.md#v1150-2022-03-15)
+  * **Feature**: Updated EmailConfigurationType and SmsConfigurationType to reflect that you can now choose Amazon SES and Amazon SNS resources in the same Region.
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.15.0](service/dataexchange/CHANGELOG.md#v1150-2022-03-15)
+  * **Feature**: This feature enables data providers to use the RevokeRevision operation to revoke subscriber access to a given revision. Subscribers are unable to interact with assets within a revoked revision.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.32.0](service/ec2/CHANGELOG.md#v1320-2022-03-15)
+  * **Feature**: Adds the Cascade parameter to the DeleteIpam API. Customers can use this parameter to automatically delete their IPAM, including non-default scopes, pools, cidrs, and allocations. 
There must be no pools provisioned in the default public scope if you use this parameter.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.1](service/ecs/CHANGELOG.md#v1181-2022-03-15)
+  * **Documentation**: Documentation only update to address tickets
+* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.2](service/keyspaces/CHANGELOG.md#v102-2022-03-15)
+  * **Documentation**: Fixing formatting issues in CLI and SDK documentation
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.15.1](service/location/CHANGELOG.md#v1151-2022-03-15)
+  * **Documentation**: New HERE styles "VectorHereExplore" and "VectorHereExploreTruck".
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.1](service/rds/CHANGELOG.md#v1181-2022-03-15)
+  * **Documentation**: Various documentation improvements
+* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.17.0](service/robomaker/CHANGELOG.md#v1170-2022-03-15)
+  * **Feature**: This release deprecates ROS, Ubuntu and Gazebo from RoboMaker Simulation Service Software Suites in favor of user-supplied containers and Relaxed Software Suites.
+
+# Release (2022-03-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.19.0](service/configservice/CHANGELOG.md#v1190-2022-03-14)
+  * **Feature**: Add resourceType enums for AWS::ECR::PublicRepository and AWS::EC2::LaunchTemplate
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.1](service/elasticache/CHANGELOG.md#v1201-2022-03-14)
+  * **Documentation**: Doc only update for ElastiCache
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.23.0](service/kendra/CHANGELOG.md#v1230-2022-03-14)
+  * **Feature**: Amazon Kendra now provides a data source connector for Slack. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-slack.html
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.14.0](service/timestreamquery/CHANGELOG.md#v1140-2022-03-14)
+  * **Feature**: Amazon Timestream Scheduled Queries now support the Timestamp datatype in a multi-measure record.
+
+# Release (2022-03-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.20.0](service/chime/CHANGELOG.md#v1200-2022-03-11)
+  * **Feature**: Chime VoiceConnector Logging APIs will now support MediaMetricLogs. Also, CreateMeetingDialOut now returns AccessDeniedException.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.20.0](service/connect/CHANGELOG.md#v1200-2022-03-11)
+  * **Feature**: This release adds support for enabling Rich Messaging when starting a new chat session via the StartChatContact API. Rich Messaging enables the following formatting options: bold, italics, hyperlinks, bulleted lists, and numbered lists.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.20.0](service/lambda/CHANGELOG.md#v1200-2022-03-11)
+  * **Feature**: Adds PrincipalOrgID support to the AddPermission API. Customers can use it to manage permissions to Lambda functions at the AWS Organizations level (see the sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.18.0](service/outposts/CHANGELOG.md#v1180-2022-03-11)
+  * **Feature**: This release adds address filters for ListSites.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.1](service/secretsmanager/CHANGELOG.md#v1151-2022-03-11)
+  * **Documentation**: Documentation updates for Secrets Manager.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.0](service/transcribestreaming/CHANGELOG.md#v160-2022-03-11)
+  * **Feature**: Amazon Transcribe StartTranscription API now supports additional parameters for the Language Identification feature: customVocabularies and customFilterVocabularies.
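+
+A minimal sketch of the `PrincipalOrgID` condition from the Lambda entry in the 2022-03-11 release above, granting invoke access to any principal within a placeholder AWS Organization:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/lambda"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := lambda.NewFromConfig(cfg)
+
+	// Allow any principal, but only within the given AWS Organization.
+	_, err = client.AddPermission(context.TODO(), &lambda.AddPermissionInput{
+		FunctionName:   aws.String("my-function"), // hypothetical function name
+		StatementId:    aws.String("org-wide-invoke"),
+		Action:         aws.String("lambda:InvokeFunction"),
+		Principal:      aws.String("*"),
+		PrincipalOrgID: aws.String("o-xxxxxxxxxx"), // placeholder org ID
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```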
+
+# Release (2022-03-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.20.0](service/lexmodelsv2/CHANGELOG.md#v1200-2022-03-10)
+  * **Feature**: This release makes slotTypeId an optional parameter in the CreateSlot and UpdateSlot APIs in Amazon Lex V2 for model building. Customers can create and update slots without specifying a slot type id.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.18.0](service/transcribe/CHANGELOG.md#v1180-2022-03-10)
+  * **Feature**: Documentation fix for the `StartMedicalTranscriptionJobRequest` API, now showing the minimum sample rate as 16 kHz.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.0](service/transfer/CHANGELOG.md#v1180-2022-03-10)
+  * **Feature**: Adding more descriptive error types for managed workflows.
+
+# Release (2022-03-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.17.0](service/comprehend/CHANGELOG.md#v1170-2022-03-09)
+  * **Feature**: Amazon Comprehend now supports extracting the sentiment associated with entities such as brands, products and services from text documents.
+
+# Release (2022-03-08.3)
+
+* No change notes available for this release.
+
+# Release (2022-03-08.2)
+
+* No change notes available for this release.
+
+# Release (2022-03-08)
+
+## General Highlights
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.0](service/amplify/CHANGELOG.md#v1110-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.5.0](service/amplifyuibuilder/CHANGELOG.md#v150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.14.0](service/appflow/CHANGELOG.md#v1140-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.11.0](service/apprunner/CHANGELOG.md#v1110-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.14.0](service/athena/CHANGELOG.md#v1140-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.15.0](service/braket/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.7.0](service/chimesdkmeetings/CHANGELOG.md#v170-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.15.0](service/cloudtrail/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.19.0](service/connect/CHANGELOG.md#v1190-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.16.0](service/devopsguru/CHANGELOG.md#v1160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.31.0](service/ec2/CHANGELOG.md#v1310-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.16.0](service/ecr/CHANGELOG.md#v1160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.0](service/ecs/CHANGELOG.md#v1180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.0](service/elasticache/CHANGELOG.md#v1200-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.0](service/finspacedata/CHANGELOG.md#v1100-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.12.0](service/fis/CHANGELOG.md#v1120-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.20.0](service/fsx/CHANGELOG.md#v1200-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.0](service/gamelift/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.15.0](service/greengrassv2/CHANGELOG.md#v1150-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.0](service/internal/checksum/CHANGELOG.md#v110-2022-03-08)
+  * **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
+* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.8.0](service/kafkaconnect/CHANGELOG.md#v180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.22.0](service/kendra/CHANGELOG.md#v1220-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.0](service/keyspaces/CHANGELOG.md#v100-2022-03-08)
+  * **Release**: New AWS service client module
+* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.14.0](service/macie/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.15.0](service/mediapackage/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.13.0](service/mgn/CHANGELOG.md#v1130-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.0](service/migrationhubrefactorspaces/CHANGELOG.md#v150-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.12.0](service/mq/CHANGELOG.md#v1120-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.6.0](service/panorama/CHANGELOG.md#v160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.0](service/rds/CHANGELOG.md#v1180-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.8.0](service/route53recoverycluster/CHANGELOG.md#v180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.12.0](service/servicecatalogappregistry/CHANGELOG.md#v1120-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.18.0](service/sqs/CHANGELOG.md#v1180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.0](service/sts/CHANGELOG.md#v1160-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.14.0](service/synthetics/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.13.0](service/timestreamquery/CHANGELOG.md#v1130-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.17.0](service/transfer/CHANGELOG.md#v1170-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+
+# Release (2022-02-24.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.21.0](service/autoscaling/CHANGELOG.md#v1210-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.18.0](service/databrew/CHANGELOG.md#v1180-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.15.0](service/fms/CHANGELOG.md#v1150-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.17.0](service/lightsail/CHANGELOG.md#v1170-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.19.0](service/route53/CHANGELOG.md#v1190-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.20.0](service/s3control/CHANGELOG.md#v1200-2022-02-242)
+  * **Feature**: API client updated
+
+# Release (2022-02-24)
+
+## General Highlights
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode that adds client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options (a sketch follows these highlights).
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
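+
+A minimal sketch of opting into the new retry configuration from Go code; the same settings can also come from the environment (`AWS_RETRY_MODE`, `AWS_MAX_ATTEMPTS`) or the shared config file:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+)
+
+func main() {
+	// Adaptive mode is experimental: it adds client-side rate limiting when
+	// throttle responses are received from an API.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithRetryMode(aws.RetryModeAdaptive),
+		config.WithRetryMaxAttempts(5),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg // pass cfg to service clients as usual
+}
+```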
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.14.0
+  * **Feature**: Add new AdaptiveMode retryer to the aws/retry package. This new retryer uses dynamic token bucketing with client rate limiting when throttle responses are received.
+  * **Feature**: Adds a new interface, aws.RetryerV2, replacing aws.Retryer and deprecating the GetInitialToken method in favor of GetAttemptToken so a Context can be provided. The SDK will use aws.RetryerV2 internally, wrapping aws.Retryers as aws.RetryerV2 automatically.
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.14.0](config/CHANGELOG.md#v1140-2022-02-24)
+  * **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+  * **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. Its usage is the same as the `AWS_CA_BUNDLE` environment variable, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.9.0](credentials/CHANGELOG.md#v190-2022-02-24)
+  * **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.7.0](feature/dynamodb/attributevalue/CHANGELOG.md#v170-2022-02-24)
+  * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps with key types of string, number, bool, and types implementing the encoding.Text(Un)Marshaler interface
+  * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569), inconsistent serialization of Go struct field names
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.4.0](feature/dynamodb/expression/CHANGELOG.md#v140-2022-02-24)
+  * **Feature**: Add support for expression names with dots via the new NameBuilder function NameNoDotSplit, related to [aws/aws-sdk-go#2570](https://github.com/aws/aws-sdk-go/issues/2570)
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.7.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v170-2022-02-24)
+  * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps with key types of string, number, bool, and types implementing the encoding.Text(Un)Marshaler interface
+  * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569), inconsistent serialization of Go struct field names
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.14.0](service/accessanalyzer/CHANGELOG.md#v1140-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.5.0](service/account/CHANGELOG.md#v150-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/acm`: 
[v1.13.0](service/acm/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.15.0](service/acmpca/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.13.0](service/alexaforbusiness/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.13.0](service/amp/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.10.0](service/amplify/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.11.0](service/amplifybackend/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.4.0](service/amplifyuibuilder/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.14.0](service/apigateway/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.9.0](service/apigatewaymanagementapi/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.11.0](service/apigatewayv2/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.11.0](service/appconfig/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.3.0](service/appconfigdata/CHANGELOG.md#v130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.13.0](service/appflow/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.12.0](service/appintegrations/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.14.0](service/applicationautoscaling/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.8.0](service/applicationcostprofiler/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.11.0](service/applicationdiscoveryservice/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.14.0](service/applicationinsights/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.12.0](service/appmesh/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.10.0](service/apprunner/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.14.0](service/appstream/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.13.0](service/appsync/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.13.0](service/athena/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.16.0](service/auditmanager/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.20.0](service/autoscaling/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.11.0](service/autoscalingplans/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.14.0](service/backup/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.4.0](service/backupgateway/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.15.0](service/batch/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.14.0](service/braket/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.11.0](service/budgets/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.18.0](service/chime/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.8.0](service/chimesdkidentity/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.6.0](service/chimesdkmeetings/CHANGELOG.md#v160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.8.0](service/chimesdkmessaging/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.15.0](service/cloud9/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.7.0](service/cloudcontrol/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.11.0](service/clouddirectory/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.19.0](service/cloudformation/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.15.0](service/cloudfront/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.11.0](service/cloudhsm/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.12.0](service/cloudhsmv2/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.12.0](service/cloudsearch/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.10.0](service/cloudsearchdomain/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.14.0](service/cloudtrail/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.16.0](service/cloudwatch/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: 
[v1.13.0](service/cloudwatchevents/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.14.0](service/cloudwatchlogs/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.11.0](service/codeartifact/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.18.0](service/codebuild/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.12.0](service/codecommit/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.13.0](service/codedeploy/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.11.0](service/codeguruprofiler/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.14.0](service/codegurureviewer/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.12.0](service/codepipeline/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.10.0](service/codestar/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.12.0](service/codestarconnections/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.10.0](service/codestarnotifications/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.12.0](service/cognitoidentity/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.13.0](service/cognitoidentityprovider/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.10.0](service/cognitosync/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.15.0](service/comprehend/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.12.0](service/comprehendmedical/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.16.0](service/computeoptimizer/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.17.0](service/configservice/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.18.0](service/connect/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.11.0](service/connectcontactlens/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.10.0](service/connectparticipant/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: 
[v1.12.0](service/costandusagereportservice/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.16.0](service/costexplorer/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.16.0](service/customerprofiles/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.17.0](service/databasemigrationservice/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.17.0](service/databrew/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.13.0](service/dataexchange/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.12.0](service/datapipeline/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.13.0](service/datasync/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.10.0](service/dax/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.14.0](service/detective/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.12.0](service/devicefarm/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.15.0](service/devopsguru/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.16.0](service/directconnect/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.12.0](service/directoryservice/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.10.0](service/dlm/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.16.0](service/docdb/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.4.0](service/drs/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.14.0](service/dynamodb/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.12.0](service/dynamodbstreams/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.13.0](service/ebs/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.30.0](service/ec2/CHANGELOG.md#v1300-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.12.0](service/ec2instanceconnect/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.15.0](service/ecr/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.12.0](service/ecrpublic/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.17.0](service/ecs/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.15.0](service/efs/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.19.0](service/eks/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.19.0](service/elasticache/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.13.0](service/elasticbeanstalk/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.10.0](service/elasticinference/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.13.0](service/elasticloadbalancing/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.17.0](service/elasticloadbalancingv2/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.14.0](service/elasticsearchservice/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.12.0](service/elastictranscoder/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.16.0](service/emr/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.12.0](service/emrcontainers/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.14.0](service/eventbridge/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.5.0](service/evidently/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.7.0](service/finspace/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.9.0](service/finspacedata/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.13.0](service/firehose/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.11.0](service/fis/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.14.0](service/fms/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.18.0](service/forecast/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.10.0](service/forecastquery/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated + * **Bug Fix**: Fixed an issue that resulted in the wrong service endpoints being constructed. 
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.18.0](service/frauddetector/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.19.0](service/fsx/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.13.0](service/gamelift/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.12.0](service/glacier/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.12.0](service/globalaccelerator/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.20.0](service/glue/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.6.0](service/grafana/CHANGELOG.md#v160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.12.0](service/greengrass/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.14.0](service/greengrassv2/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.12.0](service/groundstation/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.12.0](service/guardduty/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.14.0](service/health/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.13.0](service/healthlake/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.11.0](service/honeycode/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.17.0](service/iam/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.13.0](service/identitystore/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.18.0](service/imagebuilder/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.11.0](service/inspector/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.5.0](service/inspector2/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.0.0](service/internal/checksum/CHANGELOG.md#v100-2022-02-24) + * **Release**: New module for computing checksums +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.22.0](service/iot/CHANGELOG.md#v1220-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.9.0](service/iot1clickdevicesservice/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.10.0](service/iot1clickprojects/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: 
[v1.11.0](service/iotanalytics/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.10.0](service/iotdataplane/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.13.0](service/iotdeviceadvisor/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.13.0](service/iotevents/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.10.0](service/ioteventsdata/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.11.0](service/iotfleethub/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.10.0](service/iotjobsdataplane/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.11.0](service/iotsecuretunneling/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.19.0](service/iotsitewise/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.11.0](service/iotthingsgraph/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.4.0](service/iottwinmaker/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.17.0](service/iotwireless/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.15.0](service/ivs/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.16.0](service/kafka/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.7.0](service/kafkaconnect/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.21.0](service/kendra/CHANGELOG.md#v1210-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.14.0](service/kinesis/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.12.0](service/kinesisanalytics/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.13.0](service/kinesisanalyticsv2/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.10.0](service/kinesisvideo/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.11.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.9.0](service/kinesisvideomedia/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.9.0](service/kinesisvideosignaling/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/kms`: [v1.15.0](service/kms/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.14.0](service/lakeformation/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.18.0](service/lambda/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.15.0](service/lexmodelbuildingservice/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.18.0](service/lexmodelsv2/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.11.0](service/lexruntimeservice/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.13.0](service/lexruntimev2/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.14.0](service/licensemanager/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.16.0](service/lightsail/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.14.0](service/location/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.11.0](service/lookoutequipment/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.10.0](service/lookoutmetrics/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.11.0](service/lookoutvision/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.13.0](service/machinelearning/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.13.0](service/macie/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.19.0](service/macie2/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.11.0](service/managedblockchain/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.11.0](service/marketplacecatalog/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.10.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.10.0](service/marketplaceentitlementservice/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.12.0](service/marketplacemetering/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.14.0](service/mediaconnect/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: 
[v1.20.0](service/mediaconvert/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.18.0](service/medialive/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.14.0](service/mediapackage/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.15.0](service/mediapackagevod/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.11.0](service/mediastore/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.11.0](service/mediastoredata/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.15.0](service/mediatailor/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.8.0](service/memorydb/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.12.0](service/mgn/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.11.0](service/migrationhub/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.11.0](service/migrationhubconfig/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.4.0](service/migrationhubrefactorspaces/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.4.0](service/migrationhubstrategy/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.10.0](service/mobile/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.11.0](service/mq/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.12.0](service/mturk/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.11.0](service/mwaa/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.15.0](service/neptune/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.14.0](service/networkfirewall/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.11.0](service/networkmanager/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.11.0](service/nimble/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.8.0](service/opensearch/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.12.0](service/opsworks/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.13.0](service/opsworkscm/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API 
client updated +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.13.0](service/organizations/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.16.0](service/outposts/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.5.0](service/panorama/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.16.0](service/personalize/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.10.0](service/personalizeevents/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.10.0](service/personalizeruntime/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.13.0](service/pi/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.15.0](service/pinpoint/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.10.0](service/pinpointemail/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.9.0](service/pinpointsmsvoice/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.12.0](service/polly/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.13.0](service/pricing/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.11.0](service/proton/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.13.0](service/qldb/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.12.0](service/qldbsession/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.19.0](service/quicksight/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.15.0](service/ram/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.5.0](service/rbin/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.17.0](service/rds/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.10.0](service/rdsdata/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.20.0](service/redshift/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.14.0](service/redshiftdata/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.16.0](service/rekognition/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.4.0](service/resiliencehub/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client 
updated +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.11.0](service/resourcegroups/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.12.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.15.0](service/robomaker/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.18.0](service/route53/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.11.0](service/route53domains/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.7.0](service/route53recoverycluster/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.8.0](service/route53recoverycontrolconfig/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.7.0](service/route53recoveryreadiness/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.14.0](service/route53resolver/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.5.0](service/rum/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.25.0](service/s3/CHANGELOG.md#v1250-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.19.0](service/s3control/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.11.0](service/s3outposts/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.25.0](service/sagemaker/CHANGELOG.md#v1250-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.11.0](service/sagemakera2iruntime/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.10.0](service/sagemakeredge/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.10.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.14.0](service/sagemakerruntime/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.10.0](service/savingsplans/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.13.0](service/schemas/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.14.0](service/secretsmanager/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.18.0](service/securityhub/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: 
[v1.10.0](service/serverlessapplicationrepository/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.12.0](service/servicecatalog/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.11.0](service/servicecatalogappregistry/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.16.0](service/servicediscovery/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.12.0](service/servicequotas/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.13.0](service/ses/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.12.0](service/sesv2/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.12.0](service/sfn/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.15.0](service/shield/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.12.0](service/signer/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.11.0](service/sms/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.14.0](service/snowball/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.7.0](service/snowdevicemanagement/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.16.0](service/sns/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.17.0](service/sqs/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.21.0](service/ssm/CHANGELOG.md#v1210-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.12.0](service/ssmcontacts/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.11.0](service/ssmincidents/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.10.0](service/sso/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.13.0](service/ssoadmin/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.11.0](service/ssooidc/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.15.0](service/storagegateway/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.15.0](service/sts/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.12.0](service/support/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: 
[v1.12.0](service/swf/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.13.0](service/synthetics/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.13.0](service/textract/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.12.0](service/timestreamquery/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.12.0](service/timestreamwrite/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.16.0](service/transcribe/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.4.0](service/transcribestreaming/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.16.0](service/transfer/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.12.0](service/translate/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.7.0](service/voiceid/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.10.0](service/waf/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.11.0](service/wafregional/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.17.0](service/wafv2/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.13.0](service/wellarchitected/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.6.0](service/wisdom/CHANGELOG.md#v160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.10.0](service/workdocs/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.10.0](service/worklink/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.14.0](service/workmail/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.10.0](service/workmailmessageflow/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.15.0](service/workspaces/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.4.0](service/workspacesweb/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.12.0](service/xray/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated + +# Release (2022-01-28) + +## General Highlights +* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` when set in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568).
Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug (a minimal example profile appears below). +* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.13.1](config/CHANGELOG.md#v1131-2022-01-28) + * **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed-in functional options. Previously, errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. + * **Bug Fix**: Updates the `config` module to use os.UserHomeDir instead of a hard-coded, OS-specific environment variable. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.13.0](service/applicationinsights/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.13.1](service/cloudtrail/CHANGELOG.md#v1131-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.13.1](service/codegurureviewer/CHANGELOG.md#v1131-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.16.0](service/configservice/CHANGELOG.md#v1160-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.17.0](service/connect/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.12.1](service/ebs/CHANGELOG.md#v1121-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.29.0](service/ec2/CHANGELOG.md#v1290-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.11.0](service/ec2instanceconnect/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.14.0](service/efs/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.10.0](service/fis/CHANGELOG.md#v1100-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.17.0](service/frauddetector/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.18.0](service/fsx/CHANGELOG.md#v1180-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.11.0](service/greengrass/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.13.0](service/greengrassv2/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.11.0](service/guardduty/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model.
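The `duration_seconds` fix in the General Highlights above concerns profiles like the following. This is a hypothetical shared-config profile (names and ARN are placeholders); the key may appear in the shared config file, the shared credentials file, or both under the same profile:

```ini
# ~/.aws/config -- hypothetical profile; names and ARN are placeholders.
[profile assume-role]
role_arn = arn:aws:iam::123456789012:role/example-role
source_profile = base
duration_seconds = 3600
```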
+* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.10.0](service/honeycode/CHANGELOG.md#v1100-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.14.0](service/ivs/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.15.0](service/kafka/CHANGELOG.md#v1150-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.13.0](service/location/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.9.0](service/lookoutmetrics/CHANGELOG.md#v190-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.18.0](service/macie2/CHANGELOG.md#v1180-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.19.0](service/mediaconvert/CHANGELOG.md#v1190-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.14.0](service/mediatailor/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.14.0](service/ram/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.6.1](service/route53recoveryreadiness/CHANGELOG.md#v161-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.24.0](service/sagemaker/CHANGELOG.md#v1240-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.17.0](service/securityhub/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.14.0](service/storagegateway/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.15.0](service/transcribe/CHANGELOG.md#v1150-2022-01-28) + * **Feature**: Updated to latest API model. + +# Release (2022-01-14) + +## General Highlights +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.13.0 + * **Bug Fix**: Updates the Retry middleware to release the retry token on subsequent attempts. This fixes #1413 and is based on PR #1424 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.6.0](feature/dynamodb/attributevalue/CHANGELOG.md#v160-2022-01-14) + * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.6.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v160-2022-01-14) + * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494
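The MarshalWithOptions/UnmarshalWithOptions helpers above take functional options. A minimal sketch, assuming the `EncoderOptions`/`DecoderOptions` structs expose a `TagKey` field (the custom `ddb` tag key is illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
)

// Record uses a custom struct tag key instead of the default "dynamodbav".
type Record struct {
	ID   string `ddb:"id"`
	Name string `ddb:"name"`
}

func main() {
	// Marshal with an encoder option selecting the custom tag key.
	av, err := attributevalue.MarshalWithOptions(
		Record{ID: "1", Name: "example"},
		func(o *attributevalue.EncoderOptions) { o.TagKey = "ddb" },
	)
	if err != nil {
		panic(err)
	}

	// Round-trip back with the matching decoder option.
	var out Record
	if err := attributevalue.UnmarshalWithOptions(av, &out,
		func(o *attributevalue.DecoderOptions) { o.TagKey = "ddb" },
	); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```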
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.12.0](service/appsync/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.10.0](service/autoscalingplans/CHANGELOG.md#v1100-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.15.0](service/computeoptimizer/CHANGELOG.md#v1150-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.15.0](service/costexplorer/CHANGELOG.md#v1150-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.16.0](service/databasemigrationservice/CHANGELOG.md#v1160-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.16.0](service/databrew/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.28.0](service/ec2/CHANGELOG.md#v1280-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.18.0](service/elasticache/CHANGELOG.md#v1180-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.13.0](service/elasticsearchservice/CHANGELOG.md#v1130-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.8.0](service/finspacedata/CHANGELOG.md#v180-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.13.0](service/fms/CHANGELOG.md#v1130-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.19.0](service/glue/CHANGELOG.md#v1190-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.9.0](service/honeycode/CHANGELOG.md#v190-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.12.0](service/identitystore/CHANGELOG.md#v1120-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.9.0](service/ioteventsdata/CHANGELOG.md#v190-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.16.0](service/iotwireless/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.20.0](service/kendra/CHANGELOG.md#v1200-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.17.0](service/lexmodelsv2/CHANGELOG.md#v1170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.12.0](service/lexruntimev2/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.8.0](service/lookoutmetrics/CHANGELOG.md#v180-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.17.0](service/medialive/CHANGELOG.md#v1170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.13.0](service/mediatailor/CHANGELOG.md#v1130-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.10.0](service/mwaa/CHANGELOG.md#v1100-2022-01-14) +
* **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.10.0](service/nimble/CHANGELOG.md#v1100-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.7.0](service/opensearch/CHANGELOG.md#v170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.12.0](service/pi/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.14.0](service/pinpoint/CHANGELOG.md#v1140-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.16.0](service/rds/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.20.0](service/ssm/CHANGELOG.md#v1200-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.9.0](service/sso/CHANGELOG.md#v190-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.14.0](service/transcribe/CHANGELOG.md#v1140-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.14.0](service/workspaces/CHANGELOG.md#v1140-2022-01-14) + * **Feature**: Updated API models + +# Release (2022-01-07) + +## General Highlights +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.12.0](config/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: Adds a load option for the CredentialsCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions is only used if the configuration loader wraps the underlying credential provider in the CredentialsCache.
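A minimal sketch of the new load option above, assuming `config.WithCredentialsCacheOptions` is the functional option backing this LoadOptions member and that `aws.CredentialsCacheOptions` exposes an `ExpiryWindow` field:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Tune the CredentialsCache that the loader wraps around the
	// resolved credential provider; the option is ignored if the
	// loader does not wrap the provider in a CredentialsCache.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
			// Refresh credentials 10 minutes before they expire.
			o.ExpiryWindow = 10 * time.Minute
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // use cfg to construct service clients
}
```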
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.12.0](service/appstream/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.12.0](service/cloudtrail/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.12.0](service/detective/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.27.0](service/ec2/CHANGELOG.md#v1270-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.15.0](service/ecs/CHANGELOG.md#v1150-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.17.0](service/eks/CHANGELOG.md#v1170-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.18.0](service/glue/CHANGELOG.md#v1180-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.11.0](service/greengrassv2/CHANGELOG.md#v1110-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.20.0](service/iot/CHANGELOG.md#v1200-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.12.0](service/lakeformation/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.16.0](service/lambda/CHANGELOG.md#v1160-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.17.0](service/mediaconvert/CHANGELOG.md#v1170-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.17.0](service/quicksight/CHANGELOG.md#v1170-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.15.0](service/rds/CHANGELOG.md#v1150-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.14.0](service/rekognition/CHANGELOG.md#v1140-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.23.0](service/s3/CHANGELOG.md#v1230-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.17.0](service/s3control/CHANGELOG.md#v1170-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.9.0](service/s3outposts/CHANGELOG.md#v190-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.22.0](service/sagemaker/CHANGELOG.md#v1220-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.12.0](service/secretsmanager/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.9.0](service/ssooidc/CHANGELOG.md#v190-2022-01-07) + * **Feature**: API client updated + +# Release (2021-12-21) + +## General Highlights +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. 
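For the paginator change above, here is a sketch of resuming pagination from a previously saved token. The bucket name and token value are placeholders, and S3's `ContinuationToken` stands in for whichever token field the operation's input defines:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Resume from a token saved by an earlier run; the paginator now
	// honors the initial token on the input and stops on empty tokens.
	saved := "example-continuation-token" // hypothetical saved token
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:            aws.String("example-bucket"),
		ContinuationToken: aws.String(saved),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(page.Contents), "objects in page")
	}
}
```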
+* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.11.0](service/accessanalyzer/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.10.0](service/acm/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.11.0](service/apigateway/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.11.0](service/applicationautoscaling/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.10.0](service/appsync/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.17.0](service/autoscaling/CHANGELOG.md#v1170-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.3.0](service/chimesdkmeetings/CHANGELOG.md#v130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.5.0](service/chimesdkmessaging/CHANGELOG.md#v150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.4.0](service/cloudcontrol/CHANGELOG.md#v140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.16.0](service/cloudformation/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.13.0](service/cloudwatch/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.10.0](service/cloudwatchevents/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.11.0](service/cloudwatchlogs/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.10.0](service/codedeploy/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.9.0](service/comprehendmedical/CHANGELOG.md#v190-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.13.0](service/configservice/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.13.0](service/customerprofiles/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.14.0](service/databasemigrationservice/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.10.0](service/datasync/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.12.0](service/devopsguru/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: 
[v1.13.0](service/directconnect/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.13.0](service/docdb/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.11.0](service/dynamodb/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.9.0](service/dynamodbstreams/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.26.0](service/ec2/CHANGELOG.md#v1260-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.12.0](service/ecr/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.14.0](service/ecs/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.16.0](service/elasticache/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.10.0](service/elasticloadbalancing/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.14.0](service/elasticloadbalancingv2/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.11.0](service/elasticsearchservice/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.13.0](service/emr/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.11.0](service/eventbridge/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.6.0](service/finspacedata/CHANGELOG.md#v160-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.15.0](service/forecast/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.9.0](service/glacier/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.9.0](service/groundstation/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.11.0](service/health/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.15.0](service/imagebuilder/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.19.0](service/iot/CHANGELOG.md#v1190-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.11.0](service/kinesis/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.9.0](service/kinesisanalytics/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest 
service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.10.0](service/kinesisanalyticsv2/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.12.0](service/kms/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.15.0](service/lambda/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.15.0](service/lexmodelsv2/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.10.0](service/location/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.6.0](service/lookoutmetrics/CHANGELOG.md#v160-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.8.0](service/lookoutvision/CHANGELOG.md#v180-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.9.0](service/marketplacemetering/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.11.0](service/mediaconnect/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.12.0](service/neptune/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.11.0](service/networkfirewall/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.8.0](service/nimble/CHANGELOG.md#v180-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.5.0](service/opensearch/CHANGELOG.md#v150-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.13.0](service/outposts/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.10.0](service/pi/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.10.0](service/qldb/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.14.0](service/rds/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.17.0](service/redshift/CHANGELOG.md#v1170-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.8.0](service/resourcegroups/CHANGELOG.md#v180-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.9.0](service/resourcegroupstaggingapi/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.15.0](service/route53/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.8.0](service/route53domains/CHANGELOG.md#v180-2021-12-21) + * **Feature**: 
API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.5.0](service/route53recoverycontrolconfig/CHANGELOG.md#v150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.22.0](service/s3/CHANGELOG.md#v1220-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.16.0](service/s3control/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.21.0](service/sagemaker/CHANGELOG.md#v1210-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.7.3](service/savingsplans/CHANGELOG.md#v173-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.11.0](service/secretsmanager/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.14.0](service/securityhub/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.9.0](service/sfn/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.8.0](service/sms/CHANGELOG.md#v180-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.13.0](service/sns/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.14.0](service/sqs/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.18.0](service/ssm/CHANGELOG.md#v1180-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.12.0](service/sts/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.9.0](service/support/CHANGELOG.md#v190-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.9.0](service/swf/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.13.0](service/transfer/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.11.0](service/workmail/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated + +# Release (2021-12-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.1](service/accessanalyzer/CHANGELOG.md#v1101-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.9.3](service/amp/CHANGELOG.md#v193-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller.
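+
+The accessanalyzer and amp fixes above (and the matching fixes below) restore the SDK's idempotency-token middleware: when a request's idempotency parameter is left unset, the SDK generates one before sending. A minimal sketch, assuming `CreateWorkspaceInput.ClientToken` is the amp idempotency parameter (the field name and alias are illustrative assumptions, not from this changelog):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/amp"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := amp.NewFromConfig(cfg)
+
+	// ClientToken is deliberately left nil: with these fixes, the SDK's
+	// idempotency-token middleware fills it in before the request is
+	// signed, so retries are treated as the same logical operation.
+	if _, err := client.CreateWorkspace(context.TODO(), &amp.CreateWorkspaceInput{
+		Alias: aws.String("demo-workspace"),
+	}); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+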
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.0.0](service/amplifyuibuilder/CHANGELOG.md#v100-2021-12-03) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.8.3](service/appmesh/CHANGELOG.md#v183-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.2](service/braket/CHANGELOG.md#v1102-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.7.3](service/codeguruprofiler/CHANGELOG.md#v173-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.1](service/evidently/CHANGELOG.md#v111-2021-12-03) + * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.2.3](service/grafana/CHANGELOG.md#v123-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.2](service/location/CHANGELOG.md#v192-2021-12-03) + * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. + * **Bug Fix**: Fixed an issue that caused some operations to not be signed using sigv4, resulting in authentication failures. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.7.0](service/networkmanager/CHANGELOG.md#v170-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.7.3](service/nimble/CHANGELOG.md#v173-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.2](service/proton/CHANGELOG.md#v172-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.10.0](service/ram/CHANGELOG.md#v1100-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.12.0](service/rekognition/CHANGELOG.md#v1120-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.3.3](service/snowdevicemanagement/CHANGELOG.md#v133-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.3](service/wisdom/CHANGELOG.md#v123-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. + +# Release (2021-12-02) + +## General Highlights +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.11.0](config/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.0](service/accessanalyzer/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.9.0](service/applicationinsights/CHANGELOG.md#v190-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.0.0](service/backupgateway/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.8.0](service/cloudhsm/CHANGELOG.md#v180-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.11.0](service/devopsguru/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.12.0](service/directconnect/CHANGELOG.md#v1120-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.10.0](service/dynamodb/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.25.0](service/ec2/CHANGELOG.md#v1250-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.0](service/evidently/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.14.0](service/fsx/CHANGELOG.md#v1140-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.16.0](service/glue/CHANGELOG.md#v1160-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.1.0](service/inspector2/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.18.0](service/iot/CHANGELOG.md#v1180-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.0.0](service/iottwinmaker/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.11.0](service/kafka/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.17.0](service/kendra/CHANGELOG.md#v1170-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.10.0](service/kinesis/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.10.0](service/lakeformation/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.14.0](service/lexmodelsv2/CHANGELOG.md#v1140-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.10.0](service/lexruntimev2/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: Support has been added for the `StartConversation` API. 
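+
+A minimal sketch of the `WithEndpointResolverWithOptions` hook from the `config` v1.11.0 entry above (the local endpoint URL and region check are placeholders); returning `&aws.EndpointNotFoundError{}` falls back to the SDK's default resolution:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+func main() {
+	// Route DynamoDB calls in us-west-2 to a local endpoint; defer
+	// everything else to the SDK's default resolver.
+	resolver := aws.EndpointResolverWithOptionsFunc(
+		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+			if service == dynamodb.ServiceID && region == "us-west-2" {
+				return aws.Endpoint{URL: "http://localhost:8000"}, nil
+			}
+			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+		})
+
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithEndpointResolverWithOptions(resolver))
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = dynamodb.NewFromConfig(cfg)
+}
+```
+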
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.12.0](service/outposts/CHANGELOG.md#v1120-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.1.0](service/rbin/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.10.0](service/redshiftdata/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.1.0](service/rum/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.21.0](service/s3/CHANGELOG.md#v1210-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.20.0](service/sagemaker/CHANGELOG.md#v1200-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.11.0](service/sagemakerruntime/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.11.0](service/shield/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.10.0](service/snowball/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.10.0](service/storagegateway/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.0.0](service/workspacesweb/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module + +# Release (2021-11-30) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.16.0](service/autoscaling/CHANGELOG.md#v1160-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.10.0](service/backup/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.0](service/braket/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.2.0](service/chimesdkmeetings/CHANGELOG.md#v120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.15.0](service/cloudformation/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.13.0](service/computeoptimizer/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.13.0](service/connect/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.12.0](service/customerprofiles/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.13.0](service/databasemigrationservice/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.9.0](service/dataexchange/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.9.0](service/dynamodb/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: 
[v1.24.0](service/ec2/CHANGELOG.md#v1240-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.11.0](service/ecr/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.13.0](service/ecs/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.15.0](service/eks/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.15.0](service/elasticache/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.13.0](service/elasticloadbalancingv2/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.10.0](service/elasticsearchservice/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.0.0](service/evidently/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.5.0](service/finspacedata/CHANGELOG.md#v150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.14.0](service/imagebuilder/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.0.0](service/inspector2/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.3.2](service/internal/endpoint-discovery/CHANGELOG.md#v132-2021-11-30) + * **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request. 
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.17.0](service/iot/CHANGELOG.md#v1170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.9.0](service/iotdeviceadvisor/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.15.0](service/iotsitewise/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.13.0](service/iotwireless/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.14.0](service/lambda/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.14.0](service/macie2/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.8.0](service/mgn/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.0.0](service/migrationhubrefactorspaces/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.4.0](service/opensearch/CHANGELOG.md#v140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.11.0](service/outposts/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.12.0](service/personalize/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.7.0](service/personalizeruntime/CHANGELOG.md#v170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.12.0](service/pinpoint/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.0](service/proton/CHANGELOG.md#v170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.15.0](service/quicksight/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.0.0](service/rbin/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.13.0](service/rds/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.16.0](service/redshift/CHANGELOG.md#v1160-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.0.0](service/rum/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.20.0](service/s3/CHANGELOG.md#v1200-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.15.0](service/s3control/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.13.0](service/sqs/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.17.0](service/ssm/CHANGELOG.md#v1170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.11.0](service/sts/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/textract`: [v1.10.0](service/textract/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.8.0](service/timestreamquery/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.8.0](service/timestreamwrite/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.1.0](service/transcribestreaming/CHANGELOG.md#v110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.8.0](service/translate/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.9.0](service/wellarchitected/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.11.0](service/workspaces/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated + +# Release (2021-11-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.11.1 + * **Bug Fix**: Fixed a bug that prevented aws.EndpointResolverWithOptionsFunc from satisfying the aws.EndpointResolverWithOptions interface. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.8.0](service/amplifybackend/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.10.0](service/apigateway/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.7.0](service/appconfig/CHANGELOG.md#v170-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.0.0](service/appconfigdata/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.8.0](service/applicationinsights/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.10.0](service/appstream/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.12.0](service/auditmanager/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.11.0](service/batch/CHANGELOG.md#v1110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.14.0](service/chime/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.1.0](service/chimesdkmeetings/CHANGELOG.md#v110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.14.0](service/cloudformation/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.10.0](service/cloudtrail/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.12.0](service/cloudwatch/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.12.0](service/connect/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.12.0](service/databasemigrationservice/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.13.0](service/databrew/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.10.0](service/devopsguru/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.0.0](service/drs/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.8.0](service/dynamodbstreams/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.23.0](service/ec2/CHANGELOG.md#v1230-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.14.0](service/eks/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.14.0](service/forecast/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.10.0](service/ivs/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.10.0](service/kafka/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.16.0](service/kendra/CHANGELOG.md#v1160-2021-11-19) + * **Announcement**: Fixed an API modeling bug that incorrectly generated the `DocumentAttributeValue` type as a union instead of a structure. This update corrects `DocumentAttributeValue` to be a `struct` instead of an `interface`, and removes the `DocumentAttributeValueMember` types. To migrate, applications using service/kendra will need to be updated to set the struct members of `DocumentAttributeValue` instead of constructing `DocumentAttributeValueMember` values, as sketched below.
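+
+A before/after sketch of that kendra migration; the union member name in the comment is illustrative of the removed `DocumentAttributeValueMember` types:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/kendra/types"
+)
+
+func main() {
+	// Before kendra v1.16.0, values were built from union members, e.g.
+	// (illustrative member name, since removed):
+	//   value := &types.DocumentAttributeValueMemberStringValue{Value: "HR"}
+	// After v1.16.0, DocumentAttributeValue is a plain struct: set exactly
+	// one of its value fields (StringValue, StringListValue, LongValue,
+	// DateValue) and leave the others nil.
+	attr := types.DocumentAttribute{
+		Key: aws.String("Department"),
+		Value: &types.DocumentAttributeValue{
+			StringValue: aws.String("HR"),
+		},
+	}
+	fmt.Println(aws.ToString(attr.Key))
+}
+```
+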
+ * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.11.0](service/kms/CHANGELOG.md#v1110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.13.0](service/lambda/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.13.0](service/lexmodelsv2/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.9.0](service/lexruntimev2/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.0](service/location/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.15.0](service/mediaconvert/CHANGELOG.md#v1150-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.14.0](service/medialive/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.7.0](service/mgn/CHANGELOG.md#v170-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.0.0](service/migrationhubstrategy/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.9.0](service/qldb/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.9.0](service/qldbsession/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.15.0](service/redshift/CHANGELOG.md#v1150-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.12.0](service/sns/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.16.0](service/ssm/CHANGELOG.md#v1160-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.12.0](service/transfer/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.14.0](service/wafv2/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated + +# Release (2021-11-12) + +## General Highlights +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. +* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.9.0](service/backup/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.10.0](service/batch/CHANGELOG.md#v1100-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.0.0](service/chimesdkmeetings/CHANGELOG.md#v100-2021-11-12) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.12.0](service/computeoptimizer/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. 
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.11.0](service/connect/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.12.0](service/docdb/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.8.0](service/dynamodb/CHANGELOG.md#v180-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.22.0](service/ec2/CHANGELOG.md#v1220-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.12.0](service/ecs/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.9.0](service/gamelift/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.9.0](service/greengrassv2/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.10.0](service/health/CHANGELOG.md#v1100-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.9.0](service/identitystore/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.12.0](service/iotwireless/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.11.0](service/neptune/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.12.0](service/rds/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.0.0](service/resiliencehub/CHANGELOG.md#v100-2021-11-12) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.8.0](service/resourcegroupstaggingapi/CHANGELOG.md#v180-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.14.0](service/s3control/CHANGELOG.md#v1140-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.19.0](service/sagemaker/CHANGELOG.md#v1190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.10.0](service/sagemakerruntime/CHANGELOG.md#v1100-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.7.0](service/ssmincidents/CHANGELOG.md#v170-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.11.0](service/transcribe/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.7.0](service/translate/CHANGELOG.md#v170-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.13.0](service/wafv2/CHANGELOG.md#v1130-2021-11-12) + * **Feature**: Updated service to latest API model. 
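+
+A minimal sketch of the `WaitForOutput` waiter method from the 2021-11-12 General Highlights above, using the S3 `BucketExistsWaiter` (the bucket name is a placeholder):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	waiter := s3.NewBucketExistsWaiter(s3.NewFromConfig(cfg))
+
+	// WaitForOutput polls like Wait, but also returns the final
+	// HeadBucket output once the bucket exists.
+	out, err := waiter.WaitForOutput(context.TODO(), &s3.HeadBucketInput{
+		Bucket: aws.String("my-bucket"),
+	}, 2*time.Minute)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = out // *s3.HeadBucketOutput
+}
+```
+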
+ +# Release (2021-11-06) + +## General Highlights +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream`: [v1.0.0](aws/protocol/eventstream/CHANGELOG.md#v100-2021-11-06) + * **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. + * **Release**: Protocol support has been added for AWS event stream. +* `github.com/aws/aws-sdk-go-v2/internal/endpoints/v2`: [v2.0.0](internal/endpoints/v2/CHANGELOG.md#v200-2021-11-06) + * **Release**: Endpoint Variant Model Support +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.6.0](service/applicationinsights/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.8.0](service/appstream/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.11.0](service/auditmanager/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.14.0](service/autoscaling/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.13.0](service/chime/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.4.0](service/chimesdkidentity/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.4.0](service/chimesdkmessaging/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.10.0](service/cloudfront/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.7.0](service/codecommit/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.10.0](service/connect/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.7.0](service/connectcontactlens/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.6.0](service/connectparticipant/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.10.0](service/databasemigrationservice/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.8.0](service/datasync/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.11.0](service/docdb/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. 
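+
+For the FIPS and dual-stack endpoint support in this release's General Highlights, a sketch of the programmatic form; the same switches should also be reachable via the `AWS_USE_FIPS_ENDPOINT` / `AWS_USE_DUALSTACK_ENDPOINT` environment variables or the `use_fips_endpoint` / `use_dualstack_endpoint` shared-config keys:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+)
+
+func main() {
+	// Opt every client built from this config into the FIPS and
+	// dual-stack endpoint variants where the service offers them.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
+		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg
+}
+```
+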
+* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.9.0](service/ebs/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.21.0](service/ec2/CHANGELOG.md#v1210-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.9.0](service/ecr/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.11.0](service/ecs/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.12.0](service/eks/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.13.0](service/elasticache/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.9.0](service/elasticsearchservice/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.8.0](service/emrcontainers/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.4.0](service/finspace/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.12.0](service/fsx/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.8.0](service/gamelift/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.9.0](service/health/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.12.0](service/iam/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.0](service/internal/eventstreamtesting/CHANGELOG.md#v100-2021-11-06) + * **Release**: Protocol support has been added for AWS event stream. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.13.0](service/iotsitewise/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.14.0](service/kendra/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.8.0](service/kinesis/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Support has been added for the SubscribeToShard API. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.9.0](service/kms/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.12.0](service/lightsail/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.13.0](service/macie2/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.6.0](service/mgn/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. 
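+
+A sketch of consuming the kinesis `SubscribeToShard` event stream from the v1.8.0 entry above (the consumer ARN and shard ID are placeholders):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/kinesis"
+	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := kinesis.NewFromConfig(cfg)
+
+	out, err := client.SubscribeToShard(context.TODO(), &kinesis.SubscribeToShardInput{
+		ConsumerARN:      aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/demo/consumer/demo-consumer"),
+		ShardId:          aws.String("shardId-000000000000"),
+		StartingPosition: &types.StartingPosition{Type: types.ShardIteratorTypeLatest},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	stream := out.GetStream()
+	defer stream.Close()
+
+	// Events() yields event-stream union members; records arrive as
+	// SubscribeToShardEvent values.
+	for event := range stream.Events() {
+		if e, ok := event.(*types.SubscribeToShardEventStreamMemberSubscribeToShardEvent); ok {
+			for _, rec := range e.Value.Records {
+				fmt.Println(string(rec.Data))
+			}
+		}
+	}
+	if err := stream.Err(); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+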
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.10.0](service/neptune/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.6.0](service/networkmanager/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.6.0](service/nimble/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.3.0](service/opensearch/CHANGELOG.md#v130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.14.0](service/quicksight/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.11.0](service/rds/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.10.0](service/rekognition/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.9.0](service/route53resolver/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.18.0](service/s3/CHANGELOG.md#v1180-2021-11-06) + * **Feature**: Support has been added for the SelectObjectContent API. + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.13.0](service/s3control/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.18.0](service/sagemaker/CHANGELOG.md#v1180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.11.0](service/servicediscovery/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.6.0](service/ssmincidents/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.6.0](service/sso/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.8.0](service/storagegateway/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.7.0](service/support/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.8.0](service/textract/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.10.0](service/transcribe/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.0.0](service/transcribestreaming/CHANGELOG.md#v100-2021-11-06) + * **Release**: New AWS service client module + * **Feature**: Support has been added for the StartStreamTranscription and StartMedicalStreamTranscription APIs. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.6.0](service/waf/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. 
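+
+Likewise, a sketch of reading the event stream behind the s3 v1.18.0 `SelectObjectContent` entry above (bucket, key, and query are placeholders); the transcribestreaming APIs in this release follow the same `GetStream()`/`Events()` pattern:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := s3.NewFromConfig(cfg)
+
+	out, err := client.SelectObjectContent(context.TODO(), &s3.SelectObjectContentInput{
+		Bucket:         aws.String("my-bucket"),
+		Key:            aws.String("data.csv"),
+		Expression:     aws.String("SELECT * FROM S3Object s"),
+		ExpressionType: types.ExpressionTypeSql,
+		InputSerialization: &types.InputSerialization{
+			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
+		},
+		OutputSerialization: &types.OutputSerialization{
+			CSV: &types.CSVOutput{},
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	stream := out.GetStream()
+	defer stream.Close()
+
+	// Records events carry chunks of the query result payload.
+	for event := range stream.Events() {
+		if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+			os.Stdout.Write(rec.Value.Payload)
+		}
+	}
+	if err := stream.Err(); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+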
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.0](service/wisdom/CHANGELOG.md#v120-2021-11-06) + * **Feature**: Updated service to latest API model. + +# Release (2021-10-21) + +## General Highlights +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.10.0 + * **Feature**: Adds dynamic signing middleware that switches to unsigned payload when TLS is enabled. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.8.0](service/appflow/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.8.0](service/applicationautoscaling/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.13.0](service/autoscaling/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.3.0](service/chimesdkmessaging/CHANGELOG.md#v130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.11.0](service/cloudformation/CHANGELOG.md#v1110-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.7.0](service/cloudsearch/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.7.0](service/cloudtrail/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.9.0](service/cloudwatch/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.7.0](service/cloudwatchevents/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.8.0](service/cloudwatchlogs/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.7.0](service/codedeploy/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.10.0](service/configservice/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.7.0](service/dataexchange/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.9.0](service/directconnect/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.10.0](service/docdb/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.6.0](service/dynamodb/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.20.0](service/ec2/CHANGELOG.md#v1200-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.8.0](service/ecr/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.10.0](service/ecs/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.9.0](service/efs/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: 
[v1.12.0](service/elasticache/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.7.0](service/elasticloadbalancing/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.10.0](service/elasticloadbalancingv2/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.10.0](service/emr/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.8.0](service/eventbridge/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.6.0](service/glacier/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.13.0](service/glue/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.8.0](service/ivs/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.13.0](service/kendra/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.7.0](service/kinesis/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.7.0](service/kinesisanalyticsv2/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.8.0](service/kms/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.10.0](service/lambda/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.13.0](service/mediaconvert/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.9.0](service/mediapackage/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.10.0](service/mediapackagevod/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.9.0](service/mediatailor/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.9.0](service/neptune/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.0.0](service/panorama/CHANGELOG.md#v100-2021-10-21) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.13.0](service/quicksight/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.10.0](service/rds/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.12.0](service/redshift/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.10.0](service/robomaker/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.17.0](service/s3/CHANGELOG.md#v1170-2021-10-21) + * **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to 
use unsigned payload signing auth when TLS is enabled. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.17.0](service/sagemaker/CHANGELOG.md#v1170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.12.0](service/securityhub/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.6.0](service/sfn/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.9.0](service/sns/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.10.0](service/sqs/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.7.0](service/storagegateway/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.8.0](service/sts/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.6.0](service/swf/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.8.0](service/workmail/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated + +# Release (2021-10-11) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.6.0](feature/ec2/imds/CHANGELOG.md#v160-2021-10-11) + * **Feature**: Respect the passed-in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed-in Context's Deadline or Timeout options. If a Client operation is called with a Context that has a Deadline or Timeout, the client no longer overrides it with the client's default timeout. + * **Bug Fix**: Fix IMDS client's response handling and operation timeout race.
Fixes #1253 +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.5.0](service/amplifybackend/CHANGELOG.md#v150-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.7.0](service/applicationautoscaling/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.3.0](service/apprunner/CHANGELOG.md#v130-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.6.0](service/backup/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.11.0](service/chime/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.11.0](service/codebuild/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.10.0](service/databrew/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.19.0](service/ec2/CHANGELOG.md#v1190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.8.0](service/efs/CHANGELOG.md#v180-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.9.0](service/elasticloadbalancingv2/CHANGELOG.md#v190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.7.0](service/firehose/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.10.0](service/frauddetector/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.10.0](service/fsx/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.12.0](service/glue/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.0.0](service/grafana/CHANGELOG.md#v100-2021-10-11) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.8.0](service/iotevents/CHANGELOG.md#v180-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.12.0](service/kendra/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.7.0](service/kms/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.9.0](service/lexmodelsv2/CHANGELOG.md#v190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.6.0](service/lexruntimev2/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.6.0](service/location/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.12.0](service/mediaconvert/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.10.0](service/medialive/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.16.0](service/sagemaker/CHANGELOG.md#v1160-2021-10-11) + * 
**Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.7.0](service/secretsmanager/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.11.0](service/securityhub/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.12.0](service/ssm/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.6.0](service/ssooidc/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.7.0](service/synthetics/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.6.0](service/textract/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.7.0](service/workmail/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated + +# Release (2021-09-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.0.0](service/account/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.6.0](service/amp/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.7.0](service/appintegrations/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.0.0](service/cloudcontrol/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.5.0](service/cloudhsmv2/CHANGELOG.md#v150-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.8.0](service/connect/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.6.0](service/dataexchange/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.8.0](service/elasticloadbalancingv2/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.11.0](service/imagebuilder/CHANGELOG.md#v1110-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.9.0](service/lambda/CHANGELOG.md#v190-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.11.0](service/macie2/CHANGELOG.md#v1110-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.7.0](service/networkfirewall/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.8.0](service/pinpoint/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.6.0](service/sesv2/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.8.0](service/transfer/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.0.0](service/voiceid/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS 
service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.0.0](service/wisdom/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.6.0](service/workmail/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.7.0](service/workspaces/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated + +# Release (2021-09-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.4](feature/dynamodb/expression/CHANGELOG.md#v124-2021-09-24) + * **Documentation**: Fixes typo in NameBuilder.NamesList example documentation to use the correct variable name. +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.6.0](service/appmesh/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.7.0](service/appsync/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.9.0](service/auditmanager/CHANGELOG.md#v190-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.5.0](service/codecommit/CHANGELOG.md#v150-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.8.0](service/comprehend/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.8.0](service/databasemigrationservice/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.18.0](service/ec2/CHANGELOG.md#v1180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.7.0](service/ecr/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.7.0](service/elasticsearchservice/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.10.0](service/iam/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.6.0](service/identitystore/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.10.0](service/imagebuilder/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.13.0](service/iot/CHANGELOG.md#v1130-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.7.0](service/iotevents/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.1.0](service/kafkaconnect/CHANGELOG.md#v110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.6.0](service/lakeformation/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.8.0](service/lexmodelsv2/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.5.0](service/lexruntimev2/CHANGELOG.md#v150-2021-09-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.8.0](service/licensemanager/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.11.0](service/mediaconvert/CHANGELOG.md#v1110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.9.0](service/mediapackagevod/CHANGELOG.md#v190-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.8.0](service/mediatailor/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.1.0](service/opensearch/CHANGELOG.md#v110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.12.0](service/quicksight/CHANGELOG.md#v1120-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.11.0](service/ssm/CHANGELOG.md#v1110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.10.0](service/wafv2/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated + +# Release (2021-09-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.10.0](service/chime/CHANGELOG.md#v1100-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.1](service/cloudformation/CHANGELOG.md#v1101-2021-09-17) + * **Documentation**: Updated API client documentation. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.7.0](service/comprehend/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.17.0](service/ec2/CHANGELOG.md#v1170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.6.0](service/ecr/CHANGELOG.md#v160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.12.0](service/iot/CHANGELOG.md#v1120-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.0.0](service/kafkaconnect/CHANGELOG.md#v100-2021-09-17) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.7.0](service/lexmodelsv2/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.4.0](service/lexruntimev2/CHANGELOG.md#v140-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.10.0](service/macie2/CHANGELOG.md#v1100-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.8.0](service/mediapackagevod/CHANGELOG.md#v180-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.6.0](service/networkfirewall/CHANGELOG.md#v160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.7.0](service/pinpoint/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.11.0](service/quicksight/CHANGELOG.md#v1110-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.9.0](service/rds/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.9.0](service/robomaker/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.16.0](service/s3/CHANGELOG.md#v1160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.15.0](service/sagemaker/CHANGELOG.md#v1150-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.5.0](service/ssooidc/CHANGELOG.md#v150-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.8.0](service/transcribe/CHANGELOG.md#v180-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.9.0](service/wafv2/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. + +# Release (2021-09-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.1](credentials/CHANGELOG.md#v141-2021-09-10) + * **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. 
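+
+For context on the `credentials` v1.4.1 documentation fix above: a minimal sketch of the custom-TokenProvider pattern it documents, assuming an MFA-protected role. The role and MFA device ARNs are placeholders.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Assume an MFA-protected role; TokenProvider supplies the one-time code.
+	// stscreds.StdinTokenProvider prompts for it on standard input.
+	provider := stscreds.NewAssumeRoleProvider(
+		sts.NewFromConfig(cfg),
+		"arn:aws:iam::123456789012:role/example", // placeholder
+		func(o *stscreds.AssumeRoleOptions) {
+			o.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/example") // placeholder
+			o.TokenProvider = stscreds.StdinTokenProvider
+		},
+	)
+	// Cache the provider so the role is only re-assumed on expiry.
+	cfg.Credentials = aws.NewCredentialsCache(provider)
+}
+```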
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.5.0](service/amp/CHANGELOG.md#v150-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.7.0](service/braket/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.2.0](service/chimesdkidentity/CHANGELOG.md#v120-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.2.0](service/chimesdkmessaging/CHANGELOG.md#v120-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.7.0](service/codegurureviewer/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.10.0](service/eks/CHANGELOG.md#v1100-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.11.0](service/elasticache/CHANGELOG.md#v1110-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.9.0](service/emr/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.10.0](service/forecast/CHANGELOG.md#v1100-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.9.0](service/frauddetector/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.7.0](service/kafka/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.4.0](service/lookoutequipment/CHANGELOG.md#v140-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.8.0](service/mediapackage/CHANGELOG.md#v180-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.0.0](service/opensearch/CHANGELOG.md#v100-2021-09-10) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.8.0](service/outposts/CHANGELOG.md#v180-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.7.0](service/ram/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.14.0](service/sagemaker/CHANGELOG.md#v1140-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.9.0](service/servicediscovery/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.5.0](service/ssmcontacts/CHANGELOG.md#v150-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.6.0](service/xray/CHANGELOG.md#v160-2021-09-10) + * **Feature**: API client updated + +# Release (2021-09-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.8.0](config/CHANGELOG.md#v180-2021-09-02) + * **Feature**: Add support for S3 Multi-Region Access Point ARNs. 
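+
+A sketch of what the Multi-Region Access Point support added in `config` v1.8.0 above (and in `service/s3` and `service/internal/s3shared` below) enables: an MRAP ARN standing in for a bucket name. The ARN alias and key are placeholders.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := s3.NewFromConfig(cfg)
+
+	// A Multi-Region Access Point ARN can be passed where a bucket name
+	// is expected; both the ARN alias and the key here are placeholders.
+	out, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+		Bucket: aws.String("arn:aws:s3::123456789012:accesspoint/mfzwi23gnjvgw.mrap"),
+		Key:    aws.String("example.txt"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer out.Body.Close()
+}
+```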
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.7.0](service/accessanalyzer/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.8.0](service/acmpca/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.8.0](service/cloud9/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.0](service/cloudformation/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.6.0](service/cloudtrail/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.10.0](service/codebuild/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.9.0](service/computeoptimizer/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.9.0](service/configservice/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.7.0](service/ebs/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.16.0](service/ec2/CHANGELOG.md#v1160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.7.0](service/efs/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.8.0](service/emr/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.6.0](service/firehose/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.8.0](service/frauddetector/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.9.0](service/fsx/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.7.0](service/internal/s3shared/CHANGELOG.md#v170-2021-09-02) + * **Feature**: Add support for S3 Multi-Region Access Point ARNs. 
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.11.0](service/iot/CHANGELOG.md#v1110-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.5.0](service/iotjobsdataplane/CHANGELOG.md#v150-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.7.0](service/ivs/CHANGELOG.md#v170-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.6.0](service/kms/CHANGELOG.md#v160-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.9.0](service/lexmodelbuildingservice/CHANGELOG.md#v190-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.7.0](service/mediatailor/CHANGELOG.md#v170-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.2.0](service/memorydb/CHANGELOG.md#v120-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.5.0](service/mwaa/CHANGELOG.md#v150-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.6.0](service/polly/CHANGELOG.md#v160-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.10.0](service/quicksight/CHANGELOG.md#v1100-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.15.0](service/s3/CHANGELOG.md#v1150-2021-09-02)
+ * **Feature**: API client updated
+ * **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.11.0](service/s3control/CHANGELOG.md#v1110-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.7.0](service/sagemakerruntime/CHANGELOG.md#v170-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.6.0](service/schemas/CHANGELOG.md#v160-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.10.0](service/securityhub/CHANGELOG.md#v1100-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.5.0](service/servicecatalogappregistry/CHANGELOG.md#v150-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.9.0](service/sqs/CHANGELOG.md#v190-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.4.0](service/ssmincidents/CHANGELOG.md#v140-2021-09-02)
+ * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.7.0](service/transfer/CHANGELOG.md#v170-2021-09-02)
+ * **Feature**: API client updated
+
+# Release (2021-08-27)
+
+## General Highlights
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.0](credentials/CHANGELOG.md#v140-2021-08-27)
+ * **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider.
Closes https://github.com/aws/aws-sdk-go-v2/issues/723 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.2.0](feature/dynamodb/attributevalue/CHANGELOG.md#v120-2021-08-27) + * **Bug Fix**: Fix unmarshaler's decoding of AttributeValueMemberN into a type that is a string alias. +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.7.0](service/acmpca/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.5.0](service/amplify/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.4.0](service/amplifybackend/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.7.0](service/apigateway/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.4.0](service/apigatewaymanagementapi/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.7.0](service/appflow/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.4.0](service/applicationinsights/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.2.0](service/apprunner/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.6.0](service/appstream/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.6.0](service/appsync/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.6.0](service/athena/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.8.0](service/auditmanager/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.5.0](service/autoscalingplans/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.5.0](service/backup/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.7.0](service/batch/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.6.0](service/braket/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.1.0](service/chimesdkidentity/CHANGELOG.md#v110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.1.0](service/chimesdkmessaging/CHANGELOG.md#v110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.5.0](service/cloudtrail/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
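+
+Illustrating the `feature/dynamodb/attributevalue` v1.2.0 bug fix above: decoding a DynamoDB number (`N`) into a string-alias type, the case that previously failed. The `Priority` type is hypothetical.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+)
+
+// Priority is a string alias; unmarshaling an N attribute value into it
+// is the case the v1.2.0 fix addresses.
+type Priority string
+
+func main() {
+	var p Priority
+	if err := attributevalue.Unmarshal(&types.AttributeValueMemberN{Value: "5"}, &p); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(p) // "5"
+}
+```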
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.6.0](service/cloudwatchevents/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.5.0](service/codeartifact/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.9.0](service/codebuild/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.4.0](service/codecommit/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.5.0](service/codeguruprofiler/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.4.0](service/codestarnotifications/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.5.0](service/cognitoidentity/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.6.0](service/cognitoidentityprovider/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.6.0](service/comprehend/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.8.0](service/computeoptimizer/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.5.0](service/connectcontactlens/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.9.0](service/customerprofiles/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.7.0](service/databasemigrationservice/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.6.0](service/datasync/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.4.0](service/dax/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.5.0](service/directoryservice/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.5.0](service/dlm/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.4.0](service/dynamodbstreams/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.15.0](service/ec2/CHANGELOG.md#v1150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.5.0](service/ecrpublic/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.6.0](service/efs/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.9.0](service/eks/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.6.0](service/emrcontainers/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.7.0](service/eventbridge/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.2.0](service/finspace/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.2.0](service/finspacedata/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.5.0](service/firehose/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.7.0](service/fms/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.9.0](service/forecast/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.4.0](service/forecastquery/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.7.0](service/frauddetector/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.8.0](service/fsx/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.6.0](service/gamelift/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.11.0](service/glue/CHANGELOG.md#v1110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.6.0](service/groundstation/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.5.0](service/guardduty/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.7.0](service/health/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.6.0](service/healthlake/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.10.0](service/iot/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.4.0](service/iot1clickdevicesservice/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.5.0](service/iotanalytics/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.4.0](service/iotdataplane/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.5.0](service/iotfleethub/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.11.0](service/iotsitewise/CHANGELOG.md#v1110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.6.0](service/ivs/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.5.0](service/lakeformation/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.6.0](service/lexmodelsv2/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.3.0](service/lexruntimev2/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.7.0](service/licensemanager/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.10.0](service/lightsail/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.3.0](service/lookoutequipment/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.3.0](service/lookoutmetrics/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.9.0](service/macie2/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.10.0](service/mediaconvert/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.7.0](service/mediapackage/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.7.0](service/mediapackagevod/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.5.0](service/mq/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.5.0](service/networkfirewall/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.7.0](service/outposts/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.6.0](service/pi/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.4.0](service/pinpointsmsvoice/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.5.0](service/polly/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.6.0](service/qldb/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.5.0](service/qldbsession/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.6.0](service/ram/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.8.0](service/rekognition/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.5.0](service/resourcegroupstaggingapi/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.8.0](service/robomaker/CHANGELOG.md#v180-2021-08-27) + * **Bug Fix**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.1.0](service/route53recoverycontrolconfig/CHANGELOG.md#v110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.7.0](service/route53resolver/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.14.0](service/s3/CHANGELOG.md#v1140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.10.0](service/s3control/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.5.0](service/s3outposts/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.5.0](service/servicecatalog/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.4.0](service/servicecatalogappregistry/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.5.0](service/signer/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.4.0](service/ssooidc/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.6.0](service/storagegateway/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.6.0](service/synthetics/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.5.0](service/textract/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.7.0](service/transcribe/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.6.0](service/transfer/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.5.0](service/wafregional/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. + +# Release (2021-08-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.6.0](service/apigateway/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.5.0](service/apigatewayv2/CHANGELOG.md#v150-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.6.0](service/appflow/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.5.0](service/applicationautoscaling/CHANGELOG.md#v150-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.6.0](service/cloud9/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.4.0](service/clouddirectory/CHANGELOG.md#v140-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.6.0](service/cloudwatchlogs/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.8.0](service/codebuild/CHANGELOG.md#v180-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.7.0](service/configservice/CHANGELOG.md#v170-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.8.0](service/costexplorer/CHANGELOG.md#v180-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.8.0](service/customerprofiles/CHANGELOG.md#v180-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.8.0](service/databrew/CHANGELOG.md#v180-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.4.0](service/directoryservice/CHANGELOG.md#v140-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.14.0](service/ec2/CHANGELOG.md#v1140-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.9.0](service/elasticache/CHANGELOG.md#v190-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.6.0](service/emr/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.10.0](service/iotsitewise/CHANGELOG.md#v1100-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.7.0](service/lambda/CHANGELOG.md#v170-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.6.0](service/licensemanager/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.0.0](service/memorydb/CHANGELOG.md#v100-2021-08-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.8.0](service/quicksight/CHANGELOG.md#v180-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53`: 
[v1.10.0](service/route53/CHANGELOG.md#v1100-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.6.0](service/route53resolver/CHANGELOG.md#v160-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.13.0](service/s3/CHANGELOG.md#v1130-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.12.0](service/sagemaker/CHANGELOG.md#v1120-2021-08-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.5.0](service/sagemakerruntime/CHANGELOG.md#v150-2021-08-19) + * **Feature**: API client updated + +# Release (2021-08-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.3.1](feature/cloudfront/sign/CHANGELOG.md#v131-2021-08-12) + * **Bug Fix**: Update to not escape HTML when encoding the policy. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.5.0](service/athena/CHANGELOG.md#v150-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.11.0](service/autoscaling/CHANGELOG.md#v1110-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.8.0](service/chime/CHANGELOG.md#v180-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.0.0](service/chimesdkidentity/CHANGELOG.md#v100-2021-08-12) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.0.0](service/chimesdkmessaging/CHANGELOG.md#v100-2021-08-12) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.7.0](service/codebuild/CHANGELOG.md#v170-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.6.0](service/connect/CHANGELOG.md#v160-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.5.0](service/ebs/CHANGELOG.md#v150-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.8.0](service/ecs/CHANGELOG.md#v180-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.5.0](service/lexmodelsv2/CHANGELOG.md#v150-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.9.0](service/lightsail/CHANGELOG.md#v190-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.3.0](service/nimble/CHANGELOG.md#v130-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.7.0](service/rekognition/CHANGELOG.md#v170-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.9.0](service/route53/CHANGELOG.md#v190-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.0.0](service/snowdevicemanagement/CHANGELOG.md#v100-2021-08-12) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.9.0](service/ssm/CHANGELOG.md#v190-2021-08-12) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.5.0](service/synthetics/CHANGELOG.md#v150-2021-08-12) + * **Feature**: API client 
updated
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.7.0](service/wafv2/CHANGELOG.md#v170-2021-08-12)
+ * **Feature**: API client updated
+
+# Release (2021-08-04)
+
+## General Highlights
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.8.0
+ * **Bug Fix**: Corrected an issue where the retryer was not using the last attempt's ResultMetadata as the basis for the return result from the stack. ([#1345](https://github.com/aws/aws-sdk-go-v2/pull/1345))
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.0](feature/dynamodb/expression/CHANGELOG.md#v120-2021-08-04)
+ * **Feature**: Add IsSet helper for ConditionBuilder and KeyConditionBuilder ([#1329](https://github.com/aws/aws-sdk-go-v2/pull/1329)); see the sketch below
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.5.2](service/accessanalyzer/CHANGELOG.md#v152-2021-08-04)
+ * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.1](service/amp/CHANGELOG.md#v131-2021-08-04)
+ * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.5.0](service/appintegrations/CHANGELOG.md#v150-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.2](service/appmesh/CHANGELOG.md#v142-2021-08-04)
+ * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.5.0](service/appsync/CHANGELOG.md#v150-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.7.0](service/auditmanager/CHANGELOG.md#v170-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.6.0](service/batch/CHANGELOG.md#v160-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.5.2](service/braket/CHANGELOG.md#v152-2021-08-04)
+ * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.7.0](service/chime/CHANGELOG.md#v170-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.8.0](service/cloudformation/CHANGELOG.md#v180-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.7.0](service/cloudwatch/CHANGELOG.md#v170-2021-08-04)
+ * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.6.0](service/codebuild/CHANGELOG.md#v160-2021-08-04)
+ * **Feature**: Updated to latest API model.
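+
+A sketch of the `IsSet` helper added in `feature/dynamodb/expression` v1.2.0 above, used to guard `Build` against a zero-value `ConditionBuilder`.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression"
+)
+
+func buildFilter(status string) (expression.Expression, error) {
+	var cond expression.ConditionBuilder
+	if status != "" {
+		cond = expression.Name("status").Equal(expression.Value(status))
+	}
+	// IsSet reports whether the builder actually holds a condition,
+	// avoiding the unset-parameter error Build returns for a zero value.
+	if !cond.IsSet() {
+		return expression.Expression{}, fmt.Errorf("no condition to build")
+	}
+	return expression.NewBuilder().WithFilter(cond).Build()
+}
+
+func main() {
+	expr, err := buildFilter("active")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(*expr.Filter())
+}
+```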
+* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.4.2](service/codeguruprofiler/CHANGELOG.md#v142-2021-08-04) + * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.5.0](service/cognitoidentityprovider/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.7.0](service/computeoptimizer/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.7.0](service/databrew/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.7.0](service/directconnect/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.13.0](service/ec2/CHANGELOG.md#v1130-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.7.0](service/ecs/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.6.0](service/elasticloadbalancingv2/CHANGELOG.md#v160-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.5.0](service/emr/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.5.0](service/emrcontainers/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.6.0](service/eventbridge/CHANGELOG.md#v160-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.10.0](service/glue/CHANGELOG.md#v1100-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.5.0](service/greengrassv2/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.5.2](service/groundstation/CHANGELOG.md#v152-2021-08-04) + * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.8.0](service/iam/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.4.0](service/identitystore/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.8.0](service/imagebuilder/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.9.0](service/iot/CHANGELOG.md#v190-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.4.0](service/iotanalytics/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.9.0](service/iotsitewise/CHANGELOG.md#v190-2021-08-04) + * **Feature**: Updated to latest API model. 
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.8.0](service/iotwireless/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.10.0](service/kendra/CHANGELOG.md#v1100-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.6.0](service/lambda/CHANGELOG.md#v160-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.7.0](service/lexmodelbuildingservice/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.4.0](service/lexmodelsv2/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.4.0](service/location/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. + * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.9.0](service/mediaconvert/CHANGELOG.md#v190-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.8.0](service/medialive/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.1](service/mgn/CHANGELOG.md#v131-2021-08-04) + * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.7.0](service/personalize/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.2.0](service/proton/CHANGELOG.md#v120-2021-08-04) + * **Feature**: Updated to latest API model. + * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.5.0](service/qldb/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.7.0](service/quicksight/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.7.0](service/rds/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.10.0](service/redshift/CHANGELOG.md#v1100-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.5.0](service/redshiftdata/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.7.0](service/robomaker/CHANGELOG.md#v170-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.8.0](service/route53/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.0.0](service/route53recoverycluster/CHANGELOG.md#v100-2021-08-04) + * **Release**: New AWS service client module + * **Feature**: Updated to latest API model. 
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.0.0](service/route53recoverycontrolconfig/CHANGELOG.md#v100-2021-08-04) + * **Release**: New AWS service client module + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.0.0](service/route53recoveryreadiness/CHANGELOG.md#v100-2021-08-04) + * **Release**: New AWS service client module + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.12.0](service/s3/CHANGELOG.md#v1120-2021-08-04) + * **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.9.0](service/s3control/CHANGELOG.md#v190-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.4.0](service/s3outposts/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.11.0](service/sagemaker/CHANGELOG.md#v1110-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.5.0](service/secretsmanager/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.8.0](service/securityhub/CHANGELOG.md#v180-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.6.0](service/shield/CHANGELOG.md#v160-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.3.0](service/ssmcontacts/CHANGELOG.md#v130-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.2.0](service/ssmincidents/CHANGELOG.md#v120-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.5.0](service/ssoadmin/CHANGELOG.md#v150-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.4.0](service/synthetics/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.4.0](service/textract/CHANGELOG.md#v140-2021-08-04) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.6.0](service/transcribe/CHANGELOG.md#v160-2021-08-04) + * **Feature**: Updated to latest API model. + +# Release (2021-07-15) + +## General Highlights +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.5.0](config/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. +* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.3.0](feature/ec2/imds/CHANGELOG.md#v130-2021-07-15) + * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.5.0](service/acm/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.0](service/amp/CHANGELOG.md#v130-2021-07-15) + * **Feature**: Updated service model to latest version. 
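+
+Two short sketches for entries above. First, the `HeadObject` presign support added in `service/s3` v1.12.0 (2021-08-04, #1346); bucket and key are placeholders.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))
+	// Presign a HeadObject call; the URL can be handed to a caller
+	// that holds no AWS credentials of its own.
+	req, err := presigner.PresignHeadObject(context.TODO(), &s3.HeadObjectInput{
+		Bucket: aws.String("example-bucket"), // placeholder
+		Key:    aws.String("example-key"),    // placeholder
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println(req.Method, req.URL)
+}
+```
+
+Second, the EC2 IPv6-enabled IMDS endpoint support in `config` v1.5.0 and `feature/ec2/imds` v1.3.0 above; wiring it through the `WithEC2IMDSEndpointMode` loader option is our assumption of the intended usage.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+func main() {
+	// Opt the shared config loader into the IPv6 IMDS endpoint
+	// rather than the default IPv4 one.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithEC2IMDSEndpointMode(imds.EndpointModeStateIPv6),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg // hand cfg to any service client constructor
+}
+```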
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.4.0](service/amplify/CHANGELOG.md#v140-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.3.0](service/amplifybackend/CHANGELOG.md#v130-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.10.0](service/autoscaling/CHANGELOG.md#v1100-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.6.0](service/chime/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.7.0](service/cloudformation/CHANGELOG.md#v170-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.7.0](service/cloudfront/CHANGELOG.md#v170-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.5.0](service/cloudsearch/CHANGELOG.md#v150-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.6.0](service/cloudwatch/CHANGELOG.md#v160-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.6.0](service/databasemigrationservice/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.6.0](service/devopsguru/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.6.0](service/directconnect/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.8.0](service/docdb/CHANGELOG.md#v180-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.12.0](service/ec2/CHANGELOG.md#v1120-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.8.0](service/eks/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.8.0](service/elasticache/CHANGELOG.md#v180-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.5.0](service/elasticbeanstalk/CHANGELOG.md#v150-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.5.0](service/elasticloadbalancing/CHANGELOG.md#v150-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Documentation**: Updated service model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.5.0](service/elasticloadbalancingv2/CHANGELOG.md#v150-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.6.0](service/fms/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.6.0](service/frauddetector/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.9.0](service/glue/CHANGELOG.md#v190-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.6.0](service/health/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.5.0](service/healthlake/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.7.0](service/iam/CHANGELOG.md#v170-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.7.0](service/imagebuilder/CHANGELOG.md#v170-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.8.0](service/iot/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.8.0](service/iotsitewise/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.9.0](service/kendra/CHANGELOG.md#v190-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.5.0](service/lambda/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.6.0](service/lexmodelbuildingservice/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.8.0](service/lightsail/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.5.1](service/macie/CHANGELOG.md#v151-2021-07-15) + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.8.1](service/macie2/CHANGELOG.md#v181-2021-07-15) + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.8.0](service/mediaconvert/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.5.0](service/mediatailor/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.0](service/mgn/CHANGELOG.md#v130-2021-07-15) + * **Feature**: Updated service model to latest version. 
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.4.0](service/mq/CHANGELOG.md#v140-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.7.0](service/neptune/CHANGELOG.md#v170-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.6.0](service/outposts/CHANGELOG.md#v160-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.5.1](service/pricing/CHANGELOG.md#v151-2021-07-15) + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.6.0](service/rds/CHANGELOG.md#v160-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.9.0](service/redshift/CHANGELOG.md#v190-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.10.0](service/sagemaker/CHANGELOG.md#v1100-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.5.0](service/ses/CHANGELOG.md#v150-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.7.0](service/sns/CHANGELOG.md#v170-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.7.0](service/sqs/CHANGELOG.md#v170-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.8.0](service/ssm/CHANGELOG.md#v180-2021-07-15) + * **Feature**: Updated service model to latest version. + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.5.0](service/storagegateway/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.6.0](service/sts/CHANGELOG.md#v160-2021-07-15) + * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. + * **Documentation**: Updated service model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.5.0](service/wellarchitected/CHANGELOG.md#v150-2021-07-15) + * **Feature**: Updated service model to latest version. + +# Release (2021-07-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.1.0](internal/ini/CHANGELOG.md#v110-2021-07-01) + * **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. 
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.9.0](service/autoscaling/CHANGELOG.md#v190-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.6.0](service/databrew/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.11.0](service/ec2/CHANGELOG.md#v1110-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.8.0](service/glue/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.8.0](service/kendra/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.7.0](service/mediaconvert/CHANGELOG.md#v170-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.6.0](service/mediapackagevod/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.8.0](service/redshift/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.9.0](service/sagemaker/CHANGELOG.md#v190-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.7.0](service/servicediscovery/CHANGELOG.md#v170-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.6.0](service/sqs/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.2.0](service/ssmcontacts/CHANGELOG.md#v120-2021-07-01)
+  * **Feature**: API client updated
+
+# Release (2021-06-25)
+
+## General Highlights
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.7.0
+  * **Feature**: Adds configuration values for enabling endpoint discovery.
+  * **Bug Fix**: Keep Object-Lock headers as headers when presigning SigV4 requests
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.4.0](config/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: Adds configuration setting for enabling endpoint discovery.
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.3.0](credentials/CHANGELOG.md#v130-2021-06-25)
+  * **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.2.0](feature/cloudfront/sign/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: Add UnmarshalJSON for AWSEpochTime to correctly unmarshal AWSEpochTime ([#1298](https://github.com/aws/aws-sdk-go-v2/pull/1298))
+* `github.com/aws/aws-sdk-go-v2/internal/configsources`: [v1.0.0](internal/configsources/CHANGELOG.md#v100-2021-06-25)
+  * **Release**: Release new modules
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.2.0](service/amp/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.3.0](service/amplify/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.2.0](service/amplifybackend/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.5.0](service/appflow/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.0](service/appmesh/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.5.0](service/chime/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.5.0](service/cloud9/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.6.0](service/cloudformation/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.6.0](service/cloudfront/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.4.0](service/cloudsearch/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.5.0](service/cloudwatch/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.5.0](service/cloudwatchevents/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.5.0](service/codebuild/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.5.0](service/codegurureviewer/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.4.0](service/cognitoidentity/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.4.0](service/cognitoidentityprovider/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.5.0](service/connect/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.3.0](service/dax/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.7.0](service/docdb/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: 
[v1.4.0](service/dynamodb/CHANGELOG.md#v140-2021-06-25) + * **Feature**: Adds support for endpoint discovery. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.10.0](service/ec2/CHANGELOG.md#v1100-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.7.0](service/elasticache/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.4.0](service/elasticbeanstalk/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.4.0](service/elasticloadbalancing/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.4.0](service/elasticloadbalancingv2/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.5.0](service/eventbridge/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.5.0](service/greengrass/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.4.0](service/greengrassv2/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.6.0](service/iam/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.0.0](service/internal/endpoint-discovery/CHANGELOG.md#v100-2021-06-25) + * **Release**: Release new modules + * **Feature**: Module supporting endpoint-discovery across all service clients. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.7.0](service/iot/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.3.0](service/iotanalytics/CHANGELOG.md#v130-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.7.0](service/kendra/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.4.0](service/kms/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.3.0](service/lexmodelsv2/CHANGELOG.md#v130-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.2.0](service/lexruntimev2/CHANGELOG.md#v120-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.5.0](service/licensemanager/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.2.0](service/lookoutmetrics/CHANGELOG.md#v120-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.4.0](service/managedblockchain/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.6.0](service/mediaconnect/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.7.0](service/medialive/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.4.0](service/mediatailor/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.6.0](service/neptune/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.1.0](service/proton/CHANGELOG.md#v110-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.6.0](service/quicksight/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.5.0](service/ram/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.5.0](service/rds/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.7.0](service/redshift/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.4.0](service/redshiftdata/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.7.0](service/route53/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.8.0](service/sagemaker/CHANGELOG.md#v180-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.4.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.7.0](service/securityhub/CHANGELOG.md#v170-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.4.0](service/ses/CHANGELOG.md#v140-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.5.0](service/snowball/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.6.0](service/sns/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.5.0](service/sqs/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.5.0](service/sts/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.3.0](service/timestreamquery/CHANGELOG.md#v130-2021-06-25) + * **Feature**: Adds support for endpoint discovery. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.3.0](service/timestreamwrite/CHANGELOG.md#v130-2021-06-25) + * **Feature**: Adds support for endpoint discovery. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.5.0](service/transfer/CHANGELOG.md#v150-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.3.0](service/waf/CHANGELOG.md#v130-2021-06-25) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.6.0](service/wafv2/CHANGELOG.md#v160-2021-06-25) + * **Feature**: API client updated + +# Release (2021-06-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.7.0](service/autoscaling/CHANGELOG.md#v170-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.3.2](service/cloudtrail/CHANGELOG.md#v132-2021-06-11) + * **Documentation**: Updated to latest API model. 
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.3.3](service/cognitoidentityprovider/CHANGELOG.md#v133-2021-06-11) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.6.0](service/eks/CHANGELOG.md#v160-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.6.0](service/fsx/CHANGELOG.md#v160-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.6.0](service/glue/CHANGELOG.md#v160-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.6.0](service/kendra/CHANGELOG.md#v160-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.7.0](service/macie2/CHANGELOG.md#v170-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.6.0](service/medialive/CHANGELOG.md#v160-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.4.0](service/pi/CHANGELOG.md#v140-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.0.0](service/proton/CHANGELOG.md#v100-2021-06-11) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.1](service/qldb/CHANGELOG.md#v131-2021-06-11) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.4.2](service/rds/CHANGELOG.md#v142-2021-06-11) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.7.0](service/sagemaker/CHANGELOG.md#v170-2021-06-11) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.1](service/transfer/CHANGELOG.md#v141-2021-06-11) + * **Documentation**: Updated to latest API model. + +# Release (2021-06-04) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.5.0](service/acmpca/CHANGELOG.md#v150-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.6.0](service/autoscaling/CHANGELOG.md#v160-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.4.0](service/braket/CHANGELOG.md#v140-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.5.2](service/cloudfront/CHANGELOG.md#v152-2021-06-04) + * **Documentation**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.4.0](service/datasync/CHANGELOG.md#v140-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.3.0](service/devicefarm/CHANGELOG.md#v130-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.6.0](service/docdb/CHANGELOG.md#v160-2021-06-04) + * **Feature**: Updated service client to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.9.0](service/ec2/CHANGELOG.md#v190-2021-06-04) + * **Feature**: Updated service client to latest API model. 
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.5.0](service/ecs/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.7.0](service/forecast/CHANGELOG.md#v170-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.5.0](service/fsx/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.1](service/iam/CHANGELOG.md#v151-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.4.0](service/internal/s3shared/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.4.0](service/iotevents/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.3.0](service/ioteventsdata/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.6.0](service/iotsitewise/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.6.0](service/iotwireless/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.5.0](service/kendra/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.1](service/lightsail/CHANGELOG.md#v161-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.2.0](service/location/CHANGELOG.md#v120-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.2.0](service/mwaa/CHANGELOG.md#v120-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.4.0](service/outposts/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.3.0](service/polly/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.0](service/qldb/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.3.2](service/resourcegroups/CHANGELOG.md#v132-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.2](service/route53/CHANGELOG.md#v162-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.4.2](service/route53resolver/CHANGELOG.md#v142-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.10.0](service/s3/CHANGELOG.md#v1100-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.7.0](service/s3control/CHANGELOG.md#v170-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.5.0](service/servicediscovery/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.5.0](service/sns/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.4.2](service/sqs/CHANGELOG.md#v142-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.2](service/ssm/CHANGELOG.md#v162-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+
+# Release (2021-05-25)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.4.0](service/cloudwatchlogs/CHANGELOG.md#v140-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.1.0](service/commander/CHANGELOG.md#v110-2021-05-25)
+  * **Feature**: Deprecated module. The API client was incorrectly named. Use AWS Systems Manager Incident Manager (ssmincidents) instead.
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.5.0](service/computeoptimizer/CHANGELOG.md#v150-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.6.0](service/costexplorer/CHANGELOG.md#v160-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.8.0](service/ec2/CHANGELOG.md#v180-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.4.0](service/efs/CHANGELOG.md#v140-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.6.0](service/forecast/CHANGELOG.md#v160-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.6.0](service/iot/CHANGELOG.md#v160-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.4.0](service/opsworkscm/CHANGELOG.md#v140-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.5.0](service/quicksight/CHANGELOG.md#v150-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.9.0](service/s3/CHANGELOG.md#v190-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.0.0](service/ssmincidents/CHANGELOG.md#v100-2021-05-25)
+  * **Release**: New AWS service client module
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.0](service/transfer/CHANGELOG.md#v140-2021-05-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.4.0](service/workspaces/CHANGELOG.md#v140-2021-05-25)
+  * **Feature**: API client updated
+
+# Release (2021-05-20)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.6.0
+  * **Feature**: `internal/ini`: This package has been migrated to a separate module at `github.com/aws/aws-sdk-go-v2/internal/ini`.
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.3.0](config/CHANGELOG.md#v130-2021-05-20) + * **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. + * **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.0.0](internal/ini/CHANGELOG.md#v100-2021-05-20) + * **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.0.0](service/applicationcostprofiler/CHANGELOG.md#v100-2021-05-20) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.0.0](service/apprunner/CHANGELOG.md#v100-2021-05-20) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.5.0](service/autoscaling/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.4.0](service/computeoptimizer/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.6.0](service/detective/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.5.0](service/eks/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.6.0](service/elasticache/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.4.0](service/elasticsearchservice/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.0](service/iam/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.5.0](service/imagebuilder/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.5.0](service/iot/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.4.0](service/iotdeviceadvisor/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.5.0](service/iotsitewise/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.4.0](service/kinesis/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.3.0](service/kinesisanalytics/CHANGELOG.md#v130-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.4.0](service/kinesisanalyticsv2/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.2.0](service/lexmodelsv2/CHANGELOG.md#v120-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.4.0](service/licensemanager/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.0](service/lightsail/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/macie`: [v1.4.0](service/macie/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.6.0](service/macie2/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.5.0](service/mediaconnect/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.5.0](service/neptune/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.5.0](service/personalize/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.4.0](service/quicksight/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.5.0](service/rekognition/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.8.0](service/s3/CHANGELOG.md#v180-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.6.0](service/sagemaker/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.3.0](service/sagemakera2iruntime/CHANGELOG.md#v130-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.6.0](service/securityhub/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.3.0](service/support/CHANGELOG.md#v130-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.4.0](service/transcribe/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+
+# Release (2021-05-14)
+
+## General Highlights
+* **Feature**: A constant has been added to each module to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.5.0
+  * **Feature**: `AddSDKAgentKey` and `AddSDKAgentKeyValue` in the `aws/middleware` package have been updated to direct metadata to the `User-Agent` HTTP header.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.3.0](service/codeartifact/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.0.0](service/commander/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.5.0](service/configservice/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.4.0](service/connect/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.7.0](service/ec2/CHANGELOG.md#v170-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.4.0](service/ecs/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.4.0](service/eks/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.0.0](service/finspace/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.0.0](service/finspacedata/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.4.0](service/iot/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.5.0](service/iotwireless/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.3.0](service/kinesis/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.2.0](service/kinesisanalytics/CHANGELOG.md#v120-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.3.0](service/kinesisanalyticsv2/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.3.0](service/lakeformation/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.1.0](service/lookoutmetrics/CHANGELOG.md#v110-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.5.0](service/mediaconvert/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.0](service/route53/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.7.0](service/s3/CHANGELOG.md#v170-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.6.0](service/s3control/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.0](service/ssm/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.0.0](service/ssmcontacts/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+
+# Release 2021-05-06
+
+## Breaking change
+* `service/ec2` - v1.6.0
+  * This release contains a breaking change to the Amazon EC2 API client. API number (int/int64/etc.) and boolean members were changed from value to pointer types. Applications using the EC2 API client will fail to compile after upgrading until all uses of the updated members are migrated. To migrate to this module, you'll need to update your application to use pointers for all number and boolean members in the API client module. The SDK provides helper utilities to convert between value and pointer types. For example, the [aws.Bool](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Bool) function returns the address of a bool literal. Similar utilities are available for all other primitive types in the [aws](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws) package.
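+A minimal, illustrative sketch of that migration (the field values below are placeholders, not a recommended configuration); the helpers come from the `aws` package named above:
+
+```go
+package main
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+)
+
+func main() {
+	// Prior to v1.6.0 these members were value types (e.g. MinCount: 1).
+	// They are now pointers; use the aws package helpers to take the
+	// address of a literal.
+	input := &ec2.RunInstancesInput{
+		MinCount:     aws.Int32(1),
+		MaxCount:     aws.Int32(1),
+		EbsOptimized: aws.Bool(false),
+		InstanceType: types.InstanceTypeT3Micro,
+	}
+	_ = input
+}
+```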
+
+## Service Client Highlights
+* `service/acmpca` - v1.3.0
+  * Feature: API client updated
+* `service/apigateway` - v1.3.0
+  * Feature: API client updated
+* `service/auditmanager` - v1.4.0
+  * Feature: API client updated
+* `service/chime` - v1.3.0
+  * Feature: API client updated
+* `service/cloudformation` - v1.4.0
+  * Feature: API client updated
+* `service/cloudfront` - v1.4.0
+  * Feature: API client updated
+* `service/codegurureviewer` - v1.3.0
+  * Feature: API client updated
+* `service/connect` - v1.3.0
+  * Feature: API client updated
+* `service/customerprofiles` - v1.5.0
+  * Feature: API client updated
+* `service/devopsguru` - v1.3.0
+  * Feature: API client updated
+* `service/docdb` - v1.4.0
+  * Feature: API client updated
+* `service/ec2` - v1.6.0
+  * Bug Fix: Fix incorrectly modeled Amazon EC2 number and boolean members in structures. The Amazon EC2 API client has been updated with a breaking change to fix all structure number and boolean members to be pointer types instead of value types. Fixes [#1107](https://github.com/aws/aws-sdk-go-v2/issues/1107), [#1178](https://github.com/aws/aws-sdk-go-v2/issues/1178), and [#1190](https://github.com/aws/aws-sdk-go-v2/issues/1190). This breaking change is made within the major version of the client's module, because the client operations failed and were unusable with value type number and boolean members with the EC2 API.
+  * Feature: API client updated
+* `service/ecs` - v1.3.0
+  * Feature: API client updated
+* `service/eks` - v1.3.0
+  * Feature: API client updated
+* `service/forecast` - v1.4.0
+  * Feature: API client updated
+* `service/glue` - v1.4.0
+  * Feature: API client updated
+* `service/health` - v1.3.0
+  * Feature: API client updated
+* `service/iotsitewise` - v1.3.0
+  * Feature: API client updated
+* `service/iotwireless` - v1.4.0
+  * Feature: API client updated
+* `service/kafka` - v1.3.0
+  * Feature: API client updated
+* `service/kinesisanalyticsv2` - v1.2.0
+  * Feature: API client updated
+* `service/macie2` - v1.4.0
+  * Feature: API client updated
+* `service/marketplacecatalog` - v1.2.0
+  * Feature: API client updated
+* `service/mediaconvert` - v1.4.0
+  * Feature: API client updated
+* `service/mediapackage` - v1.4.0
+  * Feature: API client updated
+* `service/mediapackagevod` - v1.3.0
+  * Feature: API client updated
+* `service/mturk` - v1.2.0
+  * Feature: API client updated
+* `service/nimble` - v1.0.0
+  * Feature: API client updated
+* `service/organizations` - v1.3.0
+  * Feature: API client updated
+* `service/personalize` - v1.3.0
+  * Feature: API client updated
+* `service/robomaker` - v1.4.0
+  * Feature: API client updated
+* `service/route53` - v1.5.0
+  * Feature: API client updated
+* `service/s3` - v1.6.0
+  * Bug Fix: Fix PutObject and UploadPart unseekable stream documentation link to point to the correct location.
+  * Feature: API client updated
+* `service/sagemaker` - v1.4.0
+  * Feature: API client updated
+* `service/securityhub` - v1.4.0
+  * Feature: API client updated
+* `service/servicediscovery` - v1.3.0
+  * Feature: API client updated
+* `service/snowball` - v1.3.0
+  * Feature: API client updated
+* `service/sns` - v1.3.0
+  * Feature: API client updated
+* `service/ssm` - v1.5.0
+  * Feature: API client updated
+## Core SDK Highlights
+* Dependency Update: Update smithy-go dependency to v1.4.0
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.4.0
+  * Feature: Add support for FIPS global partition endpoints ([#1242](https://github.com/aws/aws-sdk-go-v2/pull/1242))
+
+# Release 2021-04-23
+## Service Client Highlights
+* `service/cloudformation` - v1.3.2
+  * Documentation: Service Documentation Updates
+* `service/cognitoidentityprovider` - v1.2.3
+  * Documentation: Service Documentation Updates
+* `service/costexplorer` - v1.4.0
+  * Feature: Service API Updates
+* `service/databasemigrationservice` - v1.3.0
+  * Feature: Service API Updates
+* `service/detective` - v1.4.0
+  * Feature: Service API Updates
+* `service/elasticache` - v1.4.0
+  * Feature: Service API Updates
+* `service/forecast` - v1.3.0
+  * Feature: Service API Updates
+* `service/groundstation` - v1.3.0
+  * Feature: Service API Updates
+* `service/kendra` - v1.3.0
+  * Feature: Service API Updates
+* `service/redshift` - v1.5.0
+  * Feature: Service API Updates
+* `service/savingsplans` - v1.2.0
+  * Feature: Service API Updates
+* `service/securityhub` - v1.3.0
+  * Feature: Service API Updates
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `feature/rds/auth` - v1.0.0
+  * Feature: Add Support for Amazon RDS IAM Authentication
+
+# Release 2021-04-14
+## Service Client Highlights
+* `service/codebuild` - v1.3.0
+  * Feature: API client updated
+* `service/codestarconnections` - v1.2.0
+  * Feature: API client updated
+* `service/comprehendmedical` - v1.2.0
+  * Feature: API client updated
+* `service/configservice` - v1.4.0
+  * Feature: API client updated
+* `service/ec2` - v1.5.0
+  * Feature: API client updated
+* `service/fsx` - v1.3.0
+  * Feature: API client updated
+* `service/lightsail` - v1.4.0
+  * Feature: API client updated
+* `service/mediaconnect` - v1.3.0
+  * Feature: API client updated
+* `service/rds` - v1.3.0
+  * Feature: API client updated
+* `service/redshift` - v1.4.0
+  * Feature: API client updated
+* `service/shield` - v1.3.0
+  * Feature: API client updated
+* `service/sts` - v1.3.0
+  * Feature: API client updated
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-04-08
+## Service Client Highlights
+* Feature: API model sync
+* `service/lookoutequipment` - v1.0.0
+  * v1 Release: new service client
+* `service/mgn` - v1.0.0
+  * v1 Release: new service client
+## Core SDK Highlights
+* Dependency Update: smithy-go version bump
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-04-01
+## Service Client Highlights
+* Bug Fix: Fix URL Path and RawQuery of resolved endpoint being ignored by the API client's request serialization.
+  * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191)
+* Refactored the internal endpoints model for accessors
+* Feature: updated to latest models
+* New services
+  * `service/location` - v1.0.0
+  * `service/lookoutmetrics` - v1.0.0
+## Core SDK Highlights
+* Dependency Update: update smithy-go module
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-03-18
+## Service Client Highlights
+* Bug Fix: Updated presign URLs to no longer include the X-Amz-User-Agent header
+* Feature: Update API model
+* Add new supported APIs
+* `service/internal/s3shared` - v1.2.0
+  * Feature: Support for S3 Object Lambda
+* `service/s3` - v1.3.0
+  * Bug Fix: Adds documentation to the PutObject and UploadPart operations' Body member describing how to upload unseekable objects to an Amazon S3 bucket.
+  * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
+* `service/s3control` - v1.3.0
+  * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.3.0
+  * Feature: Add helper to V4 signer package to swap compute payload hash middleware with unsigned payload middleware
+* `feature/s3/manager` - v1.1.0
+  * Bug Fix: Add support for Amazon S3 Object Lambda feature.
+  * Feature: Updates for S3 Object Lambda feature
+
+# Release 2021-03-12
+## Service Client Highlights
+* Bug Fix: Fixed a bug that could cause union shape types to be deserialized incorrectly
+* Bug Fix: Fixed a bug where unboxed shapes that were marked as required were not serialized and sent over the wire, causing an API error from the service.
+* Bug Fix: Fixed a bug with generated API Paginators' handling of nil input parameters causing a panic.
+* Dependency Update: update smithy-go dependency
+* `service/detective` - v1.1.2
+  * Bug Fix: Fix deserialization of API response timestamp member.
+* `service/docdb` - v1.2.0
+  * Feature: Client now supports presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
+* `service/neptune` - v1.2.0
+  * Feature: Client now supports presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
+* `service/s3` - v1.2.1
+  * Bug Fix: Fixed an issue where ListObjectsV2 and ListParts paginators could loop infinitely
+  * Bug Fix: Fixed key encoding when addressing S3 Access Points
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `config` - v1.1.2
+  * Bug Fix: Fixed a panic when using WithEC2IMDSRegion without a specified IMDS client
+
+# Release 2021-02-09
+## Service Client Highlights
+* `service/s3` - v1.2.0
+  * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+* `service/s3control` - v1.2.0
+  * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.2.0
+  * Feature: Support for setting the endpoint source on context. Adds getter/setter for the endpoint source [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+* `config` - v1.1.1
+  * Bug Fix: Only validate SSO profile configuration when attempting to use SSO credentials [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
+  * Bug Fix: Environment credentials were not taking precedence over AWS_PROFILE [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
+
+# Release 2021-01-29
+## Service Client Highlights
+* Bug Fix: A serialization bug has been fixed that caused some service operations with empty inputs to not be serialized correctly ([#1071](https://github.com/aws/aws-sdk-go-v2/pull/1071))
+* Bug Fix: Fixes a bug that could cause a waiter to fail when comparing types ([#1083](https://github.com/aws/aws-sdk-go-v2/pull/1083))
+## Core SDK Highlights
+* Feature: EndpointResolverFromURL helpers have been added for constructing a service EndpointResolver type ([#1066](https://github.com/aws/aws-sdk-go-v2/pull/1066))
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.1.0
+  * Feature: Add support for specifying the EndpointSource on aws.Endpoint types ([#1070](https://github.com/aws/aws-sdk-go-v2/pull/1070/))
+* `config` - v1.1.0
+  * Feature: Add Support for AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
+* `credentials` - v1.1.0
+  * Feature: Add AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
+
+# Release 2021-01-19
+
+We are excited to announce the [General Availability](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-general-availability/)
+(GA) release of the [AWS SDK for Go version 2 (v2)](https://github.com/aws/aws-sdk-go-v2).
+This release follows the [Release candidate](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-v2-release-candidate)
+of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version 1 and takes advantage of modern Go language features.
+
+## Breaking Changes
+* `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * Updates the SDK's references to Config.Retryer to be a function that returns an aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances.
+  * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
+  * Removes the duplicate `Retryer` interface from the `retry` package. The single definition is now `aws.Retryer`.
+* `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031))
+  * Removes `ResponseMetadata` struct type, and adds its members to middleware metadata directly, to improve discoverability.
+* `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
+* `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020))
+  * This updates the generated enum const value names in the API client's `types` package to have the expected casing. Prior to this, enum names were generated with lowercase names instead of camel case.
+* `API Clients`: Updates SDK's API client request middleware stack values to be scoped to the individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019))
+  * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners.
+  * Stack values that should not escape are now scoped to the individual operation call.
+* `Multiple API Clients`: Unexported the API client's `WithEndpointResolver`; this type wasn't intended to be exported ([#1051](https://github.com/aws/aws-sdk-go-v2/pull/1051))
+  * Use the `aws.Config.EndpointResolver` member to set a custom endpoint resolver instead.
+
+## New Features
+* `service/sts`: Add support for presigning GetCallerIdentity operation ([#1030](https://github.com/aws/aws-sdk-go-v2/pull/1030))
+  * Adds a PresignClient to the `sts` API client module. Use PresignGetCallerIdentity to create presigned URLs for the GetCallerIdentity operation.
+  * Fixes [#1021](https://github.com/aws/aws-sdk-go-v2/issues/1021)
+* `aws/retry`: Add package documentation for retry package ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * Adds documentation for the retry package
+
+## Bug Fixes
+* `Multiple API Clients`: Fix SDK's generated serde for unmodeled operation input/output ([#1050](https://github.com/aws/aws-sdk-go-v2/pull/1050))
+  * Fixes [#1047](https://github.com/aws/aws-sdk-go-v2/issues/1047) by fixing how the SDK generated serialization and deserialization of API operations that did not have modeled input or output types. This caused the SDK to incorrectly attempt to deserialize response documents that were either empty or contained unexpected data.
+* `service/s3`: Fix Tagging parameter not serialized correctly for presigned PutObject requests ([#1017](https://github.com/aws/aws-sdk-go-v2/pull/1017))
+  * Fixes the Tagging parameter incorrectly being serialized to the URL's query string instead of being signed as an HTTP request header.
+  * When using PresignPutObject, make sure to add all signed headers returned by the method to your downstream HTTP client's request. These headers must be included in the request, or the request will fail with signature errors.
+  * Fixes [#1016](https://github.com/aws/aws-sdk-go-v2/issues/1016)
+* `service/s3`: Fix Unmarshaling `GetObjectAcl` operation's Grantee type response ([#1034](https://github.com/aws/aws-sdk-go-v2/pull/1034))
+  * Updates the SDK's codegen for correctly deserializing XML attributes in tags with XML namespaces.
+  * Fixes [#1013](https://github.com/aws/aws-sdk-go-v2/issues/1013)
+* `service/s3`: Fix Unmarshaling `GetBucketLocation` operation's response ([#1027](https://github.com/aws/aws-sdk-go-v2/pull/1027))
+  * Fixes [#908](https://github.com/aws/aws-sdk-go-v2/issues/908)
+
+## Migrating from v2 preview SDK's v0.31.0 to v1.0.0
+
+### aws.Config Retryer member
+
+If your application sets the `Config.Retryer` member, the application will need
+to be updated to set a function that returns an `aws.Retryer`. In addition, if
+your application used the `config.WithRetryer` helper, it must now be given a
+function that returns an `aws.Retryer` (see the sketch at the end of this
+migration section).
+
+If your application used the `retry.Retryer` type, update it to use the
+`aws.Retryer` type instead.
+
+### API Client enum value names
+
+If your application used the enum values in the API Client's `types` package between v0.31.0 and the latest version of the client module, you may need to update the naming of the enum values. The enum value names were updated to use camel case instead of lowercase.
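+A minimal sketch of the `Config.Retryer` migration described above, assuming an application that only wants to raise the maximum attempt count (the count of 5 is a placeholder):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/config"
+)
+
+func main() {
+	// Config.Retryer is now a function returning an aws.Retryer, so each
+	// API client created from this config gets its own retryer instance.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithRetryer(func() aws.Retryer {
+			return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
+		}),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg
+}
+```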
+The enum value names were updated to camel case instead of lowercase.
+
+# Release 2020-12-23
+
+We’re happy to announce the Release Candidate (RC) of the AWS SDK for Go v2.
+This RC follows the developer preview release of the AWS SDK for Go v2. The SDK
+has undergone a major rewrite from the v1 code base to incorporate your
+feedback and to take advantage of modern Go language features.
+
+## Documentation
+* Developer Guide: https://aws.github.io/aws-sdk-go-v2/docs/
+* API Reference docs: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2
+* Migration Guide: https://aws.github.io/aws-sdk-go-v2/docs/migrating/
+
+## Breaking Changes
+* Dependency `github.com/awslabs/smithy-go` has been relocated to `github.com/aws/smithy-go`
+  * The `smithy-go` repository was moved from the `awslabs` GitHub organization to `aws`.
+  * The `xml`, `httpbinding`, and `json` packages were relocated under the `encoding` package.
+* The module `ec2imds` moved to the `feature/ec2/imds` path ([#984](https://github.com/aws/aws-sdk-go-v2/pull/984))
+  * Moves the `ec2imds` feature module to a common location with other SDK features.
+* `aws/signer/v4`: Refactor AWS Sigv4 Signer and options types to allow function options ([#955](https://github.com/aws/aws-sdk-go-v2/pull/955))
+  * Fixes [#917](https://github.com/aws/aws-sdk-go-v2/issues/917), [#960](https://github.com/aws/aws-sdk-go-v2/issues/960), [#958](https://github.com/aws/aws-sdk-go-v2/issues/958)
+* `aws`: CredentialsCache type updated to require a constructor function ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
+  * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
+* `credentials`: ExpiryWindow and Jitter moved from the credential providers to `CredentialsCache` ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
+  * Moves the ExpiryWindow and Jitter options to be common options of the `CredentialsCache` instead of being duplicated across providers.
+  * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
+* `config`: Ensure shared credentials file has precedence over shared config file ([#990](https://github.com/aws/aws-sdk-go-v2/pull/990))
+  * The shared config file was incorrectly overriding the shared credentials file when merging values.
+* `config`: Add `context.Context` to `LoadDefaultConfig` ([#951](https://github.com/aws/aws-sdk-go-v2/pull/951))
+  * Updates the `config#LoadDefaultConfig` function to take `context.Context` as well as functional options for the `config#LoadOptions` type.
+  * Fixes [#926](https://github.com/aws/aws-sdk-go-v2/issues/926), [#819](https://github.com/aws/aws-sdk-go-v2/issues/819)
+* `aws`: Rename `NoOpRetryer` to `NopRetryer` for consistent naming with the rest of the SDK ([#987](https://github.com/aws/aws-sdk-go-v2/pull/987))
+  * Fixes [#878](https://github.com/aws/aws-sdk-go-v2/issues/878)
+* `service/s3control`: Change `S3InitiateRestoreObjectOperation.ExpirationInDays` from value to pointer type ([#988](https://github.com/aws/aws-sdk-go-v2/pull/988))
+* `aws`: `ReaderSeekerCloser` and `WriteAtBuffer` have been relocated to `feature/s3/manager`.
+
+## New Features
+* *Waiters*: Add Waiter utilities for API clients ([aws/smithy-go#237](https://github.com/aws/smithy-go/pull/237))
+  * Your application can now use Waiter utilities to wait for AWS resources.
+* `feature/dynamodb/attributevalue`: Add Amazon DynamoDB AttributeValue marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948))
+  * Adds a utility for marshaling Go types to and from Amazon DynamoDB AttributeValues.
+  * Also includes a utility for converting from Amazon DynamoDB Streams AttributeValues to Amazon DynamoDB AttributeValues.
+* `feature/dynamodbstreams/attributevalue`: Add Amazon DynamoDB Streams AttributeValue marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948))
+  * Adds a utility for marshaling Go types to and from Amazon DynamoDB Streams AttributeValues.
+  * Also includes a utility for converting from Amazon DynamoDB AttributeValues to Amazon DynamoDB Streams AttributeValues.
+* `feature/dynamodb/expression`: Add Amazon DynamoDB expression utility ([#981](https://github.com/aws/aws-sdk-go-v2/pull/981))
+  * Adds the expression utility to the SDK for easily building Amazon DynamoDB operation expressions in code.
+
+## Bug Fixes
+* `service/s3`: Fix Presigner to configure the client correctly for Amazon S3 ([#969](https://github.com/aws/aws-sdk-go-v2/pull/969))
+* `service/s3`: Fix deserialization of CompleteMultipartUpload ([#965](https://github.com/aws/aws-sdk-go-v2/pull/965))
+  * Fixes [#927](https://github.com/aws/aws-sdk-go-v2/issues/927)
+* `codegen`: Fix API client union serialization ([#979](https://github.com/aws/aws-sdk-go-v2/pull/979))
+  * Fixes [#978](https://github.com/aws/aws-sdk-go-v2/issues/978)
+
+## Service Client Highlights
+* API clients have been bumped to version `v0.31.0`
+* Regenerate API clients from updated API models, adding waiter utilities and union parameters.
+* `codegen`:
+  * Add documentation to union API parameters describing valid member types and a usage example ([aws/smithy-go#239](https://github.com/aws/smithy-go/pull/239))
+  * Normalize Metadata header map keys to be lower case ([aws/smithy-go#241](https://github.com/aws/smithy-go/pull/241)), ([#982](https://github.com/aws/aws-sdk-go-v2/pull/982))
+    * Fixes [#376](https://github.com/aws/aws-sdk-go-v2/issues/376): Amazon S3 Metadata parameter keys are now always returned as lower case.
+  * Fix API client deserialization of XML-based responses ([aws/smithy-go#245](https://github.com/aws/smithy-go/pull/245)), ([#992](https://github.com/aws/aws-sdk-go-v2/pull/992))
+    * Fixes [#910](https://github.com/aws/aws-sdk-go-v2/issues/910)
+* `service/s3`, `service/s3control`:
+  * Add support for reading `s3_use_arn_region` from the shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991))
+  * Add utility for getting the RequestID and HostID of a response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983))
+
+
+## Other changes
+* The branch that the repository's `HEAD` points to has been updated from `master` to `main`.
+  * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based on.
+
+## Migrating from v2 preview SDK's v0.30.0 to v0.31.0 release candidate
+
+### smithy-go module relocation
+
+If your application uses `smithy-go` utilities for the request pipeline, it will need to be updated to refer to the new import path, `github.com/aws/smithy-go`. If your application did *not* use `smithy-go` utilities directly, it will be updated automatically.
+
+### EC2 IMDS module relocation
+
+If your application used the `ec2imds` module, it has been relocated to `feature/ec2/imds`. Your application will need to update to the new import path, `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`.
+
+### CredentialsCache Constructor and ExpiryWindow Options
+
+The `aws#CredentialsCache` type was updated, and a new constructor function, `NewCredentialsCache`, was added.
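+
+For illustration, a minimal sketch of the new constructor is shown below. This is a hedged example, not an excerpt from the SDK documentation: `stsClient`, the role ARN, and the loaded `cfg` are placeholders, and imports of `aws`, `stscreds`, and `time` are assumed.
+
+```go
+// Wrap a credentials provider in the cache, configuring the expiry
+// window on the cache rather than on the provider.
+provider := stscreds.NewAssumeRoleProvider(stsClient, "arn:aws:iam::123456789012:role/example")
+cfg.Credentials = aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
+    o.ExpiryWindow = 5 * time.Minute
+})
+```
+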
+As the sketch shows, this constructor must be used to initialize the `CredentialsCache`, and it takes function options to specify additional configuration, e.g. `ExpiryWindow` and `Jitter`.
+
+If your application was specifying the `ExpiryWindow` with the `credentials/stscreds#AssumeRoleOptions`, `credentials/stscreds#WebIdentityRoleOptions`, `credentials/processcreds#Options`, or `credentials/ec2rolecreds#Options` types, the `ExpiryWindow` option will need to be specified on the `CredentialsCache` constructor instead.
+
+### AWS Sigv4 Signer Refactor
+
+The `aws/signer/v4` package's `Signer.SignHTTP` and `Signer.PresignHTTP` methods were updated to take functional options. If your application provided a custom implementation for the API clients' `HTTPSignerV4` or `HTTPPresignerV4` interfaces, that implementation will need to be updated for the new function signature.
+
+### Configuration Loading
+
+The `config#LoadDefaultConfig` function has been updated to require a `context.Context` as the first parameter, with optional function options as variadic additional arguments. Your application will need to update its usage of `LoadDefaultConfig` to pass in a `context.Context` as the first parameter. If your application used the `With...` helpers, those should continue to work without issue.
+
+The v2 SDK corrects its behavior to be in line with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information on how to use the shared config and credentials files.
+
+
+# Release 2020-11-30
+
+## Breaking Change
+* `codegen`: Add support for slices and maps generated with value members instead of pointers ([#887](https://github.com/aws/aws-sdk-go-v2/pull/887))
+  * This update allows the SDK's code generation to be aware of API shapes and members that are not nullable, and to render them as value types instead of pointer types.
+  * Several API client parameter types will change from pointer members to value members for slice, map, number, and bool member types.
+  * See the migration notes for migrating to v0.30.0 with this change.
+* `aws/transport/http`: Move aws.BuildableHTTPClient to the HTTP transport package ([#898](https://github.com/aws/aws-sdk-go-v2/pull/898))
+  * Moves the `BuildableHTTPClient` from the SDK's `aws` package to the `aws/transport/http` package as `BuildableClient`, alongside other HTTP-specific utilities.
+* `feature/cloudfront/sign`: Add CloudFront sign feature as a module ([#884](https://github.com/aws/aws-sdk-go-v2/pull/884))
+  * Moves the `service/cloudfront/sign` package out of the `cloudfront` module, and into its own module as `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`.
+
+## New Features
+* `config`: Add a WithRetryer provider helper to the config loader ([#897](https://github.com/aws/aws-sdk-go-v2/pull/897))
+  * Adds a `WithRetryer` configuration provider to the config loader as a convenience helper to set the `Retryer` on the `aws.Config` while it is being loaded.
+* `config`: Default to TLS 1.2 for HTTPS requests ([#892](https://github.com/aws/aws-sdk-go-v2/pull/892))
+  * Updates the SDK's default HTTP client to use TLS 1.2 as the minimum TLS version for all HTTPS requests by default.
+
+## Bug Fixes
+* `config`: Fix AWS_CA_BUNDLE usage while loading default config ([#912](https://github.com/aws/aws-sdk-go-v2/pull/912))
+  * Fixes `LoadDefaultConfig`'s configuration provider order to correctly load a custom HTTP client prior to configuring the client for the `AWS_CA_BUNDLE` environment variable.
+* `service/s3`: Fix signature mismatch error for S3 ([#913](https://github.com/aws/aws-sdk-go-v2/pull/913))
+  * Fixes ([#883](https://github.com/aws/aws-sdk-go-v2/issues/883))
+* `service/s3control`:
+  * Fix HostPrefix addition behavior for s3control ([#882](https://github.com/aws/aws-sdk-go-v2/pull/882))
+    * Fixes ([#863](https://github.com/aws/aws-sdk-go-v2/issues/863))
+  * Fix s3control error deserializer ([#875](https://github.com/aws/aws-sdk-go-v2/pull/875))
+    * Fixes ([#864](https://github.com/aws/aws-sdk-go-v2/issues/864))
+
+## Service Client Highlights
+* Pagination support has been added to supported APIs. See [Using Operation Paginators](https://aws.github.io/aws-sdk-go-v2/docs/making-requests/#using-operation-paginators) in the Developer Guide. ([#885](https://github.com/aws/aws-sdk-go-v2/pull/885))
+* Logging support has been added to service clients. See [Logging](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/logging/) in the Developer Guide. ([#872](https://github.com/aws/aws-sdk-go-v2/pull/872))
+* `service`: Add support for pre-signed URL clients for the S3, RDS, and EC2 services ([#888](https://github.com/aws/aws-sdk-go-v2/pull/888))
+  * `service/s3`: operations `PutObject` and `GetObject` are now supported with the s3 pre-signed URL client.
+  * `service/ec2`: operation `CopySnapshot` is now supported with the ec2 pre-signed URL client.
+  * `service/rds`: operations `CopyDBSnapshot`, `CreateDBInstanceReadReplica`, `CopyDBClusterSnapshot`, and `CreateDBCluster` are now supported with the rds pre-signed URL client.
+* `service/s3`: Add support for S3 access point and S3 on Outposts access point ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
+* `service/s3control`: Adds support for S3 on Outposts access point and S3 on Outposts bucket ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
+
+## Migrating from v2 preview SDK's v0.29.0 to v0.30.0
+
+### aws.BuildableHTTPClient move
+The `aws` package's `BuildableHTTPClient` HTTP client implementation was moved to `aws/transport/http` as `BuildableClient`. If your application used the `aws.BuildableHTTPClient` type, update it to use the `BuildableClient` in the `aws/transport/http` package.
+
+### Slice and Map API member types
+This release includes several code generation updates for API clients' slice and map members. Using API modeling metadata, slice and map members are now generated as value types instead of pointer types. For your application this means that, for these members, the SDK now has value member types instead of pointer member types.
+
+To migrate to this change, you'll need to remove the pointer handling for slice and map members, and instead handle the member values directly.
+
+### Boolean and Number API member types
+Similar to the slice and map members now being generated as values, the SDK's code generation now uses API model metadata to generate boolean and number members as value types instead of pointer types.
+
+To migrate to this change, you'll need to remove the pointer handling for number and boolean member types, and instead use value handling, as in the sketch below.
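+
+As a rough, hedged sketch of the migration (`MaxKeys` on Amazon S3's `ListObjectsV2Input` is assumed here to be one of the number members that changed to a value type; consult each service's model for the exact set of affected members):
+
+```go
+// Before v0.30.0, number members were pointers and required setter helpers:
+//
+//   params := &s3.ListObjectsV2Input{
+//       Bucket:  aws.String("example-bucket"),
+//       MaxKeys: aws.Int32(10),
+//   }
+
+// With v0.30.0 and later, non-nullable members are plain values:
+params := &s3.ListObjectsV2Input{
+    Bucket:  aws.String("example-bucket"), // nullable members remain pointers
+    MaxKeys: 10,                           // value member, no pointer helper needed
+}
+```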
+
+# Release 2020-10-30
+
+## New Features
+* Adds a HostnameImmutable flag on aws.Endpoint to indicate to the SDK whether the associated endpoint's hostname may be modified. ([#848](https://github.com/aws/aws-sdk-go-v2/pull/848))
+
+## Bug Fixes
+* Fix SDK handling of XML namespaces for XML-based services ([#858](https://github.com/aws/aws-sdk-go-v2/pull/858))
+  * Fixes ([#850](https://github.com/aws/aws-sdk-go-v2/issues/850))
+
+## Service Client Highlights
+* API clients have been bumped to version `v0.29.0`
+  * Regenerate API clients from updated API models.
+* Improve client doc generation.
+
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+## Migrating from v2 preview SDK's v0.28.0 to v0.29.0
+* The API clients' ResolverOptions type was renamed to EndpointResolverOptions
+
+# Release 2020-10-26
+
+## New Features
+* `service/s3`: Add support for Accelerate and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+
+## Service Client Highlights
+* API clients have been bumped to version `v0.28.0`
+  * Regenerate API clients from updated API models.
+* `service/s3`: Add support for Accelerate and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/route53`: Fix sanitizeURL customization to handle a leading slash (`/`) [#846](https://github.com/aws/aws-sdk-go-v2/pull/846)
+  * Fixes [#843](https://github.com/aws/aws-sdk-go-v2/issues/843)
+* `service/route53`: Fix codegen to correctly look for operations that need the sanitize URL customization ([#851](https://github.com/aws/aws-sdk-go-v2/pull/851))
+
+## Core SDK Highlights
+* `aws/protocol/restjson`: Fix unexpected JSON error response deserialization ([#837](https://github.com/aws/aws-sdk-go-v2/pull/837))
+  * Fixes [#832](https://github.com/aws/aws-sdk-go-v2/issues/832)
+* `example/service/s3/listobjects`: Add example for Amazon S3 ListObjectsV2 ([#838](https://github.com/aws/aws-sdk-go-v2/pull/838))
+
+# Release 2020-10-16
+
+## New Features
+* `feature/s3/manager`:
+  * Initial `v0.1.0` release
+  * Add the Amazon S3 Upload and Download transfer manager ([#802](https://github.com/aws/aws-sdk-go-v2/pull/802))
+
+## Service Client Highlights
+* Clients have been bumped to version `v0.27.0`
+* `service/machinelearning`: Add customization for setting the client endpoint with the PredictEndpoint value if set ([#782](https://github.com/aws/aws-sdk-go-v2/pull/782))
+* `service/s3`: Fix empty response body deserialization in case of an error response ([#801](https://github.com/aws/aws-sdk-go-v2/pull/801))
+  * Fixes the XML deserialization util to correctly handle an empty response body in the case of an error response.
+* `service/s3`: Add customization to auto-fill the Content-Md5 request header for Amazon S3 operations ([#812](https://github.com/aws/aws-sdk-go-v2/pull/812))
+* `service/s3`: Add fallback to using the HTTP status code for the error code ([#818](https://github.com/aws/aws-sdk-go-v2/pull/818))
+  * Adds falling back to using the HTTP status code to create an API error code when no error code is received from the service, such as for HeadObject.
+* `service/route53`: Add support for deserializing the `InvalidChangeBatch` API error ([#792](https://github.com/aws/aws-sdk-go-v2/pull/792))
+* `codegen`: Remove API client `Options` getter methods ([#788](https://github.com/aws/aws-sdk-go-v2/pull/788))
+* `codegen`: Regenerate API client modeled endpoints ([#791](https://github.com/aws/aws-sdk-go-v2/pull/791))
+* `codegen`: Sort API client struct member parameters by required, then alphabetically ([#787](https://github.com/aws/aws-sdk-go-v2/pull/787))
+* `codegen`: Add package docs to API client modules ([#821](https://github.com/aws/aws-sdk-go-v2/pull/821))
+* `codegen`: Rename `smithy-go`'s `smithy.OperationError` to `smithy.OperationInvokeError`.
+
+## Core SDK Highlights
+* `config`:
+  * Bumped to `v0.2.0`
+  * Refactor Config Module, Add Config Package Documentation and Examples, Improve Overall SDK Readme ([#822](https://github.com/aws/aws-sdk-go-v2/pull/822))
+* `credentials`:
+  * Bumped to `v0.1.2`
+  * Strip Monotonic Clock Readings when Comparing Credential Expiry Time ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
+* `ec2imds`:
+  * Bumped to `v0.1.2`
+  * Fix refreshing the API token if expired ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
+
+## Migrating from v0.26.0 to v0.27.0
+
+#### Configuration
+
+The `config` module's exported types were trimmed down to add clarity and reduce confusion. Additional changes were also made to the `config` module's helpers.
+
+* Refactored `WithCredentialsProvider`, `WithHTTPClient`, and `WithEndpointResolver` to functions instead of structs.
+* Removed `MFATokenFuncProvider`; use `AssumeRoleCredentialOptionsProvider` for setting options for `stscreds.AssumeRoleOptions`.
+* Renamed `WithWebIdentityCredentialProviderOptions` to `WithWebIdentityRoleCredentialOptions`
+* Renamed `AssumeRoleCredentialProviderOptions` to `AssumeRoleCredentialOptionsProvider`
+* Renamed `EndpointResolverFuncProvider` to `EndpointResolverProvider`
+
+#### API Client
+* API client `Options` type getter methods have been removed. Use the struct members instead.
+* The error returned by API client operations was renamed from `smithy.OperationError` to `smithy.OperationInvokeError`.
+
+# Release 2020-09-30
+
+## Service Client Highlights
+* Service clients have been bumped to `v0.26.0` to simplify the documentation experience when using [pkg.go.dev](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2).
+* `service/s3`: Disable automatic decompression when getting Amazon S3 objects with the `Content-Encoding: gzip` metadata header. ([#748](https://github.com/aws/aws-sdk-go-v2/pull/748))
+  * This changes the SDK's default behavior with regard to making S3 API calls. The client will no longer automatically set the `Accept-Encoding` HTTP request header, nor will it automatically decompress the gzipped response when the `Content-Encoding: gzip` response header is received.
+  * If you'd like the client to send the `Accept-Encoding: gzip` request header, you can add this header to the API operation method call with the [SetHeaderValue](https://pkg.go.dev/github.com/awslabs/smithy-go/transport/http#SetHeaderValue) middleware helper, as in the sketch after this list.
+* `service/cloudfront/sign`: Fix cloudfront example usage of SignWithPolicy ([#673](https://github.com/aws/aws-sdk-go-v2/pull/673))
+  * Fixes a documentation typo ([#671](https://github.com/aws/aws-sdk-go-v2/issues/671)) by correcting the usage of `SignWithPolicy`.
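+
+As a hedged sketch only (assuming an existing Amazon S3 `client`, the functional-options call style, and `smithyhttp` as an import alias for the `smithy-go` `transport/http` package linked above):
+
+```go
+result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+    Bucket: aws.String("example-bucket"),
+    Key:    aws.String("example-key"),
+}, func(o *s3.Options) {
+    // Ask the service for a gzip-encoded body; note the SDK will no
+    // longer transparently decompress the response.
+    o.APIOptions = append(o.APIOptions,
+        smithyhttp.SetHeaderValue("Accept-Encoding", "gzip"))
+})
+```
+
+This applies the header to a single operation call; the same option can typically also be appended to `aws.Config.APIOptions` to apply it to every request made by clients built from that config.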
+
+## Core SDK Highlights
+* SDK core module released at `v0.26.0`
+* `config` module released at `v0.1.1`
+* `credentials` module released at `v0.1.1`
+* `ec2imds` module released at `v0.1.1`
+
+
+# Release 2020-09-28
+## Announcements
+We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2.
+
+The updated clients leverage new developments and advancements within AWS and the Go software ecosystem at large since
+our original preview announcement. Using the new clients will be a bit different from before. The key differences are:
+simplified API operation invocation, performance improvements, support for error wrapping, and a new middleware architecture.
+Below is a guided walkthrough to help you try it out and share your feedback, in order to better influence the features
+you’d like to see in the GA version.
+
+See the [Announcement Blog Post](https://aws.amazon.com/blogs/developer/client-updates-in-the-preview-version-of-the-aws-sdk-for-go-v2/) for more details.
+
+## Service Client Highlights
+* Initial service clients released at version `v0.1.0`
+## Core SDK Highlights
+* SDK core module released at `v0.25.0`
+* `config` module released at `v0.1.0`
+* `credentials` module released at `v0.1.0`
+* `ec2imds` module released at `v0.1.0`
+
+## Migrating from v2 preview SDK's v0.24.0 to v0.25.0
+
+#### Design changes
+
+The v2 preview SDK `v0.25.0` release represents a significant stepping stone bringing the v2 SDK closer to its target design and usability. This release includes significant breaking changes to the v2 preview SDK. The updates in the `v0.25.0` release focus on refactoring and modularization of the SDK’s API clients to use the new [client design](https://github.com/aws/aws-sdk-go-v2/issues/438), updated request pipeline (aka [middleware](https://pkg.go.dev/github.com/awslabs/smithy-go/middleware)), refactored [credential providers](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials), and [configuration loading](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) packages.
+
+We've also bumped the minimum supported Go version with this release. Starting with v0.25.0 the SDK requires a minimum version of Go `v1.15`.
+
+As a part of the refactoring done to the v2 preview SDK, some components have not been included in this update. The following is a non-exhaustive list of features that are not available:
+
+* API Paginators - [#439](https://github.com/aws/aws-sdk-go-v2/issues/439)
+* API Waiters - [#442](https://github.com/aws/aws-sdk-go-v2/issues/442)
+* Presign URL - [#794](https://github.com/aws/aws-sdk-go-v2/issues/794)
+* Amazon S3 Upload and Download manager - [#802](https://github.com/aws/aws-sdk-go-v2/pull/802)
+* Amazon DynamoDB's AttributeValue marshaler, and Expression package - [#790](https://github.com/aws/aws-sdk-go-v2/issues/790)
+* Debug Logging - [#594](https://github.com/aws/aws-sdk-go-v2/issues/594)
+
+We expect additional breaking changes to the v2 preview SDK in the coming releases. We expect these changes to focus on organization, naming, and hardening the SDK's design for future feature capabilities after it is released for general availability.
+
+
+#### Relocated Packages
+
+In this release, packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of the packages that were relocated.
+
+* `github.com/aws/aws-sdk-go-v2/aws/external` => `github.com/aws/aws-sdk-go-v2/config` module
+* `github.com/aws/aws-sdk-go-v2/aws/ec2metadata` => `github.com/aws/aws-sdk-go-v2/ec2imds` module
+
+The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored credentials providers.
+
+* `github.com/aws/aws-sdk-go-v2/ec2rolecreds` => `github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds`
+* `github.com/aws/aws-sdk-go-v2/endpointcreds` => `github.com/aws/aws-sdk-go-v2/credentials/endpointcreds`
+* `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds`
+* `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds`
+
+
+#### Modularization
+
+New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired.
+
+* [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config)
+* [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials)
+* Module for each API client, e.g. [github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3)
+
+
+#### API Clients
+
+The following is a list of the major changes to the API client modules:
+
+* Removed paginators: we plan to add these back once they are implemented to integrate with the SDK's new API client design.
+* Removed waiters: we need to further investigate how the V2 SDK should expose waiters, and how their behavior should be modeled.
+* API clients are now Go modules. When migrating to the v2 preview SDK `v0.25.0`, you'll need to add the API client's module to your application's go.mod file.
+* API parameter nested types have been moved to a `types` package within the API client's module, e.g. `github.com/aws/aws-sdk-go-v2/service/s3/types`. These types were moved to improve documentation and discovery of the API client, operation, and input/output types. For example, Amazon S3's ListObjects operation's [ListObjectsOutput.Contents](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/#ListObjectsOutput) output member is a slice of [types.Object](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/types#Object).
+* The client operation method has been renamed, removing the `Request` suffix. The method now invokes the operation instead of constructing a request, which needed to be invoked separately. The operation methods were also expanded to include functional options for providing operation-specific configuration, such as modifying the request pipeline.
+
+```go
+result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{
+    TableName: aws.String("exampleTable"),
+}, func(o *dynamodb.Options) {
+    // Limit operation calls to only 1 attempt.
+    o.Retryer = retry.AddWithMaxAttempts(o.Retryer, 1)
+})
+```
+
+
+#### Configuration
+
+In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`.
+
+The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and the `github.com/aws/aws-sdk-go-v2/config` module.
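+
+A minimal, hedged sketch of loading configuration and constructing a client after the rename (shown with the `context.Context` first argument that a later release, described above, made required; the region and service here are arbitrary):
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+
+    "github.com/aws/aws-sdk-go-v2/config"
+    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+func main() {
+    // Load configuration from the environment, shared config, and
+    // shared credentials files.
+    cfg, err := config.LoadDefaultConfig(context.TODO(),
+        config.WithRegion("us-west-2"))
+    if err != nil {
+        log.Fatalf("unable to load SDK config, %v", err)
+    }
+
+    // Construct an API client from the loaded configuration.
+    client := dynamodb.NewFromConfig(cfg)
+    _ = client
+}
+```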
+
+
+#### Error Handling
+
+The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error.
+
+The SDK API clients now include generated public error types for errors modeled for an API. The SDK will automatically deserialize the error response from the API into the appropriate error type. Your application should use `errors.As` to check if the returned error matches one it is interested in. Your application can also use the generic interface [smithy.APIError](https://pkg.go.dev/github.com/awslabs/smithy-go/#APIError) to test if the API client's operation method returned an API error, without checking against a specific error type.
+
+API client errors returned to the caller will use error wrapping to layer the error values. This allows underlying error types to be specific to their use case, and the SDK's more generic error types to wrap the underlying error.
+
+For example, if an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) [Scan](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb#Scan) operation call cannot find the `TableName` requested, the error returned will contain [dynamodb.ResourceNotFoundException](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb/types#ResourceNotFoundException). The SDK will return this error value wrapped in a couple of layers, with each layer adding additional contextual information such as [ResponseError](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/transport/http#ResponseError) for AWS HTTP response error metadata, and [smithy.OperationError](https://pkg.go.dev/github.com/awslabs/smithy-go/#OperationError) for API operation call metadata.
+
+```go
+result, err := client.Scan(context.TODO(), params)
+if err != nil {
+    // To get a specific API error
+    var notFoundErr *types.ResourceNotFoundException
+    if errors.As(err, &notFoundErr) {
+        log.Printf("scan failed because the table was not found, %v",
+            notFoundErr.ErrorMessage())
+    }
+
+    // To get any API error
+    var apiErr smithy.APIError
+    if errors.As(err, &apiErr) {
+        log.Printf("scan failed because of an API error, Code: %v, Message: %v",
+            apiErr.ErrorCode(), apiErr.ErrorMessage())
+    }
+
+    // To get the AWS response metadata, such as RequestID
+    var respErr *awshttp.ResponseError // Using import alias "awshttp" for package github.com/aws/aws-sdk-go-v2/aws/transport/http
+    if errors.As(err, &respErr) {
+        log.Printf("scan failed with HTTP status code %v, Request ID %v and error %v",
+            respErr.HTTPStatusCode(), respErr.ServiceRequestID(), respErr)
+    }
+
+    return err
+}
+```
+
+Logging an error value will include information from each wrapped error. For example, the following is a mock error logged for a Scan operation call that failed because the table was not found.
+
+> 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found
+
+
+#### Endpoints
+
+The `github.com/aws/aws-sdk-go-v2/aws/endpoints` package has been removed from the SDK, along with all exported endpoint definitions and iteration behavior.
+Each generated API client now includes its own endpoint definition internal to the module.
+
+API clients can optionally be configured with a generic [aws.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#EndpointResolver) via the [aws.Config.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config.EndpointResolver). If the API client is not configured with a custom endpoint resolver, it will defer to the endpoint resolver the client module was generated with.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..3b64466870c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
new file mode 100644
index 000000000000..c2fc3b8f5b0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
@@ -0,0 +1,178 @@
+# Contributing to the AWS SDK for Go
+
+Thank you for your interest in contributing to the AWS SDK for Go!
+We work hard to provide a high-quality and useful SDK, and we greatly value
+feedback and contributions from our community. Whether it's a bug report,
+new feature, correction, or additional documentation, we welcome your issues
+and pull requests. Please read through this document before submitting any
+[issues] or [pull requests][pr] to ensure we have all the necessary information to
+effectively respond to your bug report or contribution.
+
+Jump To:
+
+* [Bug Reports](#bug-reports)
+* [Feature Requests](#feature-requests)
+* [Code Contributions](#code-contributions)
+
+
+## How to contribute
+
+*Before you send us a pull request, please be sure that:*
+
+1. You're working from the latest source on the master branch.
+2. You check existing open, and recently closed, pull requests to be sure
+   that someone else hasn't already addressed the problem.
+3. You create an issue before working on a contribution that will take a
+   significant amount of your time.
+
+*Creating a Pull Request*
+
+1. Fork the repository.
+2. In your fork, make your change in a branch that's based on this repo's master branch.
+3. Commit the change to your fork, using a clear and descriptive commit message.
+4. Create a pull request, answering any questions in the pull request form.
+
+For contributions that will take a significant amount of time, open a new
+issue to pitch your idea before you get started. Explain the problem and
+describe the content you want to see added to the documentation. Let us know
+if you'll write it yourself or if you'd like us to help. We'll discuss your
+proposal with you and let you know whether we're likely to accept it.
+
+## Bug Reports
+
+You can file bug reports against the SDK on the [GitHub issues][issues] page.
+
+If you are filing a report for a bug or regression in the SDK, it's extremely
+helpful to provide as much information as possible when opening the original
+issue. This helps us reproduce and investigate the possible bug without having
+to wait for this extra information to be provided.
+Please read the following guidelines prior to filing a bug report.
+
+1. Search through existing [issues][] to ensure that your specific issue has
+   not yet been reported. If it is a common issue, it is likely there is
+   already a bug report for your problem.
+
+2. Ensure that you have tested the latest version of the SDK. Although you
+   may have an issue against an older version of the SDK, we cannot provide
+   bug fixes for old versions. It's also possible that the bug may have been
+   fixed in the latest release.
+
+3. Provide as much information about your environment, SDK version, and
+   relevant dependencies as possible. For example, let us know what version
+   of Go you are using, which operating system and version you are on, and
+   the environment your code is running in, e.g. a container.
+
+4. Provide a minimal test case that reproduces your issue or any error
+   information related to your problem. We can provide feedback much
+   more quickly if we know what operations you are calling in the SDK. If
+   you cannot provide a full test case, provide as much code as you can
+   to help us diagnose the problem. Any relevant information should be provided
+   as well, like whether this is a persistent issue, or if it only occurs
+   some of the time.
+
+## Feature Requests
+
+Open an [issue][issues] with the following:
+
+* A short, descriptive title. Ideally, other community members should be able
+  to get a good idea of the feature just from reading the title.
+* A detailed description of the proposed feature.
+  * Why it should be added to the SDK.
+  * If possible, example code to illustrate how it should work.
+* Use Markdown to make the request easier to read.
+* If you intend to implement this feature, indicate that you'd like the issue to be assigned to you.
+
+## Code Contributions
+
+We are always happy to receive code and documentation contributions to the SDK.
+Please be aware of the following notes prior to opening a pull request:
+
+1. The SDK is released under the [Apache license][license]. Any code you submit
+   will be released under that license. For substantial contributions, we may
+   ask you to sign a [Contributor License Agreement (CLA)][cla].
+
+2. If you would like to implement support for a significant feature that is not
+   yet available in the SDK, please talk to us beforehand to avoid any
+   duplication of effort.
+
+3. Wherever possible, pull requests should contain tests as appropriate.
+   Bugfixes should contain tests that exercise the corrected behavior (i.e., the
+   test should fail without the bugfix and pass with it), and new features
+   should be accompanied by tests exercising the feature.
+
+4. Pull requests that contain failing tests will not be merged until the test
+   failures are addressed. Pull requests that cause a significant drop in the
+   SDK's test coverage percentage are unlikely to be merged until tests have
+   been added.
+
+5. The JSON files under the SDK's `models` folder are sourced from outside the SDK,
+   such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests
+   directly on these models. If you discover an issue with the models, please
+   create a [GitHub issue][issues] describing the issue.
+
+### Testing
+
+To run the tests locally, run the `make unit` command; it will `go get` the
+SDK's testing dependencies, and run vet, lint, and unit tests for the SDK.
+
+```
+make unit
+```
+
+Standard go testing functionality is supported as well.
+To test SDK code that is tagged with `codegen`, you'll need to set the build
+tag in the go test command. The `make unit` command will do this automatically.
+
+```
+go test -tags codegen ./private/...
+```
+
+See the `Makefile` for additional testing tags that can be used in testing.
+
+To test on multiple platforms, the SDK includes several Dockerfiles under the
+`awstesting/sandbox` folder, and associated make recipes to execute
+unit testing within environments configured for specific Go versions.
+
+```
+make sandbox-test-go18
+```
+
+To run all sandbox environments, use the following make recipes:
+
+```
+# Optionally update the Go tip that will be used during the batch testing
+make update-aws-golang-tip
+
+# Run all SDK tests for supported Go versions in sandboxes
+make sandbox-test
+```
+
+In addition, the sandbox environments include make recipes for interactive modes
+so you can run commands within the Docker container in the context of the SDK.
+
+```
+make sandbox-go18
+```
+
+### Changelog Documents
+
+You can see all release changes in the `CHANGELOG.md` file at the root of the
+repository. The release notes added to this file will contain service client
+updates, and major SDK changes. When submitting a pull request, please include an entry in `CHANGELOG_PENDING.md` under the appropriate changelog type so your changelog entry is included in the following release.
+
+#### Changelog Types
+
+* `SDK Features` - For major additive features, internal changes that have
+outward impact, or updates to the SDK foundations. This will result in a minor
+version change.
+* `SDK Enhancements` - For minor additive features or incremental-sized changes.
+This will result in a patch version change.
+* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
+patch version change.
+
+[issues]: https://github.com/aws/aws-sdk-go/issues
+[pr]: https://github.com/aws/aws-sdk-go/pulls
+[license]: http://aws.amazon.com/apache2.0/
+[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
+[releasenotes]: https://github.com/aws/aws-sdk-go/releases
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
new file mode 100644
index 000000000000..8490c7d67362
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
@@ -0,0 +1,15 @@
+Open Discussions
+---
+The following issues are currently open for community feedback.
+All discourse must adhere to the [Code of Conduct] policy.
+
+* [Refactoring API Client Paginators](https://github.com/aws/aws-sdk-go-v2/issues/439)
+* [Refactoring API Client Waiters](https://github.com/aws/aws-sdk-go-v2/issues/442)
+* [Refactoring API Client Enums and Types to Discrete Packages](https://github.com/aws/aws-sdk-go-v2/issues/445)
+* [SDK Modularization](https://github.com/aws/aws-sdk-go-v2/issues/444)
+
+Past Discussions
+---
+The issues listed here are for documentation purposes, and are used to capture issues and their associated discussions.
+
+[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile new file mode 100644 index 000000000000..bea96abc2a7b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -0,0 +1,507 @@ +# Lint rules to ignore +LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type' +LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID' + +UNIT_TEST_TAGS= +BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest" + +SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go + +SDK_MIN_GO_VERSION ?= 1.15 + +EACHMODULE_FAILFAST ?= true +EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST} + +EACHMODULE_CONCURRENCY ?= 1 +EACHMODULE_CONCURRENCY_FLAG=-c ${EACHMODULE_CONCURRENCY} + +EACHMODULE_SKIP ?= +EACHMODULE_SKIP_FLAG=-skip="${EACHMODULE_SKIP}" + +EACHMODULE_FLAGS=${EACHMODULE_CONCURRENCY_FLAG} ${EACHMODULE_FAILFAST_FLAG} ${EACHMODULE_SKIP_FLAG} + +# SDK's Core and client packages that are compatible with Go 1.9+. +SDK_CORE_PKGS=./aws/... ./internal/... +SDK_CLIENT_PKGS=./service/... +SDK_COMPA_PKGS=${SDK_CORE_PKGS} ${SDK_CLIENT_PKGS} + +# SDK additional packages that are used for development of the SDK. +SDK_EXAMPLES_PKGS= +SDK_ALL_PKGS=${SDK_COMPA_PKGS} ${SDK_EXAMPLES_PKGS} + +RUN_NONE=-run NONE +RUN_INTEG=-run '^TestInteg_' + +CODEGEN_RESOURCES_PATH=$(shell pwd)/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen +CODEGEN_API_MODELS_PATH=$(shell pwd)/codegen/sdk-codegen/aws-models +ENDPOINTS_JSON=${CODEGEN_RESOURCES_PATH}/endpoints.json +ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json + +LICENSE_FILE=$(shell pwd)/LICENSE.txt + +SMITHY_GO_VERSION ?= +PRE_RELEASE_VERSION ?= +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_ANNOTATE_STABLE_GEN = ${REPOTOOLS_MODULE}/cmd/annotatestablegen@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MAKE_RELATIVE = ${REPOTOOLS_MODULE}/cmd/makerelative@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledependency@${REPOTOOLS_VERSION} + +REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false +REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE} + +REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +.PHONY: all +all: generate unit + +################### +# Code Generation # +################### +.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \ +gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy gen-aws-ptrs tidy-modules-% \ +add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \ 
+sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \ +update-module-metadata download-modules-% + +generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \ +gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy min-go-version-. \ +tidy-modules-. add-module-license-files gen-aws-ptrs format + +smithy-generate: + cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean + +smithy-build: gen-repo-mod-replace + cd codegen && ./gradlew clean build -Plog-tests + +smithy-build-%: gen-repo-mod-replace + @# smithy-build- command that uses the pattern to define build filter that + @# the smithy API model service id starts with. Strips off the + @# "smithy-build-". + @# + @# e.g. smithy-build-com.amazonaws.rds + @# e.g. smithy-build-com.amazonaws.rds#AmazonRDSv19 + cd codegen && \ + SMITHY_GO_BUILD_API="$(subst smithy-build-,,$@)" ./gradlew clean build -Plog-tests + +smithy-annotate-stable: + go run ${REPOTOOLS_CMD_ANNOTATE_STABLE_GEN} + +smithy-clean: + cd codegen && ./gradlew clean + +smithy-go-publish-local: + rm -rf /tmp/smithy-go-local + git clone https://github.com/aws/smithy-go /tmp/smithy-go-local + make -C /tmp/smithy-go-local smithy-clean smithy-publish-local + +format: + gofmt -w -s . + +gen-config-asserts: + @echo "Generating SDK config package implementor assertions" + cd config \ + && go mod tidy \ + && go generate + +gen-internal-codegen: + @echo "Generating internal/codegen" + cd internal/codegen \ + && go generate + +gen-repo-mod-replace: + @echo "Generating go.mod replace for repo modules" + go run ${REPOTOOLS_CMD_MAKE_RELATIVE} + +gen-mod-replace-smithy: + cd ./internal/repotools/cmd/eachmodule \ + && go run . "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}" + +gen-mod-dropreplace-smithy: + cd ./internal/repotools/cmd/eachmodule \ + && go run . "go mod edit -dropreplace github.com/aws/smithy-go" + +gen-aws-ptrs: + cd aws && go generate + +tidy-modules-%: + @# tidy command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "tidy-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. tidy-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst tidy-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod tidy" + +download-modules-%: + @# download command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "download-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. download-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst download-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod download all" + +add-module-license-files: + cd internal/repotools/cmd/eachmodule && \ + go run . -skip-root \ + "cp $(LICENSE_FILE) ." + +sync-models: sync-endpoints-model sync-api-models + +sync-endpoints-model: sync-endpoints.json + +sync-endpoints.json: + [[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty" + +clone-v1-models: + rm -rf /tmp/aws-sdk-go-model-sync + git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync + +sync-api-models: + cd internal/repotools/cmd/syncAPIModels && \ + go run . 
\ + -m ${API_MODELS} \ + -o ${CODEGEN_API_MODELS_PATH} + +copy-attributevalue-feature: + cd ./feature/dynamodbstreams/attributevalue && \ + find . -name "*.go" | grep -v "doc.go" | xargs -I % rm % && \ + find ../../dynamodb/attributevalue -name "*.go" | grep -v "doc.go" | xargs -I % cp % . && \ + ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ + xargs -I % sed -i.bk -E 's:github.com/aws/aws-sdk-go-v2/(service|feature)/dynamodb:github.com/aws/aws-sdk-go-v2/\1/dynamodbstreams:g' % && \ + ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ + xargs -I % sed -i.bk 's:DynamoDB:DynamoDBStreams:g' % && \ + ls *.go | grep -v "doc.go" | \ + xargs -I % sed -i.bk 's:dynamodb\.:dynamodbstreams.:g' % && \ + sed -i.bk 's:streams\.:ddbtypes.:g' "convert.go" && \ + sed -i.bk 's:ddb\.:streams.:g' "convert.go" && \ + sed -i.bk 's:ddbtypes\.:ddb.:g' "convert.go" &&\ + sed -i.bk 's:Streams::g' "convert.go" && \ + rm -rf ./*.bk && \ + go mod tidy && \ + gofmt -w -s . && \ + go test . + +min-go-version-%: + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst min-go-version-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -go=${SDK_MIN_GO_VERSION}" + +update-requires: + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} + +update-module-metadata: + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} + +################ +# Unit Testing # +################ +.PHONY: unit unit-race unit-test unit-race-test unit-race-modules-% unit-modules-% build build-modules-% \ +go-build-modules-% test test-race-modules-% test-modules-% cachedep cachedep-modules-% api-diff-modules-% + +unit: lint unit-modules-. +unit-race: lint unit-race-modules-. + +unit-test: test-modules-. +unit-race-test: test-race-modules-. + +unit-race-modules-%: + @# unit command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "unit-race-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. unit-race-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst unit-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ + "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." + + +unit-modules-%: + @# unit command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "unit-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. unit-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst unit-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ + "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." + +build: build-modules-. + +build-modules-%: + @# build command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "build-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. build-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst build-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." + +go-build-modules-%: + @# build command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "build-modules-" and + @# replaces all "_" with "/". + @# + @# Validates that all modules in the repo have buildable Go files. + @# + @# e.g. 
go-build-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst go-build-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go build ${BUILD_TAGS} ./..." + +test: test-modules-. + +test-race-modules-%: + @# Test command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "test-race-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-race-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst test-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." + +test-modules-%: + @# Test command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "test-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst test-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." + +cachedep: cachedep-modules-. + +cachedep-modules-%: + @# build command that uses the pattern to define the root path that the + @# module caching will start from. Strips off the "cachedep-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. cachedep-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst cachedep-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod download" + +api-diff-modules-%: + @# Command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "api-diff-modules-" and + @# replaces all "_" with "/". + @# + @# Requires golang.org/x/exp/cmd/gorelease to be available in the GOPATH. + @# + @# e.g. api-diff-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst api-diff-modules-,,$@)) \ + -fail-fast=true \ + -c 1 \ + -skip="internal/repotools" \ + "$$(go env GOPATH)/bin/gorelease" + +############## +# CI Testing # +############## +.PHONY: ci-test ci-test-no-generate ci-test-generate-validate + +ci-test: generate unit-race ci-test-generate-validate +ci-test-no-generate: unit-race + +ci-test-generate-validate: + @echo "CI test validate no generated code changes" + git update-index --assume-unchanged go.mod go.sum + git add . -A + gitstatus=`git diff --cached --ignore-space-change`; \ + echo "$$gitstatus"; \ + if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi + git update-index --no-assume-unchanged go.mod go.sum + +ci-lint: ci-lint-. + +ci-lint-%: + @# Run golangci-lint command that uses the pattern to define the root path that the + @# module check will start from. Strips off the "ci-lint-" and + @# replaces all "_" with "/". + @# + @# e.g. ci-lint-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst ci-lint-,,$@)) \ + -fail-fast=false \ + -c 1 \ + -skip="internal/repotools" \ + "golangci-lint run" + +ci-lint-install: + @# Installs golangci-lint at GoPATH. + @# This should be used to run golangci-lint locally. 
+ @# + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +####################### +# Integration Testing # +####################### +.PHONY: integration integ-modules-% cleanup-integ-buckets + +integration: integ-modules-service + +integ-modules-%: + @# integration command that uses the pattern to define the root path that + @# the module testing will start from. Strips off the "integ-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-modules-service_dynamodb + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst integ-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=10m -tags "integration" -v ${RUN_INTEG} -count 1 ./..." + +cleanup-integ-buckets: + @echo "Cleaning up SDK integration resources" + go run -tags "integration" ./internal/awstesting/cmd/bucket_cleanup/main.go "aws-sdk-go-integration" + +############## +# Benchmarks # +############## +.PHONY: bench bench-modules-% + +bench: bench-modules-. + +bench-modules-%: + @# benchmark command that uses the pattern to define the root path that + @# the module testing will start from. Strips off the "bench-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. bench-modules-service_dynamodb + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..." + + +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +ls-changes: + go run ${REPOTOOLS_CMD_CHANGELOG} ls + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +############## +# Repo Tools # +############## +.PHONY: install-repotools + +install-repotools: + go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} + +set-smithy-go-version: + @if [[ -z "${SMITHY_GO_VERSION}" ]]; then \ + echo "SMITHY_GO_VERSION is required to update SDK's smithy-go module dependency version" && false; \ + fi + go run ${REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY} -s "github.com/aws/smithy-go" -v "${SMITHY_GO_VERSION}" + +################## +# Linting/Verify # +################## +.PHONY: verify lint vet vet-modules-% sdkv1check + +verify: lint vet sdkv1check + +lint: + @echo "go lint SDK and vendor packages" + @lint=`golint ./...`; \ + dolint=`echo "$$lint" | grep -E -v \ + -e ${LINT_IGNORE_S3MANAGER_INPUT} \ + -e ${LINTIGNORESINGLEFIGHT}`; \ + echo "$$dolint"; \ + if [ "$$dolint" != "" ]; then exit 1; fi + 
+vet: vet-modules-. + +vet-modules-%: + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst vet-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." + +sdkv1check: + @echo "Checking for usage of AWS SDK for Go v1" + @sdkv1usage=`go list -test -f '''{{ if not .Standard }}{{ range $$_, $$name := .Imports }} * {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ range $$_, $$name := .TestImports }} *: {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ end}}''' ./... | sort -u | grep '''/aws-sdk-go/'''`; \ + echo "$$sdkv1usage"; \ + if [ "$$sdkv1usage" != "" ]; then exit 1; fi + +list-deps: list-deps-. + +list-deps-%: + @# command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "list-deps-" and + @# replaces all "_" with "/". + @# + @# Trim output to only include stdout for list of dependencies only. + @# make list-deps 2>&- + @# + @# e.g. list-deps-internal_protocoltest + @cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \ + "go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u + +################### +# Sandbox Testing # +################### +.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip + +sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip + +sandbox-build-%: + @# sandbox-build-go1.17 + @# sandbox-build-gotip + docker build \ + -f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . +sandbox-run-%: sandbox-build-% + @# sandbox-run-go1.17 + @# sandbox-run-gotip + docker run -i -t "aws-sdk-go-$(subst sandbox-run-,,$@)" bash +sandbox-test-%: sandbox-build-% + @# sandbox-test-go1.17 + @# sandbox-test-gotip + docker run -t "aws-sdk-go-$(subst sandbox-test-,,$@)" + +update-aws-golang-tip: + docker build --no-cache=true -f ./internal/awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt new file mode 100644 index 000000000000..5f14d1162ed4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. 
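The pattern-style Makefile targets above (`tidy-modules-%`, `test-modules-%`, `unit-modules-%`, and friends) all derive the module root the same way: strip the target prefix, then replace `_` with `/` before handing the path to the `eachmodule` runner. A minimal Go sketch of that name-to-path conversion (illustrative only; `moduleRootFromTarget` is a hypothetical helper, not part of this repository):

```go
package main

import (
	"fmt"
	"strings"
)

// moduleRootFromTarget mimics the Makefile's
// $(subst _,/,$(subst <prefix>,,$@)) expansion: drop the target prefix,
// then map "_" to "/".
func moduleRootFromTarget(target, prefix string) string {
	return strings.ReplaceAll(strings.TrimPrefix(target, prefix), "_", "/")
}

func main() {
	// e.g. "make tidy-modules-internal_protocoltest" walks the modules
	// under internal/protocoltest.
	fmt.Println(moduleRootFromTarget("tidy-modules-internal_protocoltest", "tidy-modules-"))
}
```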
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md
new file mode 100644
index 000000000000..bb4349fea270
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md
@@ -0,0 +1,155 @@
+# AWS SDK for Go v2
+
+[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
+
+
+`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language.
+
+The v2 SDK requires a minimum version of `Go 1.15`.
+
+Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug
+fixes, updates, and features added to the SDK.
+
+Jump To:
+* [Getting Started](#getting-started)
+* [Getting Help](#getting-help)
+* [Contributing](#feedback-and-contributing)
+* [More Resources](#resources)
+
+## Maintenance and support for SDK major versions
+
+For information about maintenance and support for SDK major versions and their underlying dependencies, see the
+following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
+
+* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html)
+* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html)
+
+## Getting started
+To get started working with the SDK, set up your project for Go modules and retrieve the SDK dependencies with `go get`.
+This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client.
+
+###### Initialize Project
+```sh
+$ mkdir ~/helloaws
+$ cd ~/helloaws
+$ go mod init helloaws
+```
+###### Add SDK Dependencies
+```sh
+$ go get github.com/aws/aws-sdk-go-v2/aws
+$ go get github.com/aws/aws-sdk-go-v2/config
+$ go get github.com/aws/aws-sdk-go-v2/service/dynamodb
+```
+
+###### Write Code
+In your preferred editor, add the following content to `main.go`
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "log"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/config"
+    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+func main() {
+    // Using the SDK's default configuration, loading additional config
+    // and credentials values from the environment variables, shared
+    // credentials, and shared configuration files
+    cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
+    if err != nil {
+        log.Fatalf("unable to load SDK config, %v", err)
+    }
+
+    // Using the Config value, create the DynamoDB client
+    svc := dynamodb.NewFromConfig(cfg)
+
+    // Build the request with its input parameters
+    resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
+        Limit: aws.Int32(5),
+    })
+    if err != nil {
+        log.Fatalf("failed to list tables, %v", err)
+    }
+
+    fmt.Println("Tables:")
+    for _, tableName := range resp.TableNames {
+        fmt.Println(tableName)
+    }
+}
+```
+
+###### Compile and Execute
+```sh
+$ go run .
+Tables:
+tableOne
+tableTwo
+```
+
+## Getting Help
+
+Please use these community resources for getting help. We use GitHub issues
+for tracking bugs and feature requests.
+
+* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag.
+* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
+* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose).
+
+This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/).
+
+### Opening Issues
+
+If you encounter a bug with the AWS SDK for Go, we would like to hear about it.
+Search the [existing issues][Issues] and see
+if others are also experiencing the issue before opening a new issue. Please
+include the version of AWS SDK for Go, Go language, and OS you’re using. Please
+also include a reproduction case when appropriate.
+
+The GitHub issues are intended for bug reports and feature requests. For help
+and questions with using the AWS SDK for Go, please make use of the resources listed
+in the [Getting Help](#getting-help) section.
+Keeping the list of open issues lean will help us respond in a timely manner.
+
+## Feedback and contributing
+
+The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
+
+**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
+
+**Contributing**. You can open pull requests for fixes or additions to the AWS SDK for Go 2.0.
All pull requests must be submitted under the Apache 2.0 license and will be reviewed by an SDK team member before being merged in. Accompanying unit tests, where possible, are appreciated.
+
+## Resources
+
+[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and
+use the AWS SDK for Go V2.
+
+[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this
+document to look up all API operation input and output parameters for AWS
+services supported by the SDK. The API reference also includes documentation of
+the SDK, and examples of how to use the SDK, service client API operations, and
+API operation required parameters.
+
+[Service Documentation](https://aws.amazon.com/documentation/) - Use this
+documentation to learn how to interface with AWS services. These guides are
+great for getting started with a service, or when looking for more
+information about a service. While these guides are not required for coding,
+they often include helpful samples.
+
+[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
+
+[Issues] - Report issues, submit pull requests, and get involved
+  (see [Apache 2.0 License][license])
+
+[Dep]: https://github.com/golang/dep
+[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
+[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
+[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md
+[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
+[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md
+[license]: http://aws.amazon.com/apache2.0/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
new file mode 100644
index 000000000000..fe63fedadd68
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
@@ -0,0 +1,92 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+    "errors"
+    "strings"
+)
+
+const (
+    arnDelimiter = ":"
+    arnSections  = 6
+    arnPrefix    = "arn:"
+
+    // zero-indexed
+    sectionPartition = 1
+    sectionService   = 2
+    sectionRegion    = 3
+    sectionAccountID = 4
+    sectionResource  = 5
+
+    // errors
+    invalidPrefix   = "arn: invalid prefix"
+    invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+    // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+    // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+    // (Beijing) region is "aws-cn".
+    Partition string
+
+    // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+    // namespaces, see
+    // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+    Service string
+
+    // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+    // component might be omitted.
+    Region string
+
+    // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+    // ARNs for some resources don't require an account number, so this component might be omitted.
+ AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an arn +// by looking for whether the string starts with arn: +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go new file mode 100644 index 000000000000..df2abb58cd6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -0,0 +1,166 @@ +package aws + +import ( + "net/http" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// HTTPClient provides the interface to provide custom HTTPClients. Generally +// *http.Client is sufficient for most use cases. The HTTPClient should not +// follow redirects. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// A Config provides service configuration for service clients. +type Config struct { + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for + // information on AWS regions. + Region string + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials CredentialsProvider + + // The HTTP Client the SDK's API clients will use to invoke HTTP requests. + // The SDK defaults to a BuildableClient allowing API clients to create + // copies of the HTTP Client for service specific customizations. + // + // Use a (*http.Client) for custom behavior. Using a custom http.Client + // will prevent the SDK from modifying the HTTP client. 
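+	//
+	// For example (an illustrative sketch, assuming a Config value cfg), a
+	// custom client that caps total request time:
+	//
+	//	cfg.HTTPClient = &http.Client{Timeout: 30 * time.Second}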
+	HTTPClient HTTPClient
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation for additional usage
+	// information.
+	//
+	// Deprecated: See Config.EndpointResolverWithOptions
+	EndpointResolver EndpointResolver
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// When EndpointResolverWithOptions is specified, it will be used by a
+	// service client rather than using EndpointResolver if also specified.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation for additional
+	// usage information.
+	EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// will make calling an operation that fails with a retryable error.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if Retryer is
+	// not nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if Retryer is
+	// not nil.
+	RetryMode RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	//
+	// In general, the provider function should return a new instance of a
+	// Retryer if you are attempting to provide a consistent Retryer
+	// configuration across all clients. This will ensure that each client will
+	// be provided a new instance of the Retryer implementation, and will avoid
+	// issues such as sharing the same retry token bucket across services.
+	//
+	// If not nil, RetryMaxAttempts and RetryMode will be ignored by API
+	// clients.
+	Retryer func() Retryer
+
+	// ConfigSources are the sources that were used to construct the Config.
+	// Allows for additional configuration to be loaded by clients.
+	ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations to modify how the API
+	// client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard error.
+	Logger logging.Logger
+
+	// Configures the events that will be sent to the configured logger. This
+	// can be used to configure the logging of signing, retries, request, and
+	// responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode ClientLogMode
+
+	// The configured DefaultsMode. If not specified, service clients will
+	// default to legacy.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile,
+	// standard
+	DefaultsMode DefaultsMode
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode
+	// is set to DefaultsModeAuto and is initialized by
+	// `config.LoadDefaultConfig`. You should not populate this structure
+	// programmatically, or rely on the values here within your applications.
+ RuntimeEnvironment RuntimeEnvironment +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +func NewConfig() *Config { + return &Config{} +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c Config) Copy() Config { + cp := c + return cp +} + +// EndpointDiscoveryEnableState indicates if endpoint discovery is +// enabled, disabled, auto or unset state. +// +// Default behavior (Auto or Unset) indicates operations that require endpoint +// discovery will use Endpoint Discovery by default. Operations that +// optionally use Endpoint Discovery will not use Endpoint Discovery +// unless EndpointDiscovery is explicitly enabled. +type EndpointDiscoveryEnableState uint + +// Enumeration values for EndpointDiscoveryEnableState +const ( + // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset. + // Users do not need to use this value explicitly. The behavior for unset + // is the same as for EndpointDiscoveryAuto. + EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota + + // EndpointDiscoveryAuto represents an AUTO state that allows endpoint + // discovery only when required by the api. This is the default + // configuration resolved by the client if endpoint discovery is neither + // enabled or disabled. + EndpointDiscoveryAuto // default state + + // EndpointDiscoveryDisabled indicates client MUST not perform endpoint + // discovery even when required. + EndpointDiscoveryDisabled + + // EndpointDiscoveryEnabled indicates client MUST always perform endpoint + // discovery if supported for the operation. + EndpointDiscoveryEnabled +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go new file mode 100644 index 000000000000..4d8e26ef3215 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go @@ -0,0 +1,22 @@ +package aws + +import ( + "context" + "time" +) + +type suppressedContext struct { + context.Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go new file mode 100644 index 000000000000..dfd2b1ddbff3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -0,0 +1,218 @@ +package aws + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" +) + +// CredentialsCacheOptions are the options +type CredentialsCacheOptions struct { + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // An ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. This can cause an + // increased number of requests to refresh the credentials to occur. + // + // If ExpiryWindow is 0 or less it will be ignored. 
+	ExpiryWindow time.Duration
+
+	// ExpiryWindowJitterFrac provides a mechanism for randomizing the
+	// expiration of credentials within the configured ExpiryWindow by a random
+	// percentage. Valid values are between 0.0 and 1.0.
+	//
+	// As an example, if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
+	// is 0.5 then credentials will be set to expire between 30 and 60 seconds
+	// prior to their actual expiration time.
+	//
+	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+	// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+	// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+	ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency-safe credentials retrieval
+// via the provider's retrieve method.
+//
+// CredentialsCache will look for optional interfaces on the Provider to adjust
+// how the credential cache handles credentials caching.
+//
+// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
+// credential refresh failures. This could return an updated Credentials
+// value, or attempt another means of retrieving credentials.
+//
+// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
+// credentials Expires is modified. This could modify how the Credentials
+// Expires is adjusted based on the CredentialsCache ExpiryWindow option.
+// Such as providing a floor not to reduce the Expires below.
+type CredentialsCache struct {
+	provider CredentialsProvider
+
+	options CredentialsCacheOptions
+	creds   atomic.Value
+	sf      singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
+// is expected to not be nil. A variadic list of one or more functions can be
+// provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+	options := CredentialsCacheOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ExpiryWindow < 0 {
+		options.ExpiryWindow = 0
+	}
+
+	if options.ExpiryWindowJitterFrac < 0 {
+		options.ExpiryWindowJitterFrac = 0
+	} else if options.ExpiryWindowJitterFrac > 1 {
+		options.ExpiryWindowJitterFrac = 1
+	}
+
+	return &CredentialsCache{
+		provider: provider,
+		options:  options,
+	}
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved and have not expired, the cached credentials will be returned. If
+// the credentials have not been retrieved yet, or have expired, the provider's
+// Retrieve method will be called.
+//
+// Returns an error if the provider's retrieve method returns an error.
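+//
+// A minimal usage sketch (illustrative; p is an assumed CredentialsProvider
+// value):
+//
+//	cache := NewCredentialsCache(p, func(o *CredentialsCacheOptions) {
+//		o.ExpiryWindow = 10 * time.Second
+//	})
+//	creds, err := cache.Retrieve(context.TODO())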
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+	if creds, ok := p.getCreds(); ok && !creds.Expired() {
+		return creds, nil
+	}
+
+	resCh := p.sf.DoChan("", func() (interface{}, error) {
+		return p.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Credentials), res.Err
+	case <-ctx.Done():
+		return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+	}
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	currCreds, ok := p.getCreds()
+	if ok && !currCreds.Expired() {
+		return currCreds, nil
+	}
+
+	newCreds, err := p.provider.Retrieve(ctx)
+	if err != nil {
+		handleFailToRefresh := defaultHandleFailToRefresh
+		if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
+			handleFailToRefresh = cs.HandleFailToRefresh
+		}
+		newCreds, err = handleFailToRefresh(ctx, currCreds, err)
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
+		}
+	}
+
+	if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
+		adjustExpiresBy := defaultAdjustExpiresBy
+		if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
+			adjustExpiresBy = cs.AdjustExpiresBy
+		}
+
+		randFloat64, err := sdkrand.CryptoRandFloat64()
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
+		}
+
+		var jitter time.Duration
+		if p.options.ExpiryWindowJitterFrac > 0 {
+			jitter = time.Duration(randFloat64 *
+				p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+		}
+
+		newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
+		}
+	}
+
+	p.creds.Store(&newCreds)
+	return newCreds, nil
+}
+
+// getCreds returns the currently stored credentials and true. It returns
+// zero-value Credentials and false if no credentials were stored.
+func (p *CredentialsCache) getCreds() (Credentials, bool) {
+	v := p.creds.Load()
+	if v == nil {
+		return Credentials{}, false
+	}
+
+	c := v.(*Credentials)
+	if c == nil || !c.HasKeys() {
+		return Credentials{}, false
+	}
+
+	return *c, true
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+	p.creds.Store((*Credentials)(nil))
+}
+
+// HandleFailRefreshCredentialsCacheStrategy is an interface that allows a
+// CredentialsProvider wrapped by CredentialsCache to control how a failure to
+// refresh credentials is handled.
+type HandleFailRefreshCredentialsCacheStrategy interface {
+	// Given the previously cached Credentials, if any, and the refresh error,
+	// may return a new or modified set of Credentials, or an error.
+	//
+	// Credential caches may use the default implementation if nil.
+	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
+}
+
+// defaultHandleFailToRefresh returns the passed in error.
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
+	return Credentials{}, err
+}
+
+// AdjustExpiresByCredentialsCacheStrategy is an interface that allows a
+// CredentialsProvider wrapped by CredentialsCache to intercept adjustments to
+// the Credentials expiry based on the expectations and use cases of the
+// CredentialsProvider.
+//
+// Credential caches may use the default implementation if nil.
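+//
+// An illustrative sketch (hypothetical floorProvider type) that keeps the
+// expiry window from pulling Expires below a fixed floor:
+//
+//	func (p *floorProvider) AdjustExpiresBy(c aws.Credentials, d time.Duration) (aws.Credentials, error) {
+//		if adjusted := c.Expires.Add(d); adjusted.After(p.floor) {
+//			c.Expires = adjusted
+//		} else {
+//			c.Expires = p.floor
+//		}
+//		return c, nil
+//	}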
+type AdjustExpiresByCredentialsCacheStrategy interface { + // Given a Credentials as input, applying any mutations and + // returning the potentially updated Credentials, or error. + AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) +} + +// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, +// and returns the updated credentials value. If Credentials value's CanExpire +// is false, the passed in credentials are returned unchanged. +func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { + if !creds.CanExpire { + return creds, nil + } + + creds.Expires = creds.Expires.Add(dur) + return creds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go new file mode 100644 index 000000000000..0fffc53e671c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -0,0 +1,131 @@ +package aws + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +// AnonymousCredentials provides a sentinel CredentialsProvider that should be +// used to instruct the SDK's signing middleware to not sign the request. +// +// Using `nil` credentials when configuring an API client will achieve the same +// result. The AnonymousCredentials type allows you to configure the SDK's +// external config loading to not attempt to source credentials from the shared +// config or environment. +// +// For example you can use this CredentialsProvider with an API client's +// Options to instruct the client not to sign a request for accessing public +// S3 bucket objects. +// +// The following example demonstrates using the AnonymousCredentials to prevent +// SDK's external config loading attempt to resolve credentials. +// +// cfg, err := config.LoadDefaultConfig(context.TODO(), +// config.WithCredentialsProvider(aws.AnonymousCredentials{}), +// ) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } +// +// client := s3.NewFromConfig(cfg) +// +// Alternatively you can leave the API client Option's `Credential` member to +// nil. If using the `NewFromConfig` constructor you'll need to explicitly set +// the `Credentials` member to nil, if the external config resolved a +// credential provider. +// +// client := s3.New(s3.Options{ +// // Credentials defaults to a nil value. +// }) +// +// This can also be configured for specific operations calls too. +// +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } +// +// client := s3.NewFromConfig(config) +// +// result, err := client.GetObject(context.TODO(), s3.GetObject{ +// Bucket: aws.String("example-bucket"), +// Key: aws.String("example-key"), +// }, func(o *s3.Options) { +// o.Credentials = nil +// // Or +// o.Credentials = aws.AnonymousCredentials{} +// }) +type AnonymousCredentials struct{} + +// Retrieve implements the CredentialsProvider interface, but will always +// return error, and cannot be used to sign a request. The AnonymousCredentials +// type is used as a sentinel type instructing the AWS request signing +// middleware to not sign a request. 
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { + return Credentials{Source: "AnonymousCredentials"}, + fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") +} + +// A Credentials is the AWS credentials value for individual credential fields. +type Credentials struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Source of the credentials + Source string + + // States if the credentials can expire or not. + CanExpire bool + + // The time the credentials will expire at. Should be ignored if CanExpire + // is false. + Expires time.Time +} + +// Expired returns if the credentials have expired. +func (v Credentials) Expired() bool { + if v.CanExpire { + // Calling Round(0) on the current time will truncate the monotonic + // reading only. Ensures credential expiry time is always based on + // reported wall-clock time. + return !v.Expires.After(sdk.NowTime().Round(0)) + } + + return false +} + +// HasKeys returns if the credentials keys are set. +func (v Credentials) HasKeys() bool { + return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 +} + +// A CredentialsProvider is the interface for any component which will provide +// credentials Credentials. A CredentialsProvider is required to manage its own +// Expired state, and what to be expired means. +// +// A credentials provider implementation can be wrapped with a CredentialCache +// to cache the credential value retrieved. Without the cache the SDK will +// attempt to retrieve the credentials for every request. +type CredentialsProvider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve(ctx context.Context) (Credentials, error) +} + +// CredentialsProviderFunc provides a helper wrapping a function value to +// satisfy the CredentialsProvider interface. +type CredentialsProviderFunc func(context.Context) (Credentials, error) + +// Retrieve delegates to the function value the CredentialsProviderFunc wraps. +func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { + return fn(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go new file mode 100644 index 000000000000..fd408e518600 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go @@ -0,0 +1,38 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "runtime" + "strings" +) + +var getGOOS = func() string { + return runtime.GOOS +} + +// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode +// is set to aws.DefaultsModeAuto. 
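+//
+// For example (illustrative), a client region that matches the detected
+// runtime region resolves to the in-region mode:
+//
+//	mode := ResolveDefaultsModeAuto("us-west-2", aws.RuntimeEnvironment{
+//		EC2InstanceMetadataRegion: "us-west-2",
+//	})
+//	// mode == aws.DefaultsModeInRegion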
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode { + goos := getGOOS() + if goos == "android" || goos == "ios" { + return aws.DefaultsModeMobile + } + + var currentRegion string + if len(environment.EnvironmentIdentifier) > 0 { + currentRegion = environment.Region + } + + if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 { + currentRegion = environment.EC2InstanceMetadataRegion + } + + if len(region) > 0 && len(currentRegion) > 0 { + if strings.EqualFold(region, currentRegion) { + return aws.DefaultsModeInRegion + } + return aws.DefaultsModeCrossRegion + } + + return aws.DefaultsModeStandard +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go new file mode 100644 index 000000000000..8b7e01fa29a8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go @@ -0,0 +1,43 @@ +package defaults + +import ( + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// Configuration is the set of SDK configuration options that are determined based +// on the configured DefaultsMode. +type Configuration struct { + // RetryMode is the configuration's default retry mode API clients should + // use for constructing a Retryer. + RetryMode aws.RetryMode + + // ConnectTimeout is the maximum amount of time a dial will wait for + // a connect to complete. + // + // See https://pkg.go.dev/net#Dialer.Timeout + ConnectTimeout *time.Duration + + // TLSNegotiationTimeout specifies the maximum amount of time waiting to + // wait for a TLS handshake. + // + // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout + TLSNegotiationTimeout *time.Duration +} + +// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set. +func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { + if c.ConnectTimeout == nil { + return 0, false + } + return *c.ConnectTimeout, true +} + +// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. +func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { + if c.TLSNegotiationTimeout == nil { + return 0, false + } + return *c.TLSNegotiationTimeout, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go new file mode 100644 index 000000000000..dbaa873dc899 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go @@ -0,0 +1,50 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. + +package defaults + +import ( + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "time" +) + +// GetModeConfiguration returns the default Configuration descriptor for the given mode. 
+// +// Supports the following modes: cross-region, in-region, mobile, standard +func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { + var mv aws.DefaultsMode + mv.SetFromString(string(mode)) + + switch mv { + case aws.DefaultsModeCrossRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeInRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(1100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeMobile: + settings := Configuration{ + ConnectTimeout: aws.Duration(30000 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeStandard: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + default: + return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go new file mode 100644 index 000000000000..2d90011b426f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go @@ -0,0 +1,2 @@ +// Package defaults provides recommended configuration values for AWS SDKs and CLIs. +package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go new file mode 100644 index 000000000000..fcf9387c281a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go @@ -0,0 +1,95 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. + +package aws + +import ( + "strings" +) + +// DefaultsMode is the SDK defaults mode setting. +type DefaultsMode string + +// The DefaultsMode constants. +const ( + // DefaultsModeAuto is an experimental mode that builds on the standard mode. + // The SDK will attempt to discover the execution environment to determine the + // appropriate settings automatically. + // + // Note that the auto detection is heuristics-based and does not guarantee 100% + // accuracy. STANDARD mode will be used if the execution environment cannot + // be determined. The auto detection might query EC2 Instance Metadata service + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html), + // which might introduce latency. Therefore we recommend choosing an explicit + // defaults_mode instead if startup latency is critical to your application + DefaultsModeAuto DefaultsMode = "auto" + + // DefaultsModeCrossRegion builds on the standard mode and includes optimization + // tailored for applications which call AWS services in a different region + // + // Note that the default values vended from this mode might change as best practices + // may evolve. 
As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeCrossRegion DefaultsMode = "cross-region" + + // DefaultsModeInRegion builds on the standard mode and includes optimization + // tailored for applications which call AWS services from within the same AWS + // region + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeInRegion DefaultsMode = "in-region" + + // DefaultsModeLegacy provides default settings that vary per SDK and were used + // prior to establishment of defaults_mode + DefaultsModeLegacy DefaultsMode = "legacy" + + // DefaultsModeMobile builds on the standard mode and includes optimization + // tailored for mobile applications + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeMobile DefaultsMode = "mobile" + + // DefaultsModeStandard provides the latest recommended default values that + // should be safe to run in most scenarios + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeStandard DefaultsMode = "standard" +) + +// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches +// the provided string when compared using EqualFold. If the value does not match a known +// constant it will be set to as-is and the function will return false. As a special case, if the +// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode. +func (d *DefaultsMode) SetFromString(v string) (ok bool) { + switch { + case strings.EqualFold(v, string(DefaultsModeAuto)): + *d = DefaultsModeAuto + ok = true + case strings.EqualFold(v, string(DefaultsModeCrossRegion)): + *d = DefaultsModeCrossRegion + ok = true + case strings.EqualFold(v, string(DefaultsModeInRegion)): + *d = DefaultsModeInRegion + ok = true + case strings.EqualFold(v, string(DefaultsModeLegacy)): + *d = DefaultsModeLegacy + ok = true + case strings.EqualFold(v, string(DefaultsModeMobile)): + *d = DefaultsModeMobile + ok = true + case strings.EqualFold(v, string(DefaultsModeStandard)): + *d = DefaultsModeStandard + ok = true + case len(v) == 0: + *d = DefaultsModeLegacy + ok = true + default: + *d = DefaultsMode(v) + } + return ok +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go new file mode 100644 index 000000000000..befc3bee1a7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go @@ -0,0 +1,62 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. 
So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//    var strPtr *string
+//
+//    // Without the SDK's conversion functions
+//    str := "my string"
+//    strPtr = &str
+//
+//    // With the SDK's conversion functions
+//    strPtr = aws.String("my string")
+//
+//    // Convert *string to string value
+//    str = aws.ToString(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities
+// for maps and slices of commonly used types in API parameters. The map and
+// slice conversion functions use a similar naming pattern to the scalar
+// conversion functions.
+//
+//    var strPtrs []*string
+//    var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//    // Convert []string to []*string
+//    strPtrs = aws.StringSlice(strs)
+//
+//    // Convert []*string to []string
+//    strs = aws.ToStringSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 000000000000..aa10a9b40f0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,229 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+	type iface interface {
+		GetUseDualStackEndpoint() DualStackEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseDualStackEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
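+//
+// The GetUseFIPSEndpoint helper below discovers this state by type-asserting
+// each provided options value. An options type conforms by exposing a getter,
+// for example (an illustrative, hypothetical type):
+//
+//	type resolverOptions struct{ UseFIPS FIPSEndpointState }
+//
+//	func (o resolverOptions) GetUseFIPSEndpoint() FIPSEndpointState { return o.UseFIPS }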
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+	type iface interface {
+		GetUseFIPSEndpoint() FIPSEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseFIPSEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolver. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+type Endpoint struct {
+	// The base URL endpoint the SDK API clients will use to make API calls to.
+	// The SDK will suffix URI path and query elements to this endpoint.
+	URL string
+
+	// Specifies if the endpoint's hostname can be modified by the SDK's API
+	// client.
+	//
+	// If the hostname is mutable, the SDK API clients may modify any part of
+	// the hostname based on the requirements of the API (e.g. adding or
+	// removing content in the hostname). For example, the Amazon S3 API client
+	// prefixing "bucketname" to the hostname, or changing the
+	// hostname service name component from "s3." to "s3-accesspoint.dualstack."
+	// for the dualstack endpoint of an S3 Accesspoint resource.
+	//
+	// Care should be taken when providing a custom endpoint for an API. If the
+	// endpoint hostname is mutable, and the client cannot modify the endpoint
+	// correctly, the operation call will most likely fail, or have undefined
+	// behavior.
+	//
+	// If hostname is immutable, the SDK API clients will not modify the
+	// hostname of the URL. This may cause the API client not to function
+	// correctly if the API requires the operation-specific hostname values
+	// to be used by the client.
+	//
+	// This flag does not modify the API client's behavior if this endpoint
+	// will be used instead of Endpoint Discovery, or if the endpoint will be
+	// used to perform Endpoint Discovery. That behavior is configured via the
+	// API Client's Options.
+	HostnameImmutable bool
+
+	// The AWS partition the endpoint belongs to.
+	PartitionID string
+
+	// The service name that should be used for signing the requests to the
+	// endpoint.
+	SigningName string
+
+	// The region that should be used for signing the request to the endpoint.
+	SigningRegion string
+
+	// The signing method that should be used for signing the requests to the
+	// endpoint.
+	SigningMethod string
+
+	// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+	// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+	// If source is not provided when providing a custom endpoint, the SDK may not
+	// perform required host mutations correctly.
The Source field should
+	// be used together with the HostnameImmutable property, per the usage
+	// requirements of the API.
+	Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+type EndpointSource int
+
+const (
+	// EndpointSourceServiceMetadata denotes that service-modeled endpoint metadata is used as the Endpoint Source.
+	EndpointSourceServiceMetadata EndpointSource = iota
+
+	// EndpointSourceCustom denotes that the endpoint is a custom endpoint. This source should be used when
+	// the user provides a custom endpoint to be used by the SDK.
+	EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fall back and attempt to use its internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+	Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+	return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: See EndpointResolverWithOptions
+type EndpointResolver interface {
+	ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: See EndpointResolverWithOptionsFunc
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+//
+// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+	return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+type EndpointResolverWithOptions interface {
+	ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+	return e(service, region, options...)
+}
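As a sketch of how `EndpointResolverWithOptionsFunc` is typically wired up, the resolver below pins one service to a local endpoint and defers everything else to the SDK's defaults; the service ID, URL, and helper name are illustrative assumptions:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
)

// newLocalS3Resolver returns a resolver that pins S3 to a local endpoint
// (e.g. a test double) and defers every other service to the SDK's
// internal default resolver via EndpointNotFoundError.
func newLocalS3Resolver() aws.EndpointResolverWithOptions {
	return aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" {
				return aws.Endpoint{
					URL:               "http://localhost:9000", // illustrative
					HostnameImmutable: true,
					Source:            aws.EndpointSourceCustom,
				}, nil
			}
			// Returning EndpointNotFoundError lets the API client fall back
			// to its internal default endpoint resolution.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})
}
```

With `config.LoadDefaultConfig`, a resolver like this would typically be supplied through `config.WithEndpointResolverWithOptions`.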
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS value.
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
+	type iface interface {
+		GetDisableHTTPS() bool
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetDisableHTTPS()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
+// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion value.
+func GetResolvedRegion(options ...interface{}) (value string, found bool) {
+	type iface interface {
+		GetResolvedRegion() string
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetResolvedRegion()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 000000000000..f390a08f9ffa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if the region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+	return "an AWS region is required, but was not found"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
new file mode 100644
index 000000000000..2394418e9bd5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
@@ -0,0 +1,365 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+	"github.com/aws/smithy-go/ptr"
+	"time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+	return ptr.ToBool(p)
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+	return ptr.ToBoolSlice(vs)
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+	return ptr.ToBoolMap(vs)
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+	return ptr.ToByte(p)
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+	return ptr.ToByteSlice(vs)
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+	return ptr.ToByteMap(vs)
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+	return ptr.ToString(p)
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string { + return ptr.ToStringSlice(vs) +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + return ptr.ToStringMap(vs) +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + return ptr.ToInt(p) +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + return ptr.ToIntSlice(vs) +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + return ptr.ToIntMap(vs) +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + return ptr.ToInt8(p) +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + return ptr.ToInt8Slice(vs) +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + return ptr.ToInt8Map(vs) +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + return ptr.ToInt16(p) +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + return ptr.ToInt16Slice(vs) +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + return ptr.ToInt16Map(vs) +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + return ptr.ToInt32(p) +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + return ptr.ToInt32Slice(vs) +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + return ptr.ToInt32Map(vs) +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + return ptr.ToInt64(p) +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. 
+func ToInt64Slice(vs []*int64) []int64 { + return ptr.ToInt64Slice(vs) +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + return ptr.ToInt64Map(vs) +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + return ptr.ToUint(p) +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + return ptr.ToUintSlice(vs) +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + return ptr.ToUintMap(vs) +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + return ptr.ToUint8(p) +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + return ptr.ToUint8Slice(vs) +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + return ptr.ToUint8Map(vs) +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + return ptr.ToUint16(p) +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + return ptr.ToUint16Slice(vs) +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + return ptr.ToUint16Map(vs) +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + return ptr.ToUint32(p) +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + return ptr.ToUint32Slice(vs) +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + return ptr.ToUint32Map(vs) +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + return ptr.ToUint64(p) +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. 
Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + return ptr.ToUint64Slice(vs) +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + return ptr.ToUint64Map(vs) +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + return ptr.ToFloat32(p) +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + return ptr.ToFloat32Slice(vs) +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + return ptr.ToFloat32Map(vs) +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + return ptr.ToFloat64(p) +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + return ptr.ToFloat64Slice(vs) +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + return ptr.ToFloat64Map(vs) +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + return ptr.ToTime(p) +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + return ptr.ToTimeSlice(vs) +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + return ptr.ToTimeMap(vs) +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + return ptr.ToDuration(p) +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + return ptr.ToDurationSlice(vs) +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. 
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + return ptr.ToDurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go new file mode 100644 index 000000000000..8dd3973bf5ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package aws + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go new file mode 100644 index 000000000000..9e34d26f2158 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -0,0 +1,117 @@ +// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. +const ( + LogSigning ClientLogMode = 1 << (64 - 1 - iota) + LogRetries + LogRequest + LogRequestWithBody + LogResponse + LogResponseWithBody + LogDeprecatedUsage + LogRequestEventMessage + LogResponseEventMessage +) + +// IsSigning returns whether the Signing logging mode bit is set +func (m ClientLogMode) IsSigning() bool { + return m&LogSigning != 0 +} + +// IsRetries returns whether the Retries logging mode bit is set +func (m ClientLogMode) IsRetries() bool { + return m&LogRetries != 0 +} + +// IsRequest returns whether the Request logging mode bit is set +func (m ClientLogMode) IsRequest() bool { + return m&LogRequest != 0 +} + +// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set +func (m ClientLogMode) IsRequestWithBody() bool { + return m&LogRequestWithBody != 0 +} + +// IsResponse returns whether the Response logging mode bit is set +func (m ClientLogMode) IsResponse() bool { + return m&LogResponse != 0 +} + +// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set +func (m ClientLogMode) IsResponseWithBody() bool { + return m&LogResponseWithBody != 0 +} + +// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set +func (m ClientLogMode) IsDeprecatedUsage() bool { + return m&LogDeprecatedUsage != 0 +} + +// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set +func (m ClientLogMode) IsRequestEventMessage() bool { + return m&LogRequestEventMessage != 0 +} + +// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set +func (m ClientLogMode) IsResponseEventMessage() bool { + return m&LogResponseEventMessage != 0 +} + +// ClearSigning clears the Signing logging mode bit +func (m *ClientLogMode) ClearSigning() { + *m &^= LogSigning +} + +// ClearRetries clears the Retries logging mode bit +func (m *ClientLogMode) ClearRetries() { + *m &^= LogRetries +} + +// ClearRequest clears the Request logging 
mode bit +func (m *ClientLogMode) ClearRequest() { + *m &^= LogRequest +} + +// ClearRequestWithBody clears the RequestWithBody logging mode bit +func (m *ClientLogMode) ClearRequestWithBody() { + *m &^= LogRequestWithBody +} + +// ClearResponse clears the Response logging mode bit +func (m *ClientLogMode) ClearResponse() { + *m &^= LogResponse +} + +// ClearResponseWithBody clears the ResponseWithBody logging mode bit +func (m *ClientLogMode) ClearResponseWithBody() { + *m &^= LogResponseWithBody +} + +// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit +func (m *ClientLogMode) ClearDeprecatedUsage() { + *m &^= LogDeprecatedUsage +} + +// ClearRequestEventMessage clears the RequestEventMessage logging mode bit +func (m *ClientLogMode) ClearRequestEventMessage() { + *m &^= LogRequestEventMessage +} + +// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit +func (m *ClientLogMode) ClearResponseEventMessage() { + *m &^= LogResponseEventMessage +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go new file mode 100644 index 000000000000..6ecc2231a122 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go @@ -0,0 +1,95 @@ +//go:build clientlogmode +// +build clientlogmode + +package main + +import ( + "fmt" + "log" + "os" + "strings" + "text/template" +) + +var config = struct { + ModeBits []string +}{ + // Items should be appended only to keep bit-flag positions stable + ModeBits: []string{ + "Signing", + "Retries", + "Request", + "RequestWithBody", + "Response", + "ResponseWithBody", + "DeprecatedUsage", + "RequestEventMessage", + "ResponseEventMessage", + }, +} + +func bitName(name string) string { + return strings.ToUpper(name[:1]) + name[1:] +} + +var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ + "symbolName": func(name string) string { + return "Log" + bitName(name) + }, + "bitName": bitName, +}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
+const (
+{{- range $index, $field := .ModeBits }}
+	{{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }}
+{{- end }}
+)
+{{ range $_, $field := .ModeBits }}
+// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set
+func (m ClientLogMode) Is{{- bitName $field }}() bool {
+	return m&{{- (symbolName $field) }} != 0
+}
+{{ end }}
+{{- range $_, $field := .ModeBits }}
+// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit
+func (m *ClientLogMode) Clear{{- bitName $field }}() {
+	*m &^= {{ (symbolName $field) }}
+}
+{{ end -}}
+`))
+
+func main() {
+	uniqueBitFields := make(map[string]struct{})
+
+	for _, bitName := range config.ModeBits {
+		if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok {
+			panic(fmt.Sprintf("duplicate bit field: %s", bitName))
+		}
+		// Store the lowered name so the case-insensitive duplicate check above works.
+		uniqueBitFields[strings.ToLower(bitName)] = struct{}{}
+	}
+
+	file, err := os.Create("logging.go")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer file.Close()
+
+	err = tmpl.Execute(file, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
new file mode 100644
index 000000000000..e6e87ac77764
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
@@ -0,0 +1,180 @@
+package middleware
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// RegisterServiceMetadata registers metadata about the service and operation into the middleware context
+// so that it is available at runtime for other middleware to introspect.
+type RegisterServiceMetadata struct {
+	ServiceID     string
+	SigningName   string
+	Region        string
+	OperationName string
+}
+
+// ID returns the middleware identifier.
+func (s *RegisterServiceMetadata) ID() string {
+	return "RegisterServiceMetadata"
+}
+
+// HandleInitialize registers service metadata information into the middleware context, allowing for introspection.
+func (s RegisterServiceMetadata) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+	if len(s.ServiceID) > 0 {
+		ctx = SetServiceID(ctx, s.ServiceID)
+	}
+	if len(s.SigningName) > 0 {
+		ctx = SetSigningName(ctx, s.SigningName)
+	}
+	if len(s.Region) > 0 {
+		ctx = setRegion(ctx, s.Region)
+	}
+	if len(s.OperationName) > 0 {
+		ctx = setOperationName(ctx, s.OperationName)
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// Service metadata keys for storing and looking up runtime stack information.
+type (
+	serviceIDKey     struct{}
+	signingNameKey   struct{}
+	signingRegionKey struct{}
+	regionKey        struct{}
+	operationNameKey struct{}
+	partitionIDKey   struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+	return v
+}
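As a sketch of the introspection this enables, a hypothetical initialize middleware can read the values registered by `RegisterServiceMetadata`; the middleware name and output format are illustrative. It would be registered to run after `RegisterServiceMetadata`, e.g. with `stack.Initialize.Add(operationLogger{}, middleware.After)`:

```go
package example

import (
	"context"
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

// operationLogger is a hypothetical initialize middleware that logs which
// service operation is being invoked, using the metadata registered by
// RegisterServiceMetadata earlier in the stack.
type operationLogger struct{}

func (operationLogger) ID() string { return "OperationLogger" }

func (operationLogger) HandleInitialize(
	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (middleware.InitializeOutput, middleware.Metadata, error) {
	fmt.Printf("calling %s.%s\n",
		awsmiddleware.GetServiceID(ctx),
		awsmiddleware.GetOperationName(ctx))
	return next.HandleInitialize(ctx, in)
}
```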
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+	return v
+}
+
+// GetSigningRegion retrieves the signing region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+	return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+	return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+	return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+	v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+	return v
+}
+
+// SetSigningName sets or modifies the signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the signing region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+// SetPartitionID sets the partition id of a resolved region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns an endpoint source if set on context
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+	v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+	return v
+}
+
+// SetEndpointSource sets endpoint source on context
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+	return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+	v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+	return v
+}
+
+// SetSigningCredentials sets the credentials used for signing on the context.
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context {
+	return middleware.WithStackValue(ctx, signingCredentialsKey{}, value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
new file mode 100644
index 000000000000..9bd0dfb15086
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -0,0 +1,168 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyrand "github.com/aws/smithy-go/rand"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for each logical API operation
+// invocation.
+type ClientRequestID struct{}
+
+// ID returns the identifier for ClientRequestID.
+func (r *ClientRequestID) ID() string {
+	return "ClientRequestID"
+}
+
+// HandleBuild attaches a unique operation invocation id for the operation to the request
+func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
+	if err != nil {
+		return out, metadata, err
+	}
+
+	const invocationIDHeader = "Amz-Sdk-Invocation-Id"
+	req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID)
+
+	return next.HandleBuild(ctx, in)
+}
+
+// RecordResponseTiming records the response timing for the SDK client requests.
+type RecordResponseTiming struct{} + +// ID is the middleware identifier +func (a *RecordResponseTiming) ID() string { + return "RecordResponseTiming" +} + +// HandleDeserialize calculates response metadata and clock skew +func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + responseAt := sdk.NowTime() + setResponseAt(&metadata, responseAt) + + var serverTime time.Time + + switch resp := out.RawResponse.(type) { + case *smithyhttp.Response: + respDateHeader := resp.Header.Get("Date") + if len(respDateHeader) == 0 { + break + } + var parseErr error + serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) + if parseErr != nil { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", + parseErr.Error()) + break + } + setServerTime(&metadata, serverTime) + } + + if !serverTime.IsZero() { + attemptSkew := serverTime.Sub(responseAt) + setAttemptSkew(&metadata, attemptSkew) + } + + return out, metadata, err +} + +type responseAtKey struct{} + +// GetResponseAt returns the time response was received at. +func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(responseAtKey{}).(time.Time) + return v, ok +} + +// setResponseAt sets the response time on the metadata. +func setResponseAt(metadata *middleware.Metadata, v time.Time) { + metadata.Set(responseAtKey{}, v) +} + +type serverTimeKey struct{} + +// GetServerTime returns the server time for response. +func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(serverTimeKey{}).(time.Time) + return v, ok +} + +// setServerTime sets the server time on the metadata. +func setServerTime(metadata *middleware.Metadata, v time.Time) { + metadata.Set(serverTimeKey{}, v) +} + +type attemptSkewKey struct{} + +// GetAttemptSkew returns Attempt clock skew for response from metadata. +func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { + v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) + return v, ok +} + +// setAttemptSkew sets the attempt clock skew on the metadata. +func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) { + metadata.Set(attemptSkewKey{}, v) +} + +// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack +func AddClientRequestIDMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&ClientRequestID{}, middleware.After) +} + +// AddRecordResponseTiming adds RecordResponseTiming middleware to the +// middleware stack. +func AddRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After) +} + +// rawResponseKey is the accessor key used to store and access the +// raw response within the response metadata. 
+type rawResponseKey struct{}
+
+// addRawResponse middleware adds the raw response onto the metadata
+type addRawResponse struct{}
+
+// ID returns the identifier for addRawResponse.
+func (m *addRawResponse) ID() string {
+	return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds the raw response onto the middleware metadata
+func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	metadata.Set(rawResponseKey{}, out.RawResponse)
+	return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// stores the raw response onto the metadata.
+func AddRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
+}
+
+// GetRawResponse returns the raw response set on metadata
+func GetRawResponse(metadata middleware.Metadata) interface{} {
+	return metadata.Get(rawResponseKey{})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
new file mode 100644
index 000000000000..ba262dadcd0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		os = "macos"
+	case "ios":
+		os = "ios"
+	default:
+		os = "other"
+	}
+	return os
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
new file mode 100644
index 000000000000..e14a1e4ecb9d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
@@ -0,0 +1,24 @@
+//go:build !go1.16
+// +build !go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		// Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
+		// For now declare this as "other" until we have a better detection mechanism.
+		fallthrough
+	default:
+		os = "other"
+	}
+	return os
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
new file mode 100644
index 000000000000..dd3391fe41e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
@@ -0,0 +1,27 @@
+package middleware
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// requestIDKey is used to retrieve the request id from response metadata
+type requestIDKey struct{}
+
+// SetRequestIDMetadata sets the provided request id on the middleware metadata
+func SetRequestIDMetadata(metadata *middleware.Metadata, id string) {
+	metadata.Set(requestIDKey{}, id)
+}
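A minimal sketch of the `SetRequestIDMetadata`/`GetRequestIDMetadata` pair defined here and below. In real use the metadata is populated by the retriever middleware and read back from an operation output's `ResultMetadata`; the metadata here is built by hand only to keep the example self-contained, and the request id value is made up:

```go
package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	// Normally this metadata is produced by the SDK during an operation call;
	// it is constructed directly here only to show the accessor pair.
	var md middleware.Metadata
	awsmiddleware.SetRequestIDMetadata(&md, "C4RQ2EXAMPLE")

	if id, ok := awsmiddleware.GetRequestIDMetadata(md); ok {
		fmt.Println("request id:", id) // request id: C4RQ2EXAMPLE
	}
}
```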
+// GetRequestIDMetadata retrieves the request id from middleware metadata.
+// It returns the request id string and a bool indicating whether the request id was set.
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(requestIDKey{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(requestIDKey{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
new file mode 100644
index 000000000000..7ce48c611cd1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddRequestIDRetrieverMiddleware adds request id retriever middleware
+func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	// insert the retriever before the operation deserializers so that it can
+	// inspect the raw response for request id headers
+	return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type requestIDRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *requestIDRetriever) ID() string {
+	return "RequestIDRetriever"
+}
+
+func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// Different headers that can map to the request id
+	requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
+
+	for _, h := range requestIDHeaderList {
+		// check for headers known to contain Request id
+		if v := resp.Header.Get(h); len(v) != 0 {
+			// set reqID on metadata for successful responses.
+			SetRequestIDMetadata(&metadata, v)
+			break
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
new file mode 100644
index 000000000000..d5adfec90bd4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -0,0 +1,241 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
+
+// SDKAgentKeyType is the metadata type to add to the SDK agent string
+type SDKAgentKeyType int
+
+// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
+// be mapped to AdditionalMetadata.
+const ( + _ SDKAgentKeyType = iota + APIMetadata + OperatingSystemMetadata + LanguageMetadata + EnvironmentMetadata + FeatureMetadata + ConfigMetadata + FrameworkMetadata + AdditionalMetadata + ApplicationIdentifier +) + +func (k SDKAgentKeyType) string() string { + switch k { + case APIMetadata: + return "api" + case OperatingSystemMetadata: + return "os" + case LanguageMetadata: + return "lang" + case EnvironmentMetadata: + return "exec-env" + case FeatureMetadata: + return "ft" + case ConfigMetadata: + return "cfg" + case FrameworkMetadata: + return "lib" + case ApplicationIdentifier: + return "app" + case AdditionalMetadata: + fallthrough + default: + return "md" + } +} + +const execEnvVar = `AWS_EXECUTION_ENV` + +// requestUserAgent is a build middleware that set the User-Agent for the request. +type requestUserAgent struct { + sdkAgent, userAgent *smithyhttp.UserAgentBuilder +} + +// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// request. +// +// User-Agent example: +// aws-sdk-go-v2/1.2.3 +// +// X-Amz-User-Agent example: +// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 +func newRequestUserAgent() *requestUserAgent { + userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() + addProductName(userAgent) + addProductName(sdkAgent) + + r := &requestUserAgent{ + sdkAgent: sdkAgent, + userAgent: userAgent, + } + + addSDKMetadata(r) + + return r +} + +func addSDKMetadata(r *requestUserAgent) { + r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) + r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) + if ev := os.Getenv(execEnvVar); len(ev) > 0 { + r.AddSDKAgentKey(EnvironmentMetadata, ev) + } +} + +func addProductName(builder *smithyhttp.UserAgentBuilder) { + builder.AddKeyValue(aws.SDKName, aws.SDKVersion) +} + +// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKey(key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKey(key) + return nil + } +} + +// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKeyValue(key, value) + return nil + } +} + +// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKey(keyType, key) + return nil + } +} + +// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. 
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
+		return nil
+	}
+}
+
+// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present.
+func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
+	_, err := getOrAddRequestUserAgent(stack)
+	return err
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
+	id := (*requestUserAgent)(nil).ID()
+	bm, ok := stack.Build.Get(id)
+	if !ok {
+		bm = newRequestUserAgent()
+		err := stack.Build.Add(bm, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := bm.(*requestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddUserAgentKey adds the component identified by name to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKey(key string) {
+	u.userAgent.AddKey(key)
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+	u.userAgent.AddKeyValue(key, value)
+}
+
+// AddSDKAgentKey adds the component identified by name and key type to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKey(keyType.string() + "/" + key)
+}
+
+// AddSDKAgentKeyValue adds the key and value pair, identified by the key type, to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKeyValue(keyType.string()+"/"+key, value)
+}
+
+// ID returns the name of the middleware.
+func (u *requestUserAgent) ID() string {
+	return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + switch req := in.Request.(type) { + case *smithyhttp.Request: + u.addHTTPUserAgent(req) + // TODO: To be re-enabled + // u.addHTTPSDKAgent(req) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + return next.HandleBuild(ctx, in) +} + +func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { + const userAgent = "User-Agent" + updateHTTPHeader(request, userAgent, u.userAgent.Build()) +} + +func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { + const sdkAgent = "X-Amz-User-Agent" + updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) +} + +func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { + var current string + if v := request.Header[header]; len(v) > 0 { + current = v[0] + } + if len(current) > 0 { + current = value + " " + current + } else { + current = value + } + request.Header[header] = append(request.Header[header][:0], current) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md new file mode 100644 index 000000000000..46ff9e511dd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -0,0 +1,26 @@ +# v1.4.1 (2022-03-24) + +* No change notes available for this release. + +# v1.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.0.0 (2021-11-06) + +* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. +* **Release**: Protocol support has been added for AWS event stream. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go new file mode 100644 index 000000000000..151054971a51 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return 
TimestampValue(timeFromEpochMilli(v)), err
+	case uuidValueType:
+		v, err := base64.StdEncoding.DecodeString(val.(string))
+		var tv UUIDValue
+		copy(tv[:], v)
+		return tv, err
+	default:
+		panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go
new file mode 100644
index 000000000000..d9ab7652f4a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go
@@ -0,0 +1,218 @@
+package eventstream
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"github.com/aws/smithy-go/logging"
+	"hash"
+	"hash/crc32"
+	"io"
+)
+
+// DecoderOptions is the set of configuration options for the Decoder.
+type DecoderOptions struct {
+	Logger      logging.Logger
+	LogMessages bool
+}
+
+// Decoder provides decoding of Event Stream messages.
+type Decoder struct {
+	options DecoderOptions
+}
+
+// NewDecoder initializes and returns a Decoder for decoding event
+// stream messages, configured with the provided option functions.
+func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder {
+	options := DecoderOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Decoder{
+		options: options,
+	}
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if decodeMessage fails to read
+// the message from the stream.
+//
+// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers
+// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying
+// payloadBuf byte slice.
+func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
+	if d.options.Logger != nil && d.options.LogMessages {
+		debugMsgBuf := bytes.NewBuffer(nil)
+		reader = io.TeeReader(reader, debugMsgBuf)
+		defer func() {
+			logMessageDecode(d.options.Logger, debugMsgBuf, m, err)
+		}()
+	}
+
+	m, err = decodeMessage(reader, payloadBuf)
+
+	return m, err
+}
+
+// decodeMessage attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if decodeMessage fails to read
+// the message from the reader.
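+// The wire format read here is an 8-byte prelude (total message length and
+// headers length, both big-endian uint32s), a 4-byte CRC of the prelude, the
+// encoded headers, the payload, and a trailing 4-byte CRC computed over all
+// preceding bytes of the message.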
+func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Logf(logging.Debug, w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return v, err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return b[0], err +} + +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} + +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} + +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go new file mode 100644 index 000000000000..f03ee4b934b0 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go
@@ -0,0 +1,167 @@
+package eventstream
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"github.com/aws/smithy-go/logging"
+	"hash"
+	"hash/crc32"
+	"io"
+)
+
+// EncoderOptions is the set of configuration options for the Encoder.
+type EncoderOptions struct {
+	Logger      logging.Logger
+	LogMessages bool
+}
+
+// Encoder provides EventStream message encoding.
+type Encoder struct {
+	options EncoderOptions
+
+	headersBuf *bytes.Buffer
+	messageBuf *bytes.Buffer
+}
+
+// NewEncoder initializes and returns an Encoder to encode Event Stream
+// messages.
+func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder {
+	o := EncoderOptions{}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &Encoder{
+		options:    o,
+		headersBuf: bytes.NewBuffer(nil),
+		messageBuf: bytes.NewBuffer(nil),
+	}
+}
+
+// Encode encodes a single EventStream message to the provided io.Writer.
+// An error is returned if writing the message fails.
+func (e *Encoder) Encode(w io.Writer, msg Message) (err error) {
+	e.headersBuf.Reset()
+	e.messageBuf.Reset()
+
+	var writer io.Writer = e.messageBuf
+	if e.options.Logger != nil && e.options.LogMessages {
+		encodeMsgBuf := bytes.NewBuffer(nil)
+		writer = io.MultiWriter(writer, encodeMsgBuf)
+		defer func() {
+			logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err)
+		}()
+	}
+
+	if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil {
+		return err
+	}
+
+	crc := crc32.New(crc32IEEETable)
+	hashWriter := io.MultiWriter(writer, crc)
+
+	headersLen := uint32(e.headersBuf.Len())
+	payloadLen := uint32(len(msg.Payload))
+
+	if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+		return err
+	}
+
+	if headersLen > 0 {
+		if _, err = io.Copy(hashWriter, e.headersBuf); err != nil {
+			return err
+		}
+	}
+
+	if payloadLen > 0 {
+		if _, err = hashWriter.Write(msg.Payload); err != nil {
+			return err
+		}
+	}
+
+	msgCRC := crc.Sum32()
+	if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil {
+		return err
+	}
+
+	_, err = io.Copy(w, e.messageBuf)
+
+	return err
+}
+
+func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
+	w := bytes.NewBuffer(nil)
+	defer func() { logger.Logf(logging.Debug, w.String()) }()
+
+	fmt.Fprintf(w, "Message to encode:\n")
+	encoder := json.NewEncoder(w)
+	if err := encoder.Encode(msg); err != nil {
+		fmt.Fprintf(w, "Failed to get encoded message, %v\n", err)
+	}
+
+	if encodeErr != nil {
+		fmt.Fprintf(w, "Encode error: %v\n", encodeErr)
+		return
+	}
+
+	fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes()))
+}
+
+func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
+	p := messagePrelude{
+		Length:     minMsgLen + headersLen + payloadLen,
+		HeadersLen: headersLen,
+	}
+	if err := p.ValidateLens(); err != nil {
+		return err
+	}
+
+	err := binaryWriteFields(w, binary.BigEndian,
+		p.Length,
+		p.HeadersLen,
+	)
+	if err != nil {
+		return err
+	}
+
+	p.PreludeCRC = crc.Sum32()
+	err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// EncodeHeaders writes the header values to the writer encoded in the event
+// stream format. Returns an error if a header fails to encode.
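+// Each header is written as a 1-byte name length, the name bytes, a 1-byte
+// value type, and the value's type-specific encoding (scalars big-endian;
+// byte slices and strings prefixed with a 2-byte length).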
+func EncodeHeaders(w io.Writer, headers Headers) error {
+	for _, h := range headers {
+		hn := headerName{
+			Len: uint8(len(h.Name)),
+		}
+		copy(hn.Name[:hn.Len], h.Name)
+		if err := hn.encode(w); err != nil {
+			return err
+		}
+
+		if err := h.Value.encode(w); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
+	for _, v := range vs {
+		if err := binary.Write(w, order, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
new file mode 100644
index 000000000000..5481ef30796d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
+package eventstream
+
+import "fmt"
+
+// LengthError provides the error for items being larger than a maximum length.
+type LengthError struct {
+	Part  string
+	Want  int
+	Have  int
+	Value interface{}
+}
+
+func (e LengthError) Error() string {
+	return fmt.Sprintf("%s length invalid, %d/%d, %v",
+		e.Part, e.Want, e.Have, e.Value)
+}
+
+// ChecksumError provides the error for message checksum invalidation errors.
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+	return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
new file mode 100644
index 000000000000..93ea71ffdf8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+	ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
+	DateHeader           = `:date`            // Date header for signature
+	ContentTypeHeader    = ":content-type"    // message payload content-type
+
+	// Message header and values
+	MessageTypeHeader    = `:message-type` // Identifies type of message.
+	EventMessageType     = `event`
+	ErrorMessageType     = `error`
+	ExceptionMessageType = `exception`
+
+	// Message Events
+	EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+	// Message Error
+	ErrorCodeHeader    = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
new file mode 100644
index 000000000000..d07ff6b89e14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
@@ -0,0 +1,71 @@
+package eventstreamapi
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+)
+
+type eventStreamWriterKey struct{}
+
+// GetInputStreamWriter returns the io.WriteCloser used for the operation's input event stream.
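+// It returns nil if no input stream writer has been set on the context.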
+func GetInputStreamWriter(ctx context.Context) io.WriteCloser {
+	writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser)
+	return writeCloser
+}
+
+func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context {
+	return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser)
+}
+
+// InitializeStreamWriter is a Finalize middleware that initializes an in-memory pipe for sending event stream messages
+// via the HTTP request body.
+type InitializeStreamWriter struct{}
+
+// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack.
+func AddInitializeStreamWriter(stack *middleware.Stack) error {
+	return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After)
+}
+
+// ID returns the identifier for the middleware.
+func (i *InitializeStreamWriter) ID() string {
+	return "InitializeStreamWriter"
+}
+
+// HandleFinalize is the middleware implementation.
+func (i *InitializeStreamWriter) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
+	}
+
+	inputReader, inputWriter := io.Pipe()
+	defer func() {
+		if err == nil {
+			return
+		}
+		_ = inputReader.Close()
+		_ = inputWriter.Close()
+	}()
+
+	request, err = request.SetStream(inputReader)
+	if err != nil {
+		return out, metadata, err
+	}
+	in.Request = request
+
+	ctx = setInputStreamWriter(ctx, inputWriter)
+
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go
new file mode 100644
index 000000000000..cbf5a28621b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go
@@ -0,0 +1,13 @@
+//go:build go1.18
+// +build go1.18
+
+package eventstreamapi
+
+import smithyhttp "github.com/aws/smithy-go/transport/http"
+
+// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
+//
+// This operation is a no-op for Go 1.18 and above.
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go
new file mode 100644
index 000000000000..7d10ec2ebff5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go
@@ -0,0 +1,12 @@
+//go:build !go1.18
+// +build !go1.18
+
+package eventstreamapi
+
+import smithyhttp "github.com/aws/smithy-go/transport/http"
+
+// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
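+// For Go versions prior to 1.18 this sets the Expect: 100-continue header on
+// the request.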
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { + r.Header.Set("Expect", "100-continue") + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go new file mode 100644 index 000000000000..bdf01173fdba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package eventstream + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.4.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go new file mode 100644 index 000000000000..f6f8c5674eda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go new file mode 100644 index 000000000000..423b6bb26c1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go @@ -0,0 +1,521 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
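+// Booleans are encoded as the type byte alone (trueValueType or
+// falseValueType); no value bytes follow.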
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. 
+func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + var scratch [36]byte + + const dash = '-' + + hex.Encode(scratch[:8], v[0:4]) + scratch[8] = dash + hex.Encode(scratch[9:13], v[4:6]) + scratch[13] = dash + hex.Encode(scratch[14:18], v[6:8]) + scratch[18] = dash + hex.Encode(scratch[19:23], v[8:10]) + scratch[23] = dash + hex.Encode(scratch[24:], v[10:]) + + return string(scratch[:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. +func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go new file mode 100644 index 000000000000..f7427da039e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. 
+func (m Message) Clone() Message {
+	var payload []byte
+	if m.Payload != nil {
+		payload = make([]byte, len(m.Payload))
+		copy(payload, m.Payload)
+	}
+
+	return Message{
+		Headers: m.Headers.Clone(),
+		Payload: payload,
+	}
+}
+
+type messagePrelude struct {
+	Length     uint32
+	HeadersLen uint32
+	PreludeCRC uint32
+}
+
+func (p messagePrelude) PayloadLen() uint32 {
+	return p.Length - p.HeadersLen - minMsgLen
+}
+
+func (p messagePrelude) ValidateLens() error {
+	if p.Length == 0 || p.Length > maxMsgLen {
+		return LengthError{
+			Part: "message prelude",
+			Want: maxMsgLen,
+			Have: int(p.Length),
+		}
+	}
+	if p.HeadersLen > maxHeadersLen {
+		return LengthError{
+			Part: "message headers",
+			Want: maxHeadersLen,
+			Have: int(p.HeadersLen),
+		}
+	}
+	if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
+		return LengthError{
+			Part: "message payload",
+			Want: maxPayloadLen,
+			Have: int(payloadLen),
+		}
+	}
+
+	return nil
+}
+
+type rawMessage struct {
+	messagePrelude
+
+	Headers []byte
+	Payload []byte
+
+	CRC uint32
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
new file mode 100644
index 000000000000..77dd4d8db8fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -0,0 +1,61 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+//	ListName.member.1=foo
+//	&ListName.member.2=bar
+//	&ListName.member.3=baz
+type Array struct {
+	// The query values to add the array to.
+	values url.Values
+	// The array's prefix, which includes the names of all parent structures
+	// and ends with the name of the list. For example, the prefix might be
+	// "ParentStructure.ListName". This prefix will be used to form the full
+	// keys for each element in the list. For example, an entry might have the
+	// key "ParentStructure.ListName.member.MemberName.1".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the list is flat or not. A list that is not flat will produce the
+	// following entry to the url.Values for a given entry:
+	//	ListName.MemberName.1=value
+	// A list that is flat will produce the following:
+	//	ListName.1=value
+	flat bool
+	// The location name of the member. In most cases this should be "member".
+	memberName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+	return &Array{
+		values:     values,
+		prefix:     prefix,
+		flat:       flat,
+		memberName: memberName,
+	}
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
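+// For example, a non-flat list with prefix "ListName" and member name
+// "member" yields the keys "ListName.member.1", "ListName.member.2", and so
+// on.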
+func (a *Array) Value() Value {
+	// Query lists start at 1, so adjust the size first
+	a.size++
+	prefix := a.prefix
+	if !a.flat {
+		prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+	}
+	// Lists can't have flat members
+	return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 000000000000..2ecf9241cdd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+	"io"
+	"net/url"
+	"sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+	// The query values that will be built up to manage encoding.
+	values url.Values
+	// The writer that the encoded body will be written to.
+	writer io.Writer
+	Value
+}
+
+// NewEncoder returns a new Query body encoder
+func NewEncoder(writer io.Writer) *Encoder {
+	values := url.Values{}
+	return &Encoder{
+		values: values,
+		writer: writer,
+		Value:  newBaseValue(values),
+	}
+}
+
+// Encode writes the current state of the Query encoder to the underlying
+// writer as a query string, returning an error if writing fails.
+func (e Encoder) Encode() error {
+	ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+	if !ok {
+		// Fall back to less optimal byte slice casting if WriteString isn't available.
+		ws = &wrapWriteString{writer: e.writer}
+	}
+
+	// Get the keys and sort them to have a stable output
+	keys := make([]string, 0, len(e.values))
+	for k := range e.values {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	isFirstEntry := true
+	for _, key := range keys {
+		queryValues := e.values[key]
+		escapedKey := url.QueryEscape(key)
+		for _, value := range queryValues {
+			if !isFirstEntry {
+				if _, err := ws.WriteString(`&`); err != nil {
+					return err
+				}
+			} else {
+				isFirstEntry = false
+			}
+			if _, err := ws.WriteString(escapedKey); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(`=`); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// wrapWriteString wraps an io.Writer to provide a WriteString method
+// where one is not available.
+type wrapWriteString struct {
+	writer io.Writer
+}
+
+// WriteString writes a string to the wrapped writer by casting it to
+// a byte array first.
+func (w wrapWriteString) WriteString(v string) (int, error) {
+	return w.writer.Write([]byte(v))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
new file mode 100644
index 000000000000..ab91e357bc9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
@@ -0,0 +1,78 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Map represents the encoding of Query maps. A Query map is a representation
+// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
+// A Map differs from an Object in that the set of keys is not fixed, in that
+// the values must all be of the same type, and that map entries are ordered.
+// A serialized map might look like the following:
+//
+//	MapName.entry.1.key=Foo
+//	&MapName.entry.1.value=spam
+//	&MapName.entry.2.key=Bar
+//	&MapName.entry.2.value=eggs
+type Map struct {
+	// The query values to add the map to.
+	values url.Values
+	// The map's prefix, which includes the names of all parent structures
+	// and ends with the name of the object. For example, the prefix might be
+	// "ParentStructure.MapName". This prefix will be used to form the full
+	// keys for each key-value pair of the map. For example, a value might have
+	// the key "ParentStructure.MapName.1.value".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the map is flat or not. A map that is not flat will produce the
+	// following entries to the url.Values for a given key-value pair:
+	//	MapName.entry.1.KeyLocationName=mykey
+	//	MapName.entry.1.ValueLocationName=myvalue
+	// A map that is flat will produce the following:
+	//	MapName.1.KeyLocationName=mykey
+	//	MapName.1.ValueLocationName=myvalue
+	flat bool
+	// The location name of the key. In most cases this should be "key".
+	keyLocationName string
+	// The location name of the value. In most cases this should be "value".
+	valueLocationName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+	return &Map{
+		values:            values,
+		prefix:            prefix,
+		flat:              flat,
+		keyLocationName:   keyLocationName,
+		valueLocationName: valueLocationName,
+	}
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+	// Query lists start at 1, so adjust the size first
+	m.size++
+	var key string
+	var value string
+	if m.flat {
+		key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+	} else {
+		key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+	}
+
+	// The key can only be a string, so we just go ahead and set it here
+	newValue(m.values, key, false).String(name)
+
+	// Maps can't have flat members
+	return newValue(m.values, value, false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
new file mode 100644
index 000000000000..36034479113b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
@@ -0,0 +1,62 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
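+// Any serialized body is drained and appended to the existing query string,
+// and the Content-Type header is cleared.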
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
+	return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
+}
+
+type asGetRequest struct{}
+
+func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
+
+func (m *asGetRequest) HandleSerialize(
+	ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
+	}
+
+	req.Method = "GET"
+
+	// If the stream is not set, nothing else to do.
+	stream := req.GetStream()
+	if stream == nil {
+		return next.HandleSerialize(ctx, input)
+	}
+
+	// Clear the stream since there will not be any body.
+	req.Header.Del("Content-Type")
+	req, err = req.SetStream(nil)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to update request body, %w", err)
+	}
+	input.Request = req
+
+	// Update request query with the body's query string value.
+	delim := ""
+	if len(req.URL.RawQuery) != 0 {
+		delim = "&"
+	}
+
+	b, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to get request body, %w", err)
+	}
+	req.URL.RawQuery += delim + string(b)
+
+	return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
new file mode 100644
index 000000000000..debb413dec99
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
@@ -0,0 +1,56 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Object represents the encoding of Query structures and unions. A Query
+// object is a representation of a mapping of string keys to arbitrary
+// values where there is a fixed set of keys whose values each have their
+// own known type. A serialized object might look like the following:
+//
+//	ObjectName.Foo=value
+//	&ObjectName.Bar=5
+type Object struct {
+	// The query values to add the object to.
+	values url.Values
+	// The object's prefix, which includes the names of all parent structures
+	// and ends with the name of the object. For example, the prefix might be
+	// "ParentStructure.ObjectName". This prefix will be used to form the full
+	// keys for each member of the object. For example, a member might have the
+	// key "ParentStructure.ObjectName.MemberName".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+}
+
+func newObject(values url.Values, prefix string) *Object {
+	return &Object{
+		values: values,
+		prefix: prefix,
+	}
+}
+
+// Key adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (o *Object) Key(name string) Value {
+	return o.key(name, false)
+}
+
+// FlatKey adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type. The
+// value will be flattened if it is a map or array.
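+// For example, a flat list named "Tags" under prefix "Parent" produces keys
+// of the form "Parent.Tags.1" rather than "Parent.Tags.member.1".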
+func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 000000000000..302525ab1014 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,106 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. + values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. +func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. 
+func (qv Value) Base64EncodeBytes(v []byte) {
+	qv.queryValue.Blob(v)
+}
+
+// Boolean encodes v as a query string value
+func (qv Value) Boolean(v bool) {
+	qv.queryValue.Boolean(v)
+}
+
+// String encodes v as a query string value
+func (qv Value) String(v string) {
+	qv.queryValue.String(v)
+}
+
+// Byte encodes v as a query string value
+func (qv Value) Byte(v int8) {
+	qv.queryValue.Byte(v)
+}
+
+// Short encodes v as a query string value
+func (qv Value) Short(v int16) {
+	qv.queryValue.Short(v)
+}
+
+// Integer encodes v as a query string value
+func (qv Value) Integer(v int32) {
+	qv.queryValue.Integer(v)
+}
+
+// Long encodes v as a query string value
+func (qv Value) Long(v int64) {
+	qv.queryValue.Long(v)
+}
+
+// Float encodes v as a query string value
+func (qv Value) Float(v float32) {
+	qv.queryValue.Float(v)
+}
+
+// Double encodes v as a query string value
+func (qv Value) Double(v float64) {
+	qv.queryValue.Double(v)
+}
+
+// BigInteger encodes v as a query string value
+func (qv Value) BigInteger(v *big.Int) {
+	qv.queryValue.BigInteger(v)
+}
+
+// BigDecimal encodes v as a query string value
+func (qv Value) BigDecimal(v *big.Float) {
+	qv.queryValue.BigDecimal(v)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
new file mode 100644
index 000000000000..1bce78a4d45b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
@@ -0,0 +1,85 @@
+package restjson
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	"github.com/aws/smithy-go"
+)
+
+// GetErrorInfo looks for the code, __type, and message members in the
+// json body. These members are optionally available, and the function
+// returns the value of a member if it is available. This function is useful
+// for identifying the error code and message in a REST JSON error response.
+func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
+	var errInfo struct {
+		Code    string
+		Type    string `json:"__type"`
+		Message string
+	}
+
+	err = decoder.Decode(&errInfo)
+	if err != nil {
+		if err == io.EOF {
+			return errorType, message, nil
+		}
+		return errorType, message, err
+	}
+
+	// assign error type
+	if len(errInfo.Code) != 0 {
+		errorType = errInfo.Code
+	} else if len(errInfo.Type) != 0 {
+		errorType = errInfo.Type
+	}
+
+	// assign error message
+	if len(errInfo.Message) != 0 {
+		message = errInfo.Message
+	}
+
+	// sanitize error
+	if len(errorType) != 0 {
+		errorType = SanitizeErrorCode(errorType)
+	}
+
+	return errorType, message, nil
+}
+
+// SanitizeErrorCode sanitizes the errorCode string.
+// The rule for sanitizing is if a `:` character is present, then take only the
+// contents before the first : character in the value.
+// If a # character is present, then take only the contents after the
+// first # character in the value.
+func SanitizeErrorCode(errorCode string) string {
+	if strings.ContainsAny(errorCode, ":") {
+		errorCode = strings.SplitN(errorCode, ":", 2)[0]
+	}
+
+	if strings.ContainsAny(errorCode, "#") {
+		errorCode = strings.SplitN(errorCode, "#", 2)[1]
+	}
+
+	return errorCode
+}
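A hedged sketch of the decode-and-sanitize flow above, shown before the `GetSmithyGenericAPIError` implementation below. The error body is hypothetical, and it assumes the vendored package path is importable as written:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
)

func main() {
	body := `{"__type":"aws.protocol#ThrottlingException","message":"slow down"}`

	apiErr, err := restjson.GetSmithyGenericAPIError(json.NewDecoder(strings.NewReader(body)), "")
	if err != nil {
		panic(err)
	}

	// "#" keeps the suffix, so the namespace prefix is stripped.
	fmt.Println(apiErr.Code, "-", apiErr.Message) // ThrottlingException - slow down
}
```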
+// GetSmithyGenericAPIError returns a smithy generic API error and an error
+// interface. Takes in a json decoder and an error code string as args. The
+// function retrieves the error message and error code from the decoder body.
+// If an errorCode of length greater than 0 is passed in as an argument, it
+// is used instead.
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
+	errorType, message, err := GetErrorInfo(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(errorCode) == 0 {
+		errorCode = errorType
+	}
+
+	return &smithy.GenericAPIError{
+		Code:    errorCode,
+		Message: message,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
new file mode 100644
index 000000000000..c228f7d87858
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -0,0 +1,56 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code      string
+	Message   string
+	RequestID string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:      errResponse.Code,
+			Message:   errResponse.Message,
+			RequestID: errResponse.RequestID,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:      errResponse.Code,
+		Message:   errResponse.Message,
+		RequestID: errResponse.RequestID,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal Error wrapping.
+type noWrappedErrorResponse struct {
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within an Error element.
+type wrappedErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 000000000000..974ef594f071
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+	"sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+	remainingTokens uint
+	maxCapacity     uint
+	minCapacity     uint
+	mu              sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+	return &TokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
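Before the `Retrieve` implementation below, a usage sketch of the bucket semantics just described; the capacities and amounts are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
)

func main() {
	bucket := ratelimit.NewTokenBucket(10)

	remaining, ok := bucket.Retrieve(4)
	fmt.Println(remaining, ok) // 6 true

	remaining, ok = bucket.Retrieve(100) // more than available
	fmt.Println(remaining, ok)           // 6 false

	bucket.Refund(100)              // refunds are capped at max capacity
	fmt.Println(bucket.Remaining()) // 10
}
```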
+// Retrieve attempts to reduce the available tokens by the amount requested.
+// If there are tokens available, true will be returned along with the number
+// of available tokens remaining. If the amount requested is larger than the
+// available capacity, false will be returned along with the available
+// capacity. If the amount is less than the available capacity, the capacity
+// will be reduced by that amount, and the remaining capacity and true will
+// be returned.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens that remain in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if the max size was
+	// reduced.
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 000000000000..12a3f0c4fbb5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,87 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+func isTimeoutError(error) bool {
+	return false
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the decremented value can not be
+// reduced from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
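Before the `AddTokens` implementation below, a usage sketch of acquiring and releasing retry quota; token counts are illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
)

func main() {
	limiter := ratelimit.NewTokenRateLimit(500)

	release, err := limiter.GetToken(context.Background(), 5)
	if err != nil {
		var qe ratelimit.QuotaExceededError
		if errors.As(err, &qe) {
			fmt.Println("out of retry quota:", qe)
		}
		return
	}

	// Releasing refunds the 5 tokens to the bucket.
	_ = release()
	fmt.Println(limiter.Remaining()) // 500
}
```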
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 000000000000..d8d00e615823
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestCanceledError) Unwrap() error {
+	return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+	return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
new file mode 100644
index 000000000000..b9fce01d6eed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
@@ -0,0 +1,156 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	// DefaultRequestCost is the cost of a single request from the adaptive
+	// rate limited token bucket.
+	DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+	ThrottleErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty, AdaptiveMode will sleep until a
+	// token is available before an attempt is made. This can occur when
+	// attempts fail with throttle errors. Use this option to disable that
+	// sleep and return an error immediately instead.
+	FailOnNoAttemptTokens bool
+
+	// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+	RequestCost uint
+
+	// Set of strategies to determine if the attempt failed due to a throttle
+	// error.
+	//
+	// It is safe to append to this list in NewAdaptiveMode's functional options.
+	Throttles []IsErrorThrottle
+
+	// Set of options for the standard retry mode that AdaptiveMode is built
+	// on top of. AdaptiveMode may apply its own defaults to Standard retry
+	// mode that are different than the defaults of NewStandard. Use these
+	// options to override the default options.
+	StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits.
+// The attempt rate limit is initially unrestricted, but becomes restricted
+// when the attempt fails with a throttle error. When restricted AdaptiveMode
+// may need to sleep before an attempt is made, if too many throttles have
+// been received. AdaptiveMode's sleep can be canceled with context cancel.
+// Set AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from
+// sleep, to fail fast.
+//
+// Eventually the unrestricted attempt rate limit will be restored once
+// attempts are no longer failing due to throttle errors.
+type AdaptiveMode struct {
+	options   AdaptiveModeOptions
+	throttles IsErrorThrottles
+
+	retryer   aws.RetryerV2
+	rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+	o := AdaptiveModeOptions{
+		RequestCost: DefaultRequestCost,
+		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
+	}
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AdaptiveMode{
+		options:   o,
+		throttles: IsErrorThrottles(o.Throttles),
+		retryer:   NewStandard(o.StandardOptions...),
+		rateLimit: newAdaptiveRateLimit(),
+	}
+}
+
+// IsErrorRetryable returns if the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an attempt before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+// Only present to implement the Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and to
+// release the attempt token after the attempt has been made.
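Before the `GetAttemptToken` implementation below, a configuration sketch for adaptive mode; the option values are illustrative, not recommendations:

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// Build an adaptive-mode retryer that fails fast when no attempt
	// tokens are available, with the standard retryer underneath capped
	// at 5 attempts.
	retryer := retry.NewAdaptiveMode(func(o *retry.AdaptiveModeOptions) {
		o.FailOnNoAttemptTokens = true
		o.StandardOptions = append(o.StandardOptions, func(so *retry.StandardOptions) {
			so.MaxAttempts = 5
		})
	})

	_ = retryer // supply to a client's options as its Retryer
}
```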
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) { + for { + acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost) + if acquiredToken { + break + } + if a.options.FailOnNoAttemptTokens { + return nil, fmt.Errorf( + "unable to get attempt token, and FailOnNoAttemptTokens enables") + } + + if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil { + return nil, fmt.Errorf("failed to wait for token to be available, %w", err) + } + } + + return a.handleResponse, nil +} + +func (a *AdaptiveMode) handleResponse(opErr error) error { + throttled := a.throttles.IsErrorThrottle(opErr).Bool() + + a.rateLimit.Update(throttled) + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go new file mode 100644 index 000000000000..ad96d9b8c5d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go @@ -0,0 +1,158 @@ +package retry + +import ( + "math" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +type adaptiveRateLimit struct { + tokenBucketEnabled bool + + smooth float64 + beta float64 + scaleConstant float64 + minFillRate float64 + + fillRate float64 + calculatedRate float64 + lastRefilled time.Time + measuredTxRate float64 + lastTxRateBucket float64 + requestCount int64 + lastMaxRate float64 + lastThrottleTime time.Time + timeWindow float64 + + tokenBucket *adaptiveTokenBucket + + mu sync.Mutex +} + +func newAdaptiveRateLimit() *adaptiveRateLimit { + now := sdk.NowTime() + return &adaptiveRateLimit{ + smooth: 0.8, + beta: 0.7, + scaleConstant: 0.4, + + minFillRate: 0.5, + + lastTxRateBucket: math.Floor(timeFloat64Seconds(now)), + lastThrottleTime: now, + + tokenBucket: newAdaptiveTokenBucket(0), + } +} + +func (a *adaptiveRateLimit) Enable(v bool) { + a.mu.Lock() + defer a.mu.Unlock() + + a.tokenBucketEnabled = v +} + +func (a *adaptiveRateLimit) AcquireToken(amount uint) ( + tokenAcquired bool, waitTryAgain time.Duration, +) { + a.mu.Lock() + defer a.mu.Unlock() + + if !a.tokenBucketEnabled { + return true, 0 + } + + a.tokenBucketRefill() + + available, ok := a.tokenBucket.Retrieve(float64(amount)) + if !ok { + waitDur := float64Seconds((float64(amount) - available) / a.fillRate) + return false, waitDur + } + + return true, 0 +} + +func (a *adaptiveRateLimit) Update(throttled bool) { + a.mu.Lock() + defer a.mu.Unlock() + + a.updateMeasuredRate() + + if throttled { + rateToUse := a.measuredTxRate + if a.tokenBucketEnabled { + rateToUse = math.Min(a.measuredTxRate, a.fillRate) + } + + a.lastMaxRate = rateToUse + a.calculateTimeWindow() + a.lastThrottleTime = sdk.NowTime() + a.calculatedRate = a.cubicThrottle(rateToUse) + a.tokenBucketEnabled = true + } else { + a.calculateTimeWindow() + a.calculatedRate = a.cubicSuccess(sdk.NowTime()) + } + + newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) + a.tokenBucketUpdateRate(newRate) +} + +func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { + dt := secondsFloat64(t.Sub(a.lastThrottleTime)) + return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate +} + +func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { + return rateToUse * a.beta +} + +func (a *adaptiveRateLimit) calculateTimeWindow() { + a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) 
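+	// The window W computed above satisfies
+	// scaleConstant*(0-W)^3 + lastMaxRate == beta*lastMaxRate: the cubic
+	// recovery curve used by cubicSuccess starts at the throttled rate and
+	// climbs back to lastMaxRate once W seconds have elapsed since the last
+	// throttle.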
+} + +func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { + a.tokenBucketRefill() + a.fillRate = math.Max(newRPS, a.minFillRate) + a.tokenBucket.Resize(newRPS) +} + +func (a *adaptiveRateLimit) updateMeasuredRate() { + now := sdk.NowTime() + timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. + a.requestCount++ + + if timeBucket > a.lastTxRateBucket { + currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) + a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) + a.requestCount = 0 + a.lastTxRateBucket = timeBucket + } +} + +func (a *adaptiveRateLimit) tokenBucketRefill() { + now := sdk.NowTime() + if a.lastRefilled.IsZero() { + a.lastRefilled = now + return + } + + fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate + a.tokenBucket.Refund(fillAmount) + a.lastRefilled = now +} + +func float64Seconds(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +func secondsFloat64(v time.Duration) float64 { + return float64(v) / float64(time.Second) +} + +func timeFloat64Seconds(v time.Time) float64 { + return float64(v.UnixNano()) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go new file mode 100644 index 000000000000..052723e8ed1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go @@ -0,0 +1,83 @@ +package retry + +import ( + "math" + "sync" +) + +// adaptiveTokenBucket provides a concurrency safe utility for adding and +// removing tokens from the available token bucket. +type adaptiveTokenBucket struct { + remainingTokens float64 + maxCapacity float64 + minCapacity float64 + mu sync.Mutex +} + +// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the +// capacity specified. +func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { + return &adaptiveTokenBucket{ + remainingTokens: i, + maxCapacity: i, + minCapacity: 1, + } +} + +// Retrieve attempts to reduce the available tokens by the amount requested. If +// there are tokens available true will be returned along with the number of +// available tokens remaining. If amount requested is larger than the available +// capacity, false will be returned along with the available capacity. If the +// amount is less than the available capacity, the capacity will be reduced by +// that amount, and the remaining capacity and true will be returned. +func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) { + t.mu.Lock() + defer t.mu.Unlock() + + if amount > t.remainingTokens { + return t.remainingTokens, false + } + + t.remainingTokens -= amount + return t.remainingTokens, true +} + +// Refund returns the amount of tokens back to the available token bucket, up +// to the initial capacity. +func (t *adaptiveTokenBucket) Refund(amount float64) { + t.mu.Lock() + defer t.mu.Unlock() + + // Capacity cannot exceed max capacity. + t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity) +} + +// Capacity returns the maximum capacity of tokens that the bucket could +// contain. +func (t *adaptiveTokenBucket) Capacity() float64 { + t.mu.Lock() + defer t.mu.Unlock() + + return t.maxCapacity +} + +// Remaining returns the number of tokens that remaining in the bucket. 
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if the max size was
+	// reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 000000000000..42ced06e2489
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// Retryer Interface and Implementations
+//
+// This package defines the Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+// - Retrying errors that implement the RetryableError method, and return true.
+// - Connection Errors
+// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+// - Connection Reset Errors.
+// - net.OpErr types that are dialing errors or are temporary.
+// - HTTP Status Codes: 500, 502, 503, and 504.
+// - API Error Codes
+// - RequestTimeout, RequestTimeoutException
+// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+// RequestThrottled, SlowDown, EC2ThrottledException
+// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+// - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example, to modify the default retry attempts for the standard retryer:
+//
+//	// configure the custom retryer
+//	customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+//		o.MaxAttempts = 5
+//	})
+//
+//	// create a service client with the retryer
+//	s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.Retryer = customRetry
+//	})
+//
+// Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
+// way. These are:
+//
+// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
+// in addition to those considered retryable by the provided retryer.
+//
+// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
+// a retryer implementation.
+//
+// AddWithMaxBackoffDelay - Provides the ability to set the max backoff delay that can occur before retrying a
+// request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+// you can use this method to easily create custom backoff policies to be used with the
+// standard retryer.
+//
+// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be retried.
+//
+// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy the IsErrorTimeout interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be considered a timeout.
package retry
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 000000000000..3e432eefe77f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts have
+// been exceeded.
+type MaxAttemptsError struct {
+	Attempt int
+	Err     error
+}
+
+func (e *MaxAttemptsError) Error() string {
+	return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
+}
+
+// Unwrap returns the nested error causing the max attempts error. Provides the
+// implementation for errors.Is and errors.As to unwrap nested errors.
+func (e *MaxAttemptsError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
new file mode 100644
index 000000000000..c266996dea23
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
@@ -0,0 +1,49 @@
+package retry
+
+import (
+	"math"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
+// ExponentialJitterBackoff provides backoff delays with jitter based on the
+// number of attempts.
+type ExponentialJitterBackoff struct {
+	maxBackoff time.Duration
+	// precomputed number of attempts needed to reach max backoff.
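+	// For example, with the default 20s max backoff this is
+	// log2(20) ≈ 4.32, so attempt numbers above 4 return the max backoff
+	// directly instead of computing an exponential delay.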
+	maxBackoffAttempts float64
+
+	randFloat64 func() (float64, error)
+}
+
+// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
+// for the max backoff.
+func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
+	return &ExponentialJitterBackoff{
+		maxBackoff: maxBackoff,
+		maxBackoffAttempts: math.Log2(
+			float64(maxBackoff) / float64(time.Second)),
+		randFloat64: rand.CryptoRandFloat64,
+	}
+}
+
+// BackoffDelay returns the duration to wait before the next attempt should be
+// made. Returns an error if unable to get a duration.
+func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	if attempt > int(j.maxBackoffAttempts) {
+		return j.maxBackoff, nil
+	}
+
+	b, err := j.randFloat64()
+	if err != nil {
+		return 0, err
+	}
+
+	// [0.0, 1.0) * 2 ^ attempts
+	ri := int64(1 << uint64(attempt))
+	delaySeconds := b * float64(ri)
+
+	return timeconv.FloatSecondsDur(delaySeconds), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
new file mode 100644
index 000000000000..7a3f18301863
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// attemptResultsKey is a metadata accessor key to retrieve metadata
+// for all request attempts.
+type attemptResultsKey struct {
+}
+
+// GetAttemptResults retrieves attempt results from middleware metadata.
+func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
+	m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
+	return m, ok
+}
+
+// AttemptResults represents a struct containing metadata returned by all request attempts.
+type AttemptResults struct {
+
+	// Results is a slice consisting of the attempt results from all request
+	// attempts. Results are stored in the order the request attempts are made.
+	Results []AttemptResult
+}
+
+// AttemptResult represents the attempt result returned by a single request attempt.
+type AttemptResult struct {
+
+	// Err is the error if received for the request attempt.
+	Err error
+
+	// Retryable denotes if the request may be retried. This states if an
+	// error is considered retryable.
+	Retryable bool
+
+	// Retried indicates if this request was retried.
+	Retried bool
+
+	// ResponseMetadata is any existing metadata passed via the response middlewares.
+	ResponseMetadata middleware.Metadata
+}
+
+// addAttemptResults adds attempt results to middleware metadata
+func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
+	metadata.Set(attemptResultsKey{}, v)
+}
+
+// GetRawResponse returns the raw response recorded for the attempt result
+func (a AttemptResult) GetRawResponse() interface{} {
+	return awsmiddle.GetRawResponse(a.ResponseMetadata)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
new file mode 100644
index 000000000000..926f5f5e1e08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -0,0 +1,331 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithymiddle "github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+)
+
+// RequestCloner is a function that can take an input request type and clone
+// the request for use in a subsequent retry attempt.
+type RequestCloner func(interface{}) interface{}
+
+type retryMetadata struct {
+	AttemptNum       int
+	AttemptTime      time.Time
+	MaxAttempts      int
+	AttemptClockSkew time.Duration
+}
+
+// Attempt is a Smithy Finalize middleware that handles retry attempts using
+// the provided Retryer implementation.
+type Attempt struct {
+	// Enable the logging of retry attempts performed by the SDK. This will
+	// include logging retry attempts, unretryable errors, and when max
+	// attempts are reached.
+	LogAttempts bool
+
+	retryer       aws.RetryerV2
+	requestCloner RequestCloner
+}
+
+// NewAttemptMiddleware returns a new Attempt retry middleware.
+func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
+	m := &Attempt{
+		retryer:       wrapAsRetryerV2(retryer),
+		requestCloner: requestCloner,
+	}
+	for _, fn := range optFns {
+		fn(m)
+	}
+	return m
+}
+
+// ID returns the middleware identifier
+func (r *Attempt) ID() string { return "Retry" }
+
+func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
+	if !r.LogAttempts {
+		return
+	}
+	logger.Logf(classification, format, v...)
+}
+
+// HandleFinalize utilizes the provided Retryer implementation to attempt
+// retries over the next handler
+func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	var attemptNum int
+	var attemptClockSkew time.Duration
+	var attemptResults AttemptResults
+
+	maxAttempts := r.retryer.MaxAttempts()
+	releaseRetryToken := nopRelease
+
+	for {
+		attemptNum++
+		attemptInput := in
+		attemptInput.Request = r.requestCloner(attemptInput.Request)
+
+		// Record the metadata for the attempt being started.
+		attemptCtx := setRetryMetadata(ctx, retryMetadata{
+			AttemptNum:       attemptNum,
+			AttemptTime:      sdk.NowTime().UTC(),
+			MaxAttempts:      maxAttempts,
+			AttemptClockSkew: attemptClockSkew,
+		})
+
+		var attemptResult AttemptResult
+		out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+		attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
+
+		// AttemptResult Retried states that the attempt was not successful,
+		// and should be retried.
+		shouldRetry := attemptResult.Retried
+
+		// Add attempt metadata to the list of all attempt metadata
+		attemptResults.Results = append(attemptResults.Results, attemptResult)
+
+		if !shouldRetry {
+			// Ensure the last response's metadata is used as the basis for
+			// the result metadata returned by the stack. The slice of attempt
+			// results will be added to this cloned metadata.
+			metadata = attemptResult.ResponseMetadata.Clone()
+
+			break
+		}
+	}
+
+	addAttemptResults(&metadata, attemptResults)
+	return out, metadata, err
+}
+
+// handleAttempt handles an individual request attempt.
+func (r *Attempt) handleAttempt(
+	ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
+) (
+	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
+) {
+	defer func() {
+		attemptResult.Err = err
+	}()
+
+	// Short circuit if this attempt can never succeed because the context is
+	// canceled. This reduces the chance of token pools being modified for
+	// attempts that will not be made
+	select {
+	case <-ctx.Done():
+		return out, attemptResult, nopRelease, ctx.Err()
+	default:
+	}
+
+	//------------------------------
+	// Get Attempt Token
+	//------------------------------
+	releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
+	if err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to get retry Send token, %w", err)
+	}
+
+	//------------------------------
+	// Send Attempt
+	//------------------------------
+	logger := smithymiddle.GetLogger(ctx)
+	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
+	retryMetadata, _ := getRetryMetadata(ctx)
+	attemptNum := retryMetadata.AttemptNum
+	maxAttempts := retryMetadata.MaxAttempts
+
+	// Following attempts must ensure the request payload stream starts in a
+	// rewound state.
+	if attemptNum > 1 {
+		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
+			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
+				return out, attemptResult, nopRelease, fmt.Errorf(
+					"failed to rewind transport stream for retry, %w", rewindErr)
+			}
+		}
+
+		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
+			service, operation, attemptNum)
+	}
+
+	var metadata smithymiddle.Metadata
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	attemptResult.ResponseMetadata = metadata
+
+	//------------------------------
+	// Bookkeeping
+	//------------------------------
+	// Release the retry token based on the state of the attempt's error (if any).
+	if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release retry token after request error, %w", err)
+	}
+	// Release the attempt token based on the state of the attempt's error (if any).
+	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release initial token after request error, %w", err)
+	}
+	// If there was no error making the attempt, nothing further to do. There
+	// will be nothing to retry.
+	if err == nil {
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Is Retryable and Should Retry
+	//------------------------------
+	// If the attempt failed with an unretryable error, nothing further to do
+	// but return, and inform the caller about the terminal failure.
+	retryable := r.retryer.IsErrorRetryable(err)
+	if !retryable {
+		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+		return out, attemptResult, nopRelease, err
+	}
+
+	// set retryable to true
+	attemptResult.Retryable = true
+
+	// Once the maximum number of attempts have been exhausted there is nothing
+	// further to do other than inform the caller about the terminal failure.
+	if maxAttempts > 0 && attemptNum >= maxAttempts {
+		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+		err = &MaxAttemptsError{
+			Attempt: attemptNum,
+			Err:     err,
+		}
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Get Retry (aka Retry Quota) Token
+	//------------------------------
+	// Get a retry token that will be released after the next attempt
+	// completes.
+	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+	if retryTokenErr != nil {
+		return out, attemptResult, nopRelease, retryTokenErr
+	}
+
+	//------------------------------
+	// Retry Delay and Sleep
+	//------------------------------
+	// Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+	// context.
+	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	if reqErr != nil {
+		return out, attemptResult, releaseRetryToken, reqErr
+	}
+	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+		err = &aws.RequestCanceledError{Err: reqErr}
+		return out, attemptResult, releaseRetryToken, err
+	}
+
+	// The request should be re-attempted.
+	attemptResult.Retried = true
+
+	return out, attemptResult, releaseRetryToken, err
+}
+
+// MetricsHeader attaches SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+	return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	retryMetadata, _ := getRetryMetadata(ctx)
+
+	const retryMetricHeader = "Amz-Sdk-Request"
+	var parts []string
+
+	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+	if retryMetadata.MaxAttempts != 0 {
+		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+	}
+
+	var ttl time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		ttl = deadline
+	}
+
+	// Only append the TTL if it can be determined.
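+	// A resulting header looks like, e.g. (values hypothetical):
+	// "attempt=2; max=3; ttl=20230115T120005Z".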
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { + const unixTimeFormat = "20060102T150405Z" + ttl = ttl.Add(retryMetadata.AttemptClockSkew) + parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) + } + + switch req := in.Request.(type) { + case *http.Request: + req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + return next.HandleFinalize(ctx, in) +} + +type retryMetadataKey struct{} + +// getRetryMetadata retrieves retryMetadata from the context and a bool +// indicating if it was set. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { + metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + return metadata, ok +} + +// setRetryMetadata sets the retryMetadata on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { + return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata) +} + +// AddRetryMiddlewaresOptions is the set of options that can be passed to +// AddRetryMiddlewares for configuring retry associated middleware. +type AddRetryMiddlewaresOptions struct { + Retryer aws.Retryer + + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogRetryAttempts bool +} + +// AddRetryMiddlewares adds retry middleware to operation middleware stack +func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { + attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { + middleware.LogAttempts = options.LogRetryAttempts + }) + + if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil { + return err + } + if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go new file mode 100644 index 000000000000..af81635b3fdd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go @@ -0,0 +1,90 @@ +package retry + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// AddWithErrorCodes returns a Retryer with additional error codes considered +// for determining if the error should be retried. +func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { + retryable := &RetryableErrorCode{ + Codes: map[string]struct{}{}, + } + for _, c := range codes { + retryable.Codes[c] = struct{}{} + } + + return &withIsErrorRetryable{ + RetryerV2: wrapAsRetryerV2(r), + Retryable: retryable, + } +} + +type withIsErrorRetryable struct { + aws.RetryerV2 + Retryable IsErrorRetryable +} + +func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { + if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { + return v.Bool() + } + return r.RetryerV2.IsErrorRetryable(err) +} + +// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value +// specified. 
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer,
+// overriding the RetryDelay behavior for an alternate minimum initial backoff
+// delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 000000000000..c695e6fe5277
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,186 @@
+package retry
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine
+// if an error as the result of an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error
+// is retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
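A sketch of composing these wrappers around the standard retryer; the error code and limits are illustrative:

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// Wrap the standard retryer: one extra retryable error code, at most
	// 10 attempts, and a 10s delay fed to the backoff policy.
	var r aws.Retryer = retry.NewStandard()
	r = retry.AddWithErrorCodes(r, "ProvisionedThroughputExceededException")
	r = retry.AddWithMaxAttempts(r, 10)
	r = retry.AddWithMaxBackoffDelay(r, 10*time.Second)

	_ = r
}
```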
+// IsErrorRetryable returns FalseTernary (not retryable) if the request was
+// canceled, and UnknownTernary otherwise.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ CanceledError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	if v.CanceledError() {
+		return aws.FalseTernary
+	}
+	return aws.UnknownTernary
+}
+
+// RetryableConnectionError determines if the underlying error is an HTTP
+// connection error, and returns if it should be retried.
+//
+// Includes errors such as connection reset, connection refused, net dial,
+// temporary, and timeout errors.
+type RetryableConnectionError struct{}
+
+// IsErrorRetryable returns if the error is caused by an HTTP connection
+// error, and should be retried.
+func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
+	if err == nil {
+		return aws.UnknownTernary
+	}
+	var retryable bool
+
+	var conErr interface{ ConnectionError() bool }
+	var tempErr interface{ Temporary() bool }
+	var timeoutErr interface{ Timeout() bool }
+	var urlErr *url.Error
+	var netOpErr *net.OpError
+
+	switch {
+	case errors.As(err, &conErr) && conErr.ConnectionError():
+		retryable = true
+
+	case strings.Contains(err.Error(), "connection reset"):
+		retryable = true
+
+	case errors.As(err, &urlErr):
+		// Refused connections should be retried as the service may not yet be
+		// running on the port. Go TCP dial considers refused connections as
+		// not temporary.
+		if strings.Contains(urlErr.Error(), "connection refused") {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(urlErr))
+		}
+
+	case errors.As(err, &netOpErr):
+		// Network dial, or temporary network errors are always retryable.
+		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
+		}
+
+	case errors.As(err, &tempErr) && tempErr.Temporary():
+		// Fallback to the generic temporary check, with temporary errors
+		// retryable.
+		retryable = true
+
+	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
+		// Fallback to the generic timeout check, with timeout errors
+		// retryable.
+		retryable = true
+
+	default:
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(retryable)
+
+}
+
+// RetryableHTTPStatusCode provides an IsErrorRetryable based on HTTP status
+// codes.
+type RetryableHTTPStatusCode struct {
+	Codes map[int]struct{}
+}
+
+// IsErrorRetryable returns if the passed in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ HTTPStatusCode() int }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.HTTPStatusCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorRetryable returns if the error is retryable based on the error
+// codes. Returns unknown if the error doesn't have a code or it is unknown.
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
new file mode 100644
index 000000000000..25abffc81289
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -0,0 +1,258 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay before
+// another attempt of a request that previously failed.
+type BackoffDelayer interface {
+	BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay before the next attempt to retry a request.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	return fn(attempt, err)
+}
+
+const (
+	// DefaultMaxAttempts is the maximum number of attempts for an API request
+	DefaultMaxAttempts int = 3
+
+	// DefaultMaxBackoff is the maximum backoff delay between attempts
+	DefaultMaxBackoff time.Duration = 20 * time.Second
+)
+
+// Default retry token quota values.
+const (
+	DefaultRetryRateTokens  uint = 500
+	DefaultRetryCost        uint = 5
+	DefaultRetryTimeoutCost uint = 10
+	DefaultNoRetryIncrement uint = 1
+)
+
+// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
+// should consider as retryable errors.
+var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
+	500: {},
+	502: {},
+	503: {},
+	504: {},
+}
+
+// DefaultRetryableErrorCodes provides the set of API error codes that should
+// be retried.
+var DefaultRetryableErrorCodes = map[string]struct{}{
+	"RequestTimeout":          {},
+	"RequestTimeoutException": {},
+}
+
+// DefaultThrottleErrorCodes provides the set of API error codes that are
+// considered throttle errors.
+var DefaultThrottleErrorCodes = map[string]struct{}{
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"ThrottledException":                     {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {},
+	"ProvisionedThroughputExceededException": {},
+	"TransactionInProgressException":         {},
+	"RequestLimitExceeded":                   {},
+	"BandwidthLimitExceeded":                 {},
+	"LimitExceededException":                 {},
+	"RequestThrottled":                       {},
+	"SlowDown":                               {},
+	"PriorRequestNotComplete":                {},
+	"EC2ThrottledException":                  {},
+}
+
+// DefaultRetryables provides the set of retryable checks that are used by
+// default.
+var DefaultRetryables = []IsErrorRetryable{
+	NoRetryCanceledError{},
+	RetryableError{},
+	RetryableConnectionError{},
+	RetryableHTTPStatusCode{
+		Codes: DefaultRetryableHTTPStatusCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultRetryableErrorCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// DefaultTimeouts provides the set of timeout checks that are used by default.
+var DefaultTimeouts = []IsErrorTimeout{
+	TimeouterError{},
+}
+
+// StandardOptions provides the functional options for configuring the standard
+// retryable, and delay behavior.
+type StandardOptions struct {
+	// Maximum number of attempts that should be made.
+	MaxAttempts int
+
+	// MaxBackoff duration between retried attempts.
+	MaxBackoff time.Duration
+
+	// Provides the backoff strategy the retryer will use to determine the
+	// delay between retry attempts.
+	Backoff BackoffDelayer
+
+	// Set of strategies to determine if the attempt should be retried based
+	// on the error response received.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Retryables []IsErrorRetryable
+
+	// Set of strategies to determine if the attempt failed due to a timeout
+	// error.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Timeouts []IsErrorTimeout
+
+	// Provides the rate limiting strategy for rate limiting attempt retries
+	// across all attempts the retryer is being used with.
+	RateLimiter RateLimiter
+
+	// The cost to deduct from the RateLimiter's token bucket per retry.
+	RetryCost uint
+
+	// The cost to deduct from the RateLimiter's token bucket per retry caused
+	// by a timeout error.
+	RetryTimeoutCost uint
+
+	// The cost to payback to the RateLimiter's token bucket for successful
+	// attempts.
+	NoRetryIncrement uint
+}
+
+// RateLimiter provides the interface for limiting the rate of attempt retries
+// allowed by the retryer.
+type RateLimiter interface {
+	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
+	AddTokens(uint) error
+}
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine if the failed attempt should be retried, and
+// what retry delay should be used.
+type Standard struct {
+	options StandardOptions
+
+	timeout   IsErrorTimeout
+	retryable IsErrorRetryable
+	backoff   BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+	o := StandardOptions{
+		MaxAttempts: DefaultMaxAttempts,
+		MaxBackoff:  DefaultMaxBackoff,
+		Retryables:  append([]IsErrorRetryable{}, DefaultRetryables...),
+		Timeouts:    append([]IsErrorTimeout{}, DefaultTimeouts...),
+
+		RateLimiter:      ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+		RetryCost:        DefaultRetryCost,
+		RetryTimeoutCost: DefaultRetryTimeoutCost,
+		NoRetryIncrement: DefaultNoRetryIncrement,
+	}
+	for _, fn := range fnOpts {
+		fn(&o)
+	}
+	if o.MaxAttempts <= 0 {
+		o.MaxAttempts = DefaultMaxAttempts
+	}
+
+	backoff := o.Backoff
+	if backoff == nil {
+		backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+	}
+
+	return &Standard{
+		options:   o,
+		backoff:   backoff,
+		retryable: IsErrorRetryables(o.Retryables),
+		timeout:   IsErrorTimeouts(o.Timeouts),
+	}
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for a
+// request before failing.
+func (s *Standard) MaxAttempts() int {
+	return s.options.MaxAttempts
+}
+
+// IsErrorRetryable returns whether the error can be retried or not. Should
+// not consider the number of attempts made.
+func (s *Standard) IsErrorRetryable(err error) bool {
+	return s.retryable.IsErrorRetryable(err).Bool()
+}
+
+// RetryDelay returns the delay to use before another request attempt is made.
+func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return s.backoff.BackoffDelay(attempt, err)
+}
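A configuration sketch for the standard retryer with a custom backoff supplied via `BackoffDelayerFunc`; the delays are illustrative:

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// Standard retryer with a simple linear backoff swapped in via
	// BackoffDelayerFunc.
	standard := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 4
		o.Backoff = retry.BackoffDelayerFunc(func(attempt int, err error) (time.Duration, error) {
			return time.Duration(attempt) * 100 * time.Millisecond, nil
		})
	})

	_ = standard
}
```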
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) { + return s.GetInitialToken(), nil +} + +// GetInitialToken returns a token for adding the NoRetryIncrement to the +// RateLimiter token if the attempt completed successfully without error. +// +// InitialToken applies to result of the each attempt, including the first. +// Whereas the RetryToken applies to the result of subsequent attempts. +// +// Deprecated: use GetAttemptToken instead. +func (s *Standard) GetInitialToken() func(error) error { + return releaseToken(s.noRetryIncrement).release +} + +func (s *Standard) noRetryIncrement() error { + return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement) +} + +// GetRetryToken attempts to deduct the retry cost from the retry token pool. +// Returning the token release function, or error. +func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) { + cost := s.options.RetryCost + + if s.timeout.IsErrorTimeout(opErr).Bool() { + cost = s.options.RetryTimeoutCost + } + + fn, err := s.options.RateLimiter.GetToken(ctx, cost) + if err != nil { + return nil, fmt.Errorf("failed to get rate limit token, %w", err) + } + + return releaseToken(fn).release, nil +} + +func nopRelease(error) error { return nil } + +type releaseToken func() error + +func (f releaseToken) release(err error) error { + if err != nil { + return nil + } + + return f() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go new file mode 100644 index 000000000000..c4b844d15f19 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go @@ -0,0 +1,60 @@ +package retry + +import ( + "errors" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// IsErrorThrottle provides the interface of an implementation to determine if +// a error response from an operation is a throttling error. +type IsErrorThrottle interface { + IsErrorThrottle(error) aws.Ternary +} + +// IsErrorThrottles is a collection of checks to determine of the error a +// throttle error. Iterates through the checks and returns the state of +// throttle if any check returns something other than unknown. +type IsErrorThrottles []IsErrorThrottle + +// IsErrorThrottle returns if the error is a throttle error if any of the +// checks in the list return a value other than unknown. +func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary { + for _, re := range r { + if v := re.IsErrorThrottle(err); v != aws.UnknownTernary { + return v + } + } + return aws.UnknownTernary +} + +// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface. +type IsErrorThrottleFunc func(error) aws.Ternary + +// IsErrorThrottle returns if the error is a throttle error. +func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary { + return fn(err) +} + +// ThrottleErrorCode determines if an attempt should be retried based on the +// API error code. +type ThrottleErrorCode struct { + Codes map[string]struct{} +} + +// IsErrorThrottle return if the error is a throttle error based on the error +// codes. Returns unknown if the error doesn't have a code or it is unknown. 
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary { + var v interface{ ErrorCode() string } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + _, ok := r.Codes[v.ErrorCode()] + if !ok { + return aws.UnknownTernary + } + + return aws.TrueTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go new file mode 100644 index 000000000000..3d47870d2dc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go @@ -0,0 +1,52 @@ +package retry + +import ( + "errors" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// IsErrorTimeout provides the interface of an implementation to determine if +// a error matches. +type IsErrorTimeout interface { + IsErrorTimeout(err error) aws.Ternary +} + +// IsErrorTimeouts is a collection of checks to determine of the error is +// retryable. Iterates through the checks and returns the state of retryable +// if any check returns something other than unknown. +type IsErrorTimeouts []IsErrorTimeout + +// IsErrorTimeout returns if the error is retryable if any of the checks in +// the list return a value other than unknown. +func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary { + for _, t := range ts { + if v := t.IsErrorTimeout(err); v != aws.UnknownTernary { + return v + } + } + return aws.UnknownTernary +} + +// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface. +type IsErrorTimeoutFunc func(error) aws.Ternary + +// IsErrorTimeout returns if the error is retryable. +func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary { + return fn(err) +} + +// TimeouterError provides the IsErrorTimeout implementation for determining if +// an error is a timeout based on type with the Timeout method. +type TimeouterError struct{} + +// IsErrorTimeout returns if the error is a timeout error. +func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary { + var v interface{ Timeout() bool } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + return aws.BoolTernary(v.Timeout()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go new file mode 100644 index 000000000000..1e378f86a9af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go @@ -0,0 +1,127 @@ +package aws + +import ( + "context" + "fmt" + "time" +) + +// RetryMode provides the mode the API client will use to create a retryer +// based on. +type RetryMode string + +const ( + // RetryModeStandard model provides rate limited retry attempts with + // exponential backoff delay. + RetryModeStandard RetryMode = "standard" + + // RetryModeAdaptive model provides attempt send rate limiting on throttle + // responses in addition to standard mode's retry rate limiting. + // + // Adaptive retry mode is experimental and is subject to change in the + // future. + RetryModeAdaptive RetryMode = "adaptive" +) + +// ParseRetryMode attempts to parse a RetryMode from the given string. +// Returning error if the value is not a known RetryMode. 
+func ParseRetryMode(v string) (mode RetryMode, err error) { + switch v { + case "standard": + return RetryModeStandard, nil + case "adaptive": + return RetryModeAdaptive, nil + default: + return mode, fmt.Errorf("unknown RetryMode, %v", v) + } +} + +func (m RetryMode) String() string { return string(m) } + +// Retryer is an interface to determine if a given error from a +// attempt should be retried, and if so what backoff delay to apply. The +// default implementation used by most services is the retry package's Standard +// type. Which contains basic retry logic using exponential backoff. +type Retryer interface { + // IsErrorRetryable returns if the failed attempt is retryable. This check + // should determine if the error can be retried, or if the error is + // terminal. + IsErrorRetryable(error) bool + + // MaxAttempts returns the maximum number of attempts that can be made for + // a attempt before failing. A value of 0 implies that the attempt should + // be retried until it succeeds if the errors are retryable. + MaxAttempts() int + + // RetryDelay returns the delay that should be used before retrying the + // attempt. Will return error if the if the delay could not be determined. + RetryDelay(attempt int, opErr error) (time.Duration, error) + + // GetRetryToken attempts to deduct the retry cost from the retry token pool. + // Returning the token release function, or error. + GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error) + + // GetInitialToken returns the initial attempt token that can increment the + // retry token pool if the attempt is successful. + GetInitialToken() (releaseToken func(error) error) +} + +// RetryerV2 is an interface to determine if a given error from a attempt +// should be retried, and if so what backoff delay to apply. The default +// implementation used by most services is the retry package's Standard type. +// Which contains basic retry logic using exponential backoff. +// +// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken +// method in favor of GetAttemptToken which takes a context, and can return an error. +// +// The SDK's retry package's Attempt middleware, and utilities will always +// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if +// GetAttemptToken is not implemented. +type RetryerV2 interface { + Retryer + + // GetInitialToken returns the initial attempt token that can increment the + // retry token pool if the attempt is successful. + // + // Deprecated: This method does not provide a way to block using Context, + // nor can it return an error. Use RetryerV2, and GetAttemptToken instead. + GetInitialToken() (releaseToken func(error) error) + + // GetAttemptToken returns the send token that can be used to rate limit + // attempt calls. Will be used by the SDK's retry package's Attempt + // middleware to get a send token prior to calling the temp and releasing + // the send token after the attempt has been made. + GetAttemptToken(context.Context) (func(error) error, error) +} + +// NopRetryer provides a RequestRetryDecider implementation that will flag +// all attempt errors as not retryable, with a max attempts of 1. +type NopRetryer struct{} + +// IsErrorRetryable returns false for all error values. +func (NopRetryer) IsErrorRetryable(error) bool { return false } + +// MaxAttempts always returns 1 for the original attempt. +func (NopRetryer) MaxAttempts() int { return 1 } + +// RetryDelay is not valid for the NopRetryer. Will always return error. 
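For reference, a minimal sketch of how a caller would construct the `Standard` retryer defined above, overriding its defaults through the same functional options `NewStandard` exposes. This is not part of the diff; the option values and the example error are illustrative, but the types and signatures come from the vendored files added here:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// Override the defaults filled in by NewStandard; unset fields keep
	// their Default* values, including the token-bucket rate limiter.
	r := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 5               // first attempt plus up to 4 retries
		o.MaxBackoff = 10 * time.Second // cap for the exponential jitter backoff
	})

	fmt.Println(r.MaxAttempts()) // 5

	// Delay the retryer would apply before the 3rd attempt of a
	// hypothetical retryable error.
	d, err := r.RetryDelay(3, errors.New("example retryable error"))
	if err == nil {
		fmt.Println("backoff:", d)
	}
}
```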
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} + +// GetRetryToken returns a stub function that does nothing. +func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { + return nopReleaseToken, nil +} + +// GetInitialToken returns a stub function that does nothing. +func (NopRetryer) GetInitialToken() func(error) error { + return nopReleaseToken +} + +// GetAttemptToken returns a stub function that does nothing. +func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { + return nopReleaseToken, nil +} + +func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go new file mode 100644 index 000000000000..3af9b2b33614 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go @@ -0,0 +1,14 @@ +package aws + +// ExecutionEnvironmentID is the AWS execution environment runtime identifier. +type ExecutionEnvironmentID string + +// RuntimeEnvironment is a collection of values that are determined at runtime +// based on the environment that the SDK is executing in. Some of these values +// may or may not be present based on the executing environment and certain SDK +// configuration properties that drive whether these values are populated.. +type RuntimeEnvironment struct { + EnvironmentIdentifier ExecutionEnvironmentID + Region string + EC2InstanceMetadataRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go new file mode 100644 index 000000000000..cbf22f1d0b0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go @@ -0,0 +1,115 @@ +package v4 + +import ( + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +func lookupKey(service, region string) string { + var s strings.Builder + s.Grow(len(region) + len(service) + 3) + s.WriteString(region) + s.WriteRune('/') + s.WriteString(service) + return s.String() +} + +type derivedKey struct { + AccessKey string + Date time.Time + Credential []byte +} + +type derivedKeyCache struct { + values map[string]derivedKey + mutex sync.RWMutex +} + +func newDerivedKeyCache() derivedKeyCache { + return derivedKeyCache{ + values: make(map[string]derivedKey), + } +} + +func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { + key := lookupKey(service, region) + s.mutex.RLock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.RUnlock() + return cred + } + s.mutex.RUnlock() + + s.mutex.Lock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.Unlock() + return cred + } + cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) + entry := derivedKey{ + AccessKey: credentials.AccessKeyID, + Date: signingTime.Time, + Credential: cred, + } + s.values[key] = entry + s.mutex.Unlock() + + return cred +} + +func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { + cacheEntry, ok := s.retrieveFromCache(key) + if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { + return cacheEntry.Credential, true + } + return nil, false +} + +func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { + if v, ok := s.values[key]; ok { 
+ return v, true + } + return derivedKey{}, false +} + +// SigningKeyDeriver derives a signing key from a set of credentials +type SigningKeyDeriver struct { + cache derivedKeyCache +} + +// NewSigningKeyDeriver returns a new SigningKeyDeriver +func NewSigningKeyDeriver() *SigningKeyDeriver { + return &SigningKeyDeriver{ + cache: newDerivedKeyCache(), + } +} + +// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. +func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { + return k.cache.Get(credential, service, region, signingTime) +} + +func deriveKey(secret, service, region string, t SigningTime) []byte { + hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) + hmacRegion := HMACSHA256(hmacDate, []byte(region)) + hmacService := HMACSHA256(hmacRegion, []byte(service)) + return HMACSHA256(hmacService, []byte("aws4_request")) +} + +func isSameDay(x, y time.Time) bool { + xYear, xMonth, xDay := x.Date() + yYear, yMonth, yDay := y.Date() + + if xYear != yYear { + return false + } + + if xMonth != yMonth { + return false + } + + return xDay == yDay +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go new file mode 100644 index 000000000000..a23cb003bf77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go @@ -0,0 +1,40 @@ +package v4 + +// Signature Version 4 (SigV4) Constants +const ( + // EmptyStringSHA256 is the hex encoded sha256 value of an empty string + EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + + // UnsignedPayload indicates that the request payload body is unsigned + UnsignedPayload = "UNSIGNED-PAYLOAD" + + // AmzAlgorithmKey indicates the signing algorithm + AmzAlgorithmKey = "X-Amz-Algorithm" + + // AmzSecurityTokenKey indicates the security token to be used with temporary credentials + AmzSecurityTokenKey = "X-Amz-Security-Token" + + // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' + AmzDateKey = "X-Amz-Date" + + // AmzCredentialKey is the access key ID and credential scope + AmzCredentialKey = "X-Amz-Credential" + + // AmzSignedHeadersKey is the set of headers signed for the request + AmzSignedHeadersKey = "X-Amz-SignedHeaders" + + // AmzSignatureKey is the query parameter to store the SigV4 signature + AmzSignatureKey = "X-Amz-Signature" + + // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter + TimeFormat = "20060102T150405Z" + + // ShortTimeFormat is the shorten time format used in the credential scope + ShortTimeFormat = "20060102" + + // ContentSHAKey is the SHA256 of request body + ContentSHAKey = "X-Amz-Content-Sha256" + + // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
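The cache above exists because a derived key changes only with the credentials, service, region, and calendar day, which is exactly what `get` compares before reuse. The derivation chain in `deriveKey` can be reproduced with the standard library alone; a standalone sketch with placeholder inputs:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors the package's HMACSHA256 helper.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLESECRETKEY" // placeholder secret access key
	date, region, service := "20150830", "us-east-1", "iam"

	// Same chain as deriveKey: date -> region -> service -> "aws4_request".
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(kSigning))
}
```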
+	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 000000000000..c61955ad5b9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and just simply
+// checks whether or not a value adheres to that Rule
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and returns true if any rule
+// applies to the value; this supports nested rules
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule satisfies whether it exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for allow listing
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing
+type ExcludeList struct {
+	Rule
+}
+
+// IsValid for ExcludeList checks that the value is NOT within the embedded Rule
+func (b ExcludeList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns if a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules rules allow for rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true only if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 000000000000..85a1d8f032fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,68 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	ExcludeList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+		},
+	},
+}
+
+// RequiredSignedHeaders is an allow list for Build canonical headers.
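The rule types compose: `IgnoredHeaders` above is an `ExcludeList` wrapped around a `MapRule`, while the `RequiredSignedHeaders` allow list that follows mixes a `MapRule` with `Patterns` prefixes. Because the real types live in an internal package, this self-contained sketch redeclares minimal equivalents to show the same composition:

```go
package main

import (
	"fmt"
	"strings"
)

// Minimal stand-ins for the internal v4 rule types.
type Rule interface{ IsValid(string) bool }

type MapRule map[string]struct{}

func (m MapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

type ExcludeList struct{ Rule }

func (b ExcludeList) IsValid(v string) bool { return !b.Rule.IsValid(v) }

type Patterns []string

func (p Patterns) IsValid(v string) bool {
	for _, pat := range p {
		// Case-insensitive prefix match, like HasPrefixFold.
		if len(v) >= len(pat) && strings.EqualFold(v[:len(pat)], pat) {
			return true
		}
	}
	return false
}

func main() {
	ignored := ExcludeList{MapRule{"Authorization": {}, "User-Agent": {}}}
	meta := Patterns{"X-Amz-Meta-"}

	fmt.Println(ignored.IsValid("Content-Type")) // true: header gets signed
	fmt.Println(ignored.IsValid("User-Agent"))   // false: excluded from signing
	fmt.Println(meta.IsValid("x-amz-meta-tag"))  // true: prefix match ignores case
}
```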
+var RequiredSignedHeaders = Rules{ + AllowList{ + MapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + "X-Amz-Tagging": struct{}{}, + }, + }, + Patterns{"X-Amz-Object-Lock-"}, + Patterns{"X-Amz-Meta-"}, +} + +// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value +// represents whether or not it is a pattern. +var AllowedQueryHoisting = InclusiveRules{ + ExcludeList{RequiredSignedHeaders}, + Patterns{"X-Amz-"}, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go new file mode 100644 index 000000000000..e7fa7a1b1e60 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go @@ -0,0 +1,13 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. +func HMACSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go new file mode 100644 index 000000000000..bf93659a43f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go @@ -0,0 +1,75 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. 
IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go new file mode 100644 index 000000000000..fc7887909e29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go @@ -0,0 +1,13 @@ +package v4 + +import "strings" + +// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope +func BuildCredentialScope(signingTime SigningTime, region, service string) string { + return strings.Join([]string{ + signingTime.ShortTimeFormat(), + region, + service, + "aws4_request", + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go new file mode 100644 index 000000000000..1de06a765d1b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted of 20060102. 
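`BuildCredentialScope` and the two formats cached by `SigningTime` combine into the scope string that appears in both the `X-Amz-Credential` value and the string to sign. A small standard-library sketch of the two formats and the resulting scope (region and service are illustrative):

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	t := time.Date(2013, time.May, 24, 0, 0, 0, 0, time.UTC)

	amzDate := t.Format("20060102T150405Z") // TimeFormat: the X-Amz-Date value
	short := t.Format("20060102")           // ShortTimeFormat: the scope date

	scope := strings.Join([]string{short, "us-east-1", "s3", "aws4_request"}, "/")

	fmt.Println(amzDate) // 20130524T000000Z
	fmt.Println(scope)   // 20130524/us-east-1/s3/aws4_request
}
```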
+func (m *SigningTime) ShortTimeFormat() string { + return m.format(&m.shortTimeFormat, ShortTimeFormat) +} + +func (m *SigningTime) format(target *string, format string) string { + if len(*target) > 0 { + return *target + } + v := m.Time.Format(format) + *target = v + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go new file mode 100644 index 000000000000..0cb9cffaf518 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go @@ -0,0 +1,64 @@ +package v4 + +import ( + "net/url" + "strings" +) + +const doubleSpace = " " + +// StripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func StripExcessSpaces(str string) string { + var j, k, l, m, spaces int + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + return str + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + return string(buf[:m]) +} + +// GetURIPath returns the escaped URI component from the provided URL +func GetURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go new file mode 100644 index 000000000000..3f3bcf456a78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -0,0 +1,400 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const computePayloadHashMiddlewareID = "ComputePayloadHash" + +// HashComputationError indicates an error occurred while computing the signing hash +type HashComputationError struct { + Err error +} + +// Error is the error message +func (e *HashComputationError) Error() string { + return fmt.Sprintf("failed to compute payload hash: %v", e.Err) +} + +// Unwrap returns the underlying error if one is set +func (e *HashComputationError) Unwrap() error { + return e.Err +} + +// SigningError indicates an error condition occurred while performing SigV4 signing +type SigningError struct { + Err error +} + +func (e *SigningError) Error() string { + return fmt.Sprintf("failed to sign request: %v", e.Err) +} + +// Unwrap returns the underlying error cause +func (e *SigningError) Unwrap() error { + return e.Err +} + +// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that +// switches between unsigned and signed payload based on TLS state for request. 
+// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. +// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . +// +// Usage example - +// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to +// dynamically switch between unsigned and signed payload based on TLS state for request. +func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + return err +} + +// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. +type dynamicPayloadSigningMiddleware struct { +} + +// ID returns the resolver identifier +func (m *dynamicPayloadSigningMiddleware) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets a resolver that directs to the payload sha256 compute handler. +func (m *dynamicPayloadSigningMiddleware) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + // if TLS is enabled, use unsigned payload when supported + if strings.EqualFold(req.URL.Scheme, "https") { + return (&unsignedPayload{}).HandleBuild(ctx, in, next) + } + + // else fall back to signed payload + return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) +} + +// unsignedPayload sets the SigV4 request payload hash to unsigned. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. +type unsignedPayload struct{} + +// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation +// middleware stack +func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&unsignedPayload{}, middleware.After) +} + +// ID returns the unsignedPayload identifier +func (m *unsignedPayload) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets the payload hash to be an unsigned payload +func (m *unsignedPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call). + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.UnsignedPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + return next.HandleBuild(ctx, in) +} + +// computePayloadSHA256 computes SHA256 payload hash to sign. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). 
+// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. +type computePayloadSHA256 struct{} + +// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the +// operation middleware stack +func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { + return stack.Build.Add(&computePayloadSHA256{}, middleware.After) +} + +// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the +// operation middleware stack +func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + return err +} + +// ID is the middleware name +func (m *computePayloadSHA256) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild compute the payload hash for the request payload +func (m *computePayloadSHA256) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call) + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { + return next.HandleBuild(ctx, in) + } + + hash := sha256.New() + if stream := req.GetStream(); stream != nil { + _, err = io.Copy(hash, stream) + if err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to compute payload hash, %w", err), + } + } + + if err := req.RewindStream(); err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to seek body to start, %w", err), + } + } + } + + ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + + return next.HandleBuild(ctx, in) +} + +// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the +// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. +// +// Use this to disable computing the Payload SHA256 checksum and instead use +// UNSIGNED-PAYLOAD for the SHA256 value. +func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + return err +} + +// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// the Payload hash stored in the context. 
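The value this middleware stores is simply the lowercase hex SHA-256 of the request body; an empty body yields the `EmptyStringSHA256` constant declared earlier in this diff. A quick standard-library check of that fact:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256(nil) // empty request body
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```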
+type contentSHA256Header struct{} + +// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the +// operation middleware stack +func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) +} + +// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware +// from the operation middleware stack +func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + return err +} + +// ID returns the ContentSHA256HeaderMiddleware identifier +func (m *contentSHA256Header) ID() string { + return "SigV4ContentSHA256Header" +} + +// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// stored in the context. +func (m *contentSHA256Header) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) + + return next.HandleBuild(ctx, in) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Signer HTTPSigner + LogSigning bool +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +type SignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + signer HTTPSigner + logSigning bool +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + signer: options.Signer, + logSigning: options.LogSigning, + } +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if !haveCredentialProvider(s.credentialsProvider) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning 
= s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} + } + + ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) + + return next.HandleFinalize(ctx, in) +} + +type streamingEventsPayload struct{} + +// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. +func AddStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Build.Add(&streamingEventsPayload{}, middleware.After) +} + +func (s *streamingEventsPayload) ID() string { + return computePayloadHashMiddlewareID +} + +func (s *streamingEventsPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.StreamingEventsPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + + return next.HandleBuild(ctx, in) +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + const authHeaderSignatureElem = "Signature=" + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func haveCredentialProvider(p aws.CredentialsProvider) bool { + if p == nil { + return false + } + switch p.(type) { + case aws.AnonymousCredentials, + *aws.AnonymousCredentials: + return false + } + + return true +} + +type payloadHashKey struct{} + +// GetPayloadHash retrieves the payload hash to use for signing +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPayloadHash(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) + return v +} + +// SetPayloadHash sets the payload hash to be used for signing the request +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetPayloadHash(ctx context.Context, hash string) context.Context { + return middleware.WithStackValue(ctx, payloadHashKey{}, hash) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go new file mode 100644 index 000000000000..e1a066512437 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go @@ -0,0 +1,127 @@ +package v4 + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyHTTP "github.com/aws/smithy-go/transport/http" +) + +// HTTPPresigner is an interface to a SigV4 signer that can sign create a +// presigned URL for a HTTP requests. 
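`GetSignedRequestSignature` above scans the `Authorization` header for the `Signature=` element, falling back to the `X-Amz-Signature` query parameter for presigned URLs. A standalone sketch of that parse, using a shortened, made-up header value (a real signature is 64 hex characters):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// Illustrative Authorization header in SigV4 form.
	auth := "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20130524/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-date, Signature=deadbeef"

	const elem = "Signature="
	for _, part := range strings.Split(auth, ", ") {
		if idx := strings.Index(part, elem); idx >= 0 {
			sig, err := hex.DecodeString(part[idx+len(elem):])
			fmt.Println(sig, err) // [222 173 190 239] <nil>
		}
	}
}
```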
+type HTTPPresigner interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignedHTTPRequest provides the URL and signed headers that are included +// in the presigned URL. +type PresignedHTTPRequest struct { + URL string + Method string + SignedHeader http.Header +} + +// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. +type PresignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Presigner HTTPPresigner + LogSigning bool +} + +// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a +// presigned URL for an HTTP request. +// +// Will short circuit the middleware stack and not forward onto the next +// Finalize handler. +type PresignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + presigner HTTPPresigner + logSigning bool +} + +// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware +// initialized with the presigner. +func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { + return &PresignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + presigner: options.Presigner, + logSigning: options.LogSigning, + } +} + +// ID provides the middleware ID. +func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } + +// HandleFinalize will take the provided input and create a presigned url for +// the http request using the SigV4 presign authentication scheme. +// +// Since the signed request is not a valid HTTP request +func (s *PresignHTTPRequestMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyHTTP.Request) + if !ok { + return out, metadata, &SigningError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + httpReq := req.Build(ctx) + if !haveCredentialProvider(s.credentialsProvider) { + out.Result = &PresignedHTTPRequest{ + URL: httpReq.URL.String(), + Method: httpReq.Method, + SignedHeader: http.Header{}, + } + + return out, metadata, nil + } + + signingName := awsmiddleware.GetSigningName(ctx) + signingRegion := awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{ + Err: fmt.Errorf("computed payload hash missing from context"), + } + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to retrieve credentials: %w", err), + } + } + + u, h, err := s.presigner.PresignHTTP(ctx, credentials, + httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to sign http request, %w", err), + } + } + + out.Result = &PresignedHTTPRequest{ + URL: u, + Method: httpReq.Method, + SignedHeader: h, + } + + return out, metadata, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go new file mode 100644 index 000000000000..66aa2bd6ab0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -0,0 +1,86 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "github.com/aws/aws-sdk-go-v2/aws" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "strings" + "time" +) + +// EventStreamSigner is an AWS EventStream protocol signer. +type EventStreamSigner interface { + GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) +} + +// StreamSignerOptions is the configuration options for StreamSigner. +type StreamSignerOptions struct{} + +// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads. +type StreamSigner struct { + options StreamSignerOptions + + credentials aws.Credentials + service string + region string + + prevSignature []byte + + signingKeyDeriver *v4Internal.SigningKeyDeriver +} + +// NewStreamSigner returns a new AWS EventStream protocol signer. +func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner { + o := StreamSignerOptions{} + + for _, fn := range optFns { + fn(&o) + } + + return &StreamSigner{ + options: o, + credentials: credentials, + service: service, + region: region, + signingKeyDeriver: v4Internal.NewSigningKeyDeriver(), + prevSignature: seedSignature, + } +} + +// GetSignature signs the provided header and payload bytes. +func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + prevSignature := s.prevSignature + + st := v4Internal.NewSigningTime(signingTime) + + sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) + + scope := v4Internal.BuildCredentialScope(st, s.region, s.service) + + stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st) + + signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign)) + s.prevSignature = signature + + return signature, nil +} + +func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string { + hash := sha256.New() + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + signingTime.TimeFormat(), + credentialScope, + hex.EncodeToString(previousSignature), + hex.EncodeToString(makeHash(hash, headers)), + hex.EncodeToString(makeHash(hash, payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go new file mode 100644 index 000000000000..06ba7773ab50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -0,0 +1,542 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. 
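The `URL.Opaque` escape-control described here can be exercised directly with the standard library; a sketch with an illustrative host and a pre-escaped path:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/some/path%2Bname", nil)

	// Force the already-escaped URI to be used verbatim. The leading "//"
	// and hostname are required for the Opaque form to round-trip.
	req.URL.Opaque = "//example.com/some/path%2Bname"

	fmt.Println(req.URL.EscapedPath()) // fallback the signer would otherwise use
	fmt.Println(req.URL.Opaque)        // raw URI that will be signed and sent
}
```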
+// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + "net/http" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/logging" +) + +const ( + signingAlgorithm = "AWS4-HMAC-SHA256" + authorizationHeader = "Authorization" +) + +// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests +type HTTPSigner interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error +} + +type keyDerivator interface { + DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte +} + +// SignerOptions is the SigV4 Signer options. +type SignerOptions struct { + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // The logger to send log messages to. + Logger logging.Logger + + // Enable logging of signed requests. + // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent + // presigned URL. + LogSigning bool +} + +// Signer applies AWS v4 signing to given request. 
Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + options SignerOptions + keyDerivator keyDerivator +} + +// NewSigner returns a new SigV4 Signer +func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { + options := SignerOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} +} + +type httpSigner struct { + Request *http.Request + ServiceName string + Region string + Time v4Internal.SigningTime + Credentials aws.Credentials + KeyDerivator keyDerivator + IsPreSign bool + + PayloadHash string + + DisableHeaderHoisting bool + DisableURIPathEscaping bool +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope + if s.IsPreSign { + query.Set(v4Internal.AmzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + var urlValues url.Values + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) + } + + var rawQuery strings.Builder + rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery.String(), + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery.WriteString("&X-Amz-Signature=") + rawQuery.WriteString(signingSignature) + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery.String() + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + 2 + + len(signedHeaders) + len(signedHeadersStr) + 2 + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + 
parts.WriteString(commaSpace) + parts.WriteString(signature) + parts.WriteString(signingSignature) + return parts.String() +} + +// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The passed in request will be modified in place. +func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r, + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return err + } + + logSigningInfo(ctx, options, &signedRequest, false) + + return nil +} + +// PresignHTTP signs AWS v4 requests with the payload hash, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns the signed URL and the map of HTTP headers that were included in the +// signature or an error if signing the request failed. For presigned requests +// these headers and their values must be included on the HTTP request when it +// is made. This is helpful to know what header values need to be shared with +// the party the presigned request will be distributed to. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// PresignHTTP differs from SignHTTP in that it will sign the request using +// query string instead of header values. 
This allows you to share the +// Presigned Request's URL with third parties, or distribute it throughout your +// system with minimal dependencies. +// +// PresignHTTP will not set the expires time of the presigned request +// automatically. To specify the expiration duration for a request, add the +// "X-Amz-Expires" query parameter on the request with the value as the +// duration in seconds the presigned URL should be considered valid for. This +// parameter is not used by all AWS services, and is most notably used by +// Amazon S3 APIs. +// +// expires := 20 * time.Minute +// query := req.URL.Query() +// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) +// req.URL.RawQuery = query.Encode() +// +// This method does not modify the provided request. +func (s *Signer) PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), +) (signedURI string, signedHeaders http.Header, err error) { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r.Clone(r.Context()), + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + IsPreSign: true, + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return "", nil, err + } + + logSigningInfo(ctx, options, &signedRequest, true) + + signedHeaders = make(http.Header) + + // For the signed headers we canonicalize the header keys in the returned map. + // This avoids situations where the standard library can double headers, like the Host header. For example, the standard + // library will set the Host header, even if it is present in lower-case form. + for k, v := range signedRequest.SignedHeaders { + key := textproto.CanonicalMIMEHeaderKey(k) + signedHeaders[key] = append(signedHeaders[key], v...) + } + + return signedRequest.Request.URL.String(), signedHeaders, nil +} + +func (s *httpSigner) buildCredentialScope() string { + return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) +} + +func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} + +func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { + signed = make(http.Header) + + var headers []string + const hostHeader = "host" + headers = append(headers, hostHeader) + signed[hostHeader] = append(signed[hostHeader], host) + + if length > 0 { + const contentLengthHeader = "content-length" + headers = append(headers, contentLengthHeader) + signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) + } + + for k, v := range header { + if !rule.IsValid(k) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
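+ // Header keys differing only by case collapse into one lower-case entry, as SigV4 canonicalization requires.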
+ continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. + values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.TimeFormat(), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) + return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.TimeFormat() + + if s.IsPreSign { + query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) + if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + query.Set("X-Amz-Security-Token", sessionToken) + } + + query.Set(v4Internal.AmzDateKey, amzDate) + return + } + + headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) + + if len(s.Credentials.SessionToken) > 0 { + headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) + } +} + +func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go new file mode 
100644 index 000000000000..f3fc4d610dcd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go @@ -0,0 +1,297 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return ptr.Bool(v) +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + return ptr.BoolSlice(vs) +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + return ptr.BoolMap(vs) +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return ptr.Byte(v) +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + return ptr.ByteSlice(vs) +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + return ptr.ByteMap(vs) +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return ptr.String(v) +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + return ptr.StringSlice(vs) +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + return ptr.StringMap(vs) +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return ptr.Int(v) +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + return ptr.IntSlice(vs) +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + return ptr.IntMap(vs) +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return ptr.Int8(v) +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + return ptr.Int8Slice(vs) +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + return ptr.Int8Map(vs) +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return ptr.Int16(v) +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + return ptr.Int16Slice(vs) +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + return ptr.Int16Map(vs) +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return ptr.Int32(v) +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + return ptr.Int32Slice(vs) +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + return ptr.Int32Map(vs) +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return ptr.Int64(v) +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. 
+func Int64Slice(vs []int64) []*int64 { + return ptr.Int64Slice(vs) +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + return ptr.Int64Map(vs) +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return ptr.Uint(v) +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + return ptr.UintSlice(vs) +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + return ptr.UintMap(vs) +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return ptr.Uint8(v) +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + return ptr.Uint8Slice(vs) +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + return ptr.Uint8Map(vs) +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return ptr.Uint16(v) +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + return ptr.Uint16Slice(vs) +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + return ptr.Uint16Map(vs) +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return ptr.Uint32(v) +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + return ptr.Uint32Slice(vs) +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + return ptr.Uint32Map(vs) +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return ptr.Uint64(v) +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + return ptr.Uint64Slice(vs) +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + return ptr.Uint64Map(vs) +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return ptr.Float32(v) +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + return ptr.Float32Slice(vs) +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + return ptr.Float32Map(vs) +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return ptr.Float64(v) +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + return ptr.Float64Slice(vs) +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + return ptr.Float64Map(vs) +} + +// Time returns a pointer value for the time.Time value passed in. 
+func Time(v time.Time) *time.Time { + return ptr.Time(v) +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + return ptr.TimeSlice(vs) +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + return ptr.TimeMap(vs) +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return ptr.Duration(v) +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + return ptr.DurationSlice(vs) +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + return ptr.DurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go new file mode 100644 index 000000000000..26d90719b2d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -0,0 +1,310 @@ +package http + +import ( + "crypto/tls" + "github.com/aws/aws-sdk-go-v2/aws" + "net" + "net/http" + "reflect" + "sync" + "time" +) + +// Defaults for the HTTPTransportBuilder. +var ( + // Default connection pool options + DefaultHTTPTransportMaxIdleConns = 100 + DefaultHTTPTransportMaxIdleConnsPerHost = 10 + + // Default connection timeouts + DefaultHTTPTransportIdleConnTimeout = 90 * time.Second + DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second + DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second + + // Default to TLS 1.2 for all HTTPS requests. + DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12 +) + +// Timeouts for net.Dialer's network connection. +var ( + DefaultDialConnectTimeout = 30 * time.Second + DefaultDialKeepAliveTimeout = 30 * time.Second +) + +// BuildableClient provides an HTTPClient implementation with options to +// create copies of the HTTPClient when additional configuration is provided. +// +// The client's methods will not share the http.Transport value between copies +// of the BuildableClient. Only exported member values of the Transport and +// optional Dialer will be copied between copies of BuildableClient. +type BuildableClient struct { + transport *http.Transport + dialer *net.Dialer + + initOnce sync.Once + + clientTimeout time.Duration + client *http.Client +} + +// NewBuildableClient returns an initialized client for invoking HTTP +// requests. +func NewBuildableClient() *BuildableClient { + return &BuildableClient{} +} + +// Do implements the HTTPClient interface's Do method to invoke an HTTP request, +// and receive the response. Uses the BuildableClient's current +// configuration to invoke the http.Request. +// +// If connection pooling is enabled (aka HTTP KeepAlive) the client will only +// share pooled connections with its own instance. Copies of the +// BuildableClient will have their own connection pools. +// +// Redirect (3xx) responses will not be followed; the HTTP response received +// will be returned instead. +func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) { + b.initOnce.Do(b.build) + + return b.client.Do(req) +} + +// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
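+// The frozen client is a fixed snapshot of the current configuration; +// subsequent options applied to the BuildableClient do not affect it.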
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client. +func (b *BuildableClient) Freeze() aws.HTTPClient { + cpy := b.clone() + cpy.build() + return cpy.client +} + +func (b *BuildableClient) build() { + b.client = wrapWithLimitedRedirect(&http.Client{ + Timeout: b.clientTimeout, + Transport: b.GetTransport(), + }) +} + +func (b *BuildableClient) clone() *BuildableClient { + cpy := NewBuildableClient() + cpy.transport = b.GetTransport() + cpy.dialer = b.GetDialer() + cpy.clientTimeout = b.clientTimeout + + return cpy +} + +// WithTransportOptions copies the BuildableClient and returns it with the +// http.Transport options applied. +// +// If a non-(*http.Transport) value was set as the round tripper, the round tripper +// will be replaced with a default Transport value before invoking the option +// functions. +func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient { + cpy := b.clone() + + tr := cpy.GetTransport() + for _, opt := range opts { + opt(tr) + } + cpy.transport = tr + + return cpy +} + +// WithDialerOptions copies the BuildableClient and returns it with the +// net.Dialer options applied. Will set the client's http.Transport DialContext +// member. +func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient { + cpy := b.clone() + + dialer := cpy.GetDialer() + for _, opt := range opts { + opt(dialer) + } + cpy.dialer = dialer + + tr := cpy.GetTransport() + tr.DialContext = cpy.dialer.DialContext + cpy.transport = tr + + return cpy +} + +// WithTimeout sets the timeout used by the client for all requests. +func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient { + cpy := b.clone() + cpy.clientTimeout = timeout + return cpy +} + +// GetTransport returns a copy of the client's HTTP Transport. +func (b *BuildableClient) GetTransport() *http.Transport { + var tr *http.Transport + if b.transport != nil { + tr = b.transport.Clone() + } else { + tr = defaultHTTPTransport() + } + + return tr +} + +// GetDialer returns a copy of the client's network dialer. +func (b *BuildableClient) GetDialer() *net.Dialer { + var dialer *net.Dialer + if b.dialer != nil { + dialer = shallowCopyStruct(b.dialer).(*net.Dialer) + } else { + dialer = defaultDialer() + } + + return dialer +} + +// GetTimeout returns a copy of the client's timeout to cancel requests with. +func (b *BuildableClient) GetTimeout() time.Duration { + return b.clientTimeout +} + +func defaultDialer() *net.Dialer { + return &net.Dialer{ + Timeout: DefaultDialConnectTimeout, + KeepAlive: DefaultDialKeepAliveTimeout, + DualStack: true, + } +} + +func defaultHTTPTransport() *http.Transport { + dialer := defaultDialer() + + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: dialer.DialContext, + TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, + MaxIdleConns: DefaultHTTPTransportMaxIdleConns, + MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, + IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout, + ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout, + ForceAttemptHTTP2: true, + TLSClientConfig: &tls.Config{ + MinVersion: DefaultHTTPTransportTLSMinVersion, + }, + } + + return tr +} + +// shallowCopyStruct creates a shallow copy of the passed in source struct, and +// returns that copy of the same struct type.
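+// Only exported fields are copied; unexported fields are left at the zero +// value of the destination struct.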
+func shallowCopyStruct(src interface{}) interface{} { + srcVal := reflect.ValueOf(src) + srcValType := srcVal.Type() + + var returnAsPtr bool + if srcValType.Kind() == reflect.Ptr { + srcVal = srcVal.Elem() + srcValType = srcValType.Elem() + returnAsPtr = true + } + dstVal := reflect.New(srcValType).Elem() + + for i := 0; i < srcValType.NumField(); i++ { + ft := srcValType.Field(i) + if len(ft.PkgPath) != 0 { + // unexported fields have a PkgPath + continue + } + + dstVal.Field(i).Set(srcVal.Field(i)) + } + + if returnAsPtr { + dstVal = dstVal.Addr() + } + + return dstVal.Interface() +} + +// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to +// not follow any redirect other than 307 and 308. No other redirect will be +// followed. +// +// If the client does not have a Transport defined, a new SDK default +// http.Transport configuration will be used. +func wrapWithLimitedRedirect(c *http.Client) *http.Client { + tr := c.Transport + if tr == nil { + tr = defaultHTTPTransport() + } + + cc := *c + cc.CheckRedirect = limitedRedirect + cc.Transport = suppressBadHTTPRedirectTransport{ + tr: tr, + } + + return &cc +} + +// limitedRedirect is a CheckRedirect that prevents the client from following +// any non 307/308 HTTP status code redirects. +// +// The 307 and 308 redirects are allowed because the client must use the +// original HTTP method for the redirected-to location, whereas 301 and 302 +// allow the client to switch to GET for the redirect. +// +// Suppresses all redirect requests with a URL of badHTTPRedirectLocation. +func limitedRedirect(r *http.Request, via []*http.Request) error { + // Request.Response, in CheckRedirect is the response that is triggering + // the redirect. + resp := r.Response + if r.URL.String() == badHTTPRedirectLocation { + resp.Header.Del(badHTTPRedirectLocation) + return http.ErrUseLastResponse + } + + switch resp.StatusCode { + case 307, 308: + // Only allow 307 and 308 redirects as they preserve the method. + return nil + } + + return http.ErrUseLastResponse +} + +// suppressBadHTTPRedirectTransport provides an http.RoundTripper +// implementation that wraps another http.RoundTripper to prevent the HTTP +// client from receiving 301 and 302 redirect responses that lack the required +// Location header. +// +// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect, +// that checks for responses having a URL of badHTTPRedirectLocation, and +// suppresses the redirect. +type suppressBadHTTPRedirectTransport struct { + tr http.RoundTripper +} + +const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation` + +// RoundTrip backfills a stub location when a 301/302 response is received +// without a location. This stub location is used by limitedRedirect to prevent +// the HTTP client from failing when attempting to follow a redirect without a +// location value. +func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := t.tr.RoundTrip(r) + if err != nil { + return resp, err + } + + // S3 is the only known service to return 301 without location header. + // The Go standard library HTTP client will return an opaque error if it + // tries to follow a 301/302 response missing the location header.
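+ // Backfilling a stub Location below lets limitedRedirect detect and suppress + // the redirect instead of surfacing that opaque error.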
+ switch resp.StatusCode { + case 301, 302: + if v := resp.Header.Get("Location"); len(v) == 0 { + resp.Header.Set("Location", badHTTPRedirectLocation) + } + } + + return resp, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go new file mode 100644 index 000000000000..556f54a7f777 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go @@ -0,0 +1,42 @@ +package http + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// removeContentTypeHeader is a build middleware that removes the +// Content-Type header if the Content-Length header is unset or +// is set to zero. +type removeContentTypeHeader struct { +} + +// ID returns the name of the middleware. +func (m *removeContentTypeHeader) ID() string { + return "RemoveContentTypeHeader" +} + +// HandleBuild removes the Content-Type header from the request when the +// request's content length is zero. +func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + // remove contentTypeHeader when content-length is zero + if req.ContentLength == 0 { + req.Header.Del("content-type") + } + + return next.HandleBuild(ctx, in) +} + +// RemoveContentTypeHeader removes the Content-Type header if the +// content length is unset or equal to zero. +func RemoveContentTypeHeader(stack *middleware.Stack) error { + return stack.Build.Add(&removeContentTypeHeader{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go new file mode 100644 index 000000000000..44651c9902df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go @@ -0,0 +1,33 @@ +package http + +import ( + "errors" + "fmt" + + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ResponseError provides the HTTP-centric error type wrapping the underlying error +// with the HTTP response value and the deserialized RequestID.
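+// +// A minimal sketch of checking for it with errors.As (variable names are illustrative): +// +// var re *ResponseError +// if errors.As(err, &re) { +// fmt.Println(re.ServiceRequestID(), re.Response.StatusCode) +// }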
+type ResponseError struct { + *smithyhttp.ResponseError + + // RequestID associated with the response error + RequestID string +} + +// ServiceRequestID returns the request ID associated with the ResponseError +func (e *ResponseError) ServiceRequestID() string { return e.RequestID } + +// Error returns the formatted error +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "https response error StatusCode: %d, RequestID: %s, %v", + e.Response.StatusCode, e.RequestID, e.Err) +} + +// As populates target and returns true if the type of target is an error type that +// the ResponseError embeds (e.g. the AWS HTTP ResponseError) +func (e *ResponseError) As(target interface{}) bool { + return errors.As(e.ResponseError, target) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go new file mode 100644 index 000000000000..8fd14cecd231 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -0,0 +1,54 @@ +package http + +import ( + "context" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddResponseErrorMiddleware adds response error wrapper middleware +func AddResponseErrorMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before request id retriever middleware so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before) +} + +type responseErrorWrapper struct { +} + +// ID returns the middleware identifier +func (m *responseErrorWrapper) ID() string { + return "ResponseErrorWrapper" +} + +func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err == nil { + // Nothing to do when there is no error. + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. + return out, metadata, err + } + + // look for request id in metadata + reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) + + // Wrap the returned smithy error with the request id retrieved from the metadata + err = &ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: resp, + Err: err, + }, + RequestID: reqID, + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go new file mode 100644 index 000000000000..993929bd9b7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go @@ -0,0 +1,104 @@ +package http + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type readResult struct { + n int + err error +} + +// ResponseTimeoutError is an error returned when reads from the response are +// delayed longer than the timeout the read was configured for.
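+// It exposes a Timeout method so retry logic can identify it as a retryable +// timeout condition.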
+type ResponseTimeoutError struct { + TimeoutDur time.Duration +} + +// Timeout returns that the error was caused by a timeout, and can be +// retried. +func (*ResponseTimeoutError) Timeout() bool { return true } + +func (e *ResponseTimeoutError) Error() string { + return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur) +} + +// timeoutReadCloser will handle body reads that take too long. +// A ResponseTimeoutError is returned if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whichever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, &ResponseTimeoutError{TimeoutDur: r.duration} + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the +// response body so that a read that takes too long will return an error. +func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error { + return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After) +} + +// readTimeout wraps the response body with a timeoutReadCloser +type readTimeout struct { + duration time.Duration +} + +// ID returns the id of the middleware +func (*readTimeout) ID() string { + return "ReadResponseTimeout" +} + +// HandleDeserialize implements the DeserializeMiddleware interface +func (m *readTimeout) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + response.Body = &timeoutReadCloser{ + reader: response.Body, + duration: m.duration, + } + out.RawResponse = response + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go new file mode 100644 index 000000000000..cc3ae811402d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go @@ -0,0 +1,42 @@ +package aws + +import ( + "fmt" +) + +// Ternary is an enum allowing an unknown or none state in addition to a bool's +// true and false. +type Ternary int + +func (t Ternary) String() string { + switch t { + case UnknownTernary: + return "unknown" + case FalseTernary: + return "false" + case TrueTernary: + return "true" + default: + return fmt.Sprintf("unknown value, %d", int(t)) + } +} + +// Bool returns true if the value is TrueTernary, false otherwise. +func (t Ternary) Bool() bool { + return t == TrueTernary +} + +// Enumerations for the values of the Ternary type. +const ( + UnknownTernary Ternary = iota + FalseTernary + TrueTernary +) + +// BoolTernary returns a true or false Ternary value for the bool provided.
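+// For example, BoolTernary(true) is TrueTernary and BoolTernary(false) is FalseTernary.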
+func BoolTernary(v bool) Ternary { + if v { + return TrueTernary + } + return FalseTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go new file mode 100644 index 000000000000..5f729d45e1cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go-v2" + +// SDKVersion is the version of this SDK +const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml new file mode 100644 index 000000000000..b11df5082a4a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml @@ -0,0 +1,12 @@ +version: 0.2 + +phases: + build: + commands: + - echo Build started on `date` + - export GOPATH=/go + - export SDK_CODEBUILD_ROOT=`pwd` + - make ci-test-no-generate + post_build: + commands: + - echo Build completed on `date` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md new file mode 100644 index 000000000000..ce448dbe9780 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -0,0 +1,140 @@ +# v1.15.5 (2022-05-09) + +* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) + +# v1.15.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-02-24) + +* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them. +* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as the environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589) +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-01-28) + +* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug. +* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of a hard-coded environment variable for the OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-01-07) + +* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2021-12-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-12-02) + +* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.3 (2021-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.2 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.1 (2021-11-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.3 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-09-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-09-02) + +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-08-04) + +* **Feature**: Adds error handling for deferred close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-07-15) + +* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Adds configuration setting for enabling endpoint discovery. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-20) + +* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. +* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go new file mode 100644 index 000000000000..3e9c20009fee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -0,0 +1,198 @@ +package config + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// defaultLoaders are a slice of functions that will read external configuration +// sources for configuration values. These values are read by the AWSConfigResolvers +// using interfaces to extract specific information from the external configuration. +var defaultLoaders = []loader{ + loadEnvConfig, + loadSharedConfigIgnoreNotExist, +} + +// defaultAWSConfigResolvers are a slice of functions that will resolve external +// configuration values into AWS configuration values. 
+// +// This will set up values such as the AWS configuration's Region. +var defaultAWSConfigResolvers = []awsConfigResolver{ + // Resolves the default configuration the SDK's aws.Config will be + // initialized with. + resolveDefaultAWSConfig, + + // Sets the logger to be used. Could be a user-provided logger, and the client + // logging mode. + resolveLogger, + resolveClientLogMode, + + // Sets the HTTP client and configuration to use for making requests using + // the HTTP transport. + resolveHTTPClient, + resolveCustomCABundle, + + // Sets the endpoint resolving behavior the API Clients will use for making + // requests. Clients default to their own endpoint resolvers; this allows overrides + // to be specified. The resolveEndpointResolver option is deprecated, but + // we still need to set it for backwards compatibility on config + // construction. + resolveEndpointResolver, + resolveEndpointResolverWithOptions, + + // Sets the retry behavior API clients will use within their retry attempt + // middleware. Defaults to unset, allowing API clients to define their own + // retry behavior. + resolveRetryer, + + // Sets the region the API Clients should use for making requests. + resolveRegion, + resolveEC2IMDSRegion, + resolveDefaultRegion, + + // Sets the additional set of middleware stack mutators that will customize + // the API client request pipeline middleware. + resolveAPIOptions, + + // Resolves the DefaultsMode that should be used by SDK clients. If this + // mode is set to DefaultsModeAuto, it is resolved from the runtime environment. + // + // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is + // configured, if provided, before invoking IMDS when the mode is auto. Comes + // before resolving credentials so that those subsequent clients use the + // configured auto mode. + resolveDefaultsModeOptions, + + // Sets the resolved credentials the API clients will use for + // authentication. Provides the SDK's default credential chain. + // + // Should probably be the last step in the resolve chain to ensure that all + // other configurations are resolved first in case downstream credentials + // implementations depend on or can be configured with earlier resolved + // configuration options. + resolveCredentials, +} + +// A Config represents a generic configuration value or set of values. This type +// will be used by the AWSConfigResolvers to extract configuration data. +// +// Generally, the Config type will be used with type assertions against the Provider +// interfaces to extract specific data from the Config. +type Config interface{} + +// A loader is used to load external configuration data and returns it as +// a generic Config type. +// +// The loader should return an error if it fails to load the external configuration, +// the configuration data is malformed, or required components are missing. +type loader func(context.Context, configs) (Config, error) + +// An awsConfigResolver will extract configuration data from the configs slice +// using the provider interfaces to extract specific functionality. The extracted +// configuration values will be written to the AWS Config value. +// +// The resolver should return an error if it fails to extract the data, or if the +// data is malformed or incomplete. +type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error + +// configs is a slice of Config values. These values will be used by the +// AWSConfigResolvers to extract external configuration values to populate the +// AWS Config type. +// +// Use AppendFromLoaders to add additional external Config values that are +// loaded from external sources.
+// +// Use ResolveAWSConfig after external Config values have been added or loaded +// to extract the loaded configuration values into the AWS Config. +type configs []Config + +// AppendFromLoaders iterates over the slice of loaders passed in, calling each +// loader function in order. The external config value returned by the loader +// will be added to the returned configs slice. +// +// If a loader returns an error, this method will stop iterating and return +// that error. +func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) { + for _, fn := range loaders { + cfg, err := fn(ctx, cs) + if err != nil { + return nil, err + } + + cs = append(cs, cfg) + } + + return cs, nil +} + +// ResolveAWSConfig returns an AWS configuration populated with values by calling +// the resolvers slice passed in. Each resolver is called in order. Any resolver +// may overwrite the AWS Configuration value of a previous resolver. +// +// If a resolver returns an error, this method will return that error and stop +// iterating over the resolvers. +func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) { + var cfg aws.Config + + for _, fn := range resolvers { + if err := fn(ctx, &cfg, cs); err != nil { + return aws.Config{}, err + } + } + + return cfg, nil +} + +// ResolveConfig calls the provided function, passing the slice of configuration sources. +// This implements the aws.ConfigResolver interface. +func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { + var cfgs []interface{} + for i := range cs { + cfgs = append(cfgs, cs[i]) + } + return f(cfgs) +} + +// LoadDefaultConfig reads the SDK's default external configurations, and +// populates an AWS Config with the values from the external configurations. +// +// An optional variadic set of additional Config values can be provided as input +// that will be prepended to the configs slice. Use this to add custom configuration. +// The custom configurations must satisfy the respective providers for their data +// or the custom data will be ignored by the resolvers and config loaders. +// +// cfg, err := config.LoadDefaultConfig( context.TODO(), +// WithSharedConfigProfile("test-profile"), +// ) +// if err != nil { +// panic(fmt.Sprintf("failed loading config, %v", err)) +// } +// +// +// The default configuration sources are: +// * Environment Variables +// * Shared Configuration and Shared Credentials files.
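+// +// Individual values can also be overridden programmatically with the With* +// helpers in this package, for example (a sketch using WithRegion): +// +// cfg, err := config.LoadDefaultConfig(context.TODO(), +// WithRegion("us-west-2"), +// )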
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+//    cfg, err := config.LoadDefaultConfig(context.TODO(),
+//       WithSharedConfigProfile("test-profile"),
+//    )
+//    if err != nil {
+//       panic(fmt.Sprintf("failed loading config, %v", err))
+//    }
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+	var options LoadOptions
+	for _, optFn := range optFns {
+		if err := optFn(&options); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	// assign Load Options to configs
+	var cfgCpy = configs{options}
+
+	cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders)
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	return cfg, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
new file mode 100644
index 000000000000..20b66367ffd2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
@@ -0,0 +1,47 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+const execEnvVar = "AWS_EXECUTION_ENV"
+
+// DefaultsModeOptions is the set of options used to configure the SDK's DefaultsMode resolution.
+type DefaultsModeOptions struct {
+	// The SDK configuration defaults mode. Defaults to legacy if not specified.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
+	Mode aws.DefaultsMode
+
+	// The EC2 Instance Metadata Client that should be used when performing environment
+	// discovery when aws.DefaultsModeAuto is set.
+	//
+	// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
+	// the AWS_EC2_METADATA_DISABLED environment variable.
+	IMDSClient *imds.Client
+}
+
+func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
+	getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
+	// honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
+	select {
+	case <-ctx.Done():
+		return aws.RuntimeEnvironment{}, err
+	default:
+	}
+
+	var imdsRegion string
+	if err == nil {
+		imdsRegion = getRegionOutput.Region
+	}
+
+	return aws.RuntimeEnvironment{
+		EnvironmentIdentifier:     aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
+		Region:                    envConfig.Region,
+		EC2InstanceMetadataRegion: imdsRegion,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
new file mode 100644
index 000000000000..31648ffb5747
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
@@ -0,0 +1,20 @@
+// Package config provides utilities for loading configuration from multiple
+// sources that can be used to configure the SDK's API clients, and utilities.
+//
+// The config package will load configuration from environment variables, AWS
+// shared configuration file (~/.aws/config), and AWS shared credentials file
+// (~/.aws/credentials).
+//
+// Use LoadDefaultConfig to load configuration from all the SDK's supported
+// sources, and resolve credentials using the SDK's default credential chain.
+//
+// LoadDefaultConfig allows for a variadic list of additional Config sources that can
+// provide one or more configuration values which can be used to programmatically control the resolution
+// of a specific value, or allow for a broader range of additional configuration sources not supported by the SDK.
+// A Config source implements one or more provider interfaces defined in this package.
Config sources passed in will +// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources +// implement the same provider interface, priority will be handled by the order in which the sources were passed in. +// +// A number of helpers (prefixed by ``With``) are provided in this package that implement their respective provider +// interface. These helpers should be used for overriding configuration programmatically at runtime. +package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go new file mode 100644 index 000000000000..18c8e0121b9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -0,0 +1,665 @@ +package config + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" +) + +// CredentialsSourceName provides a name of the provider when config is +// loaded from environment. +const CredentialsSourceName = "EnvConfigCredentials" + +// Environment variables that will be read for configuration values. +const ( + awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsAccessKeyEnvVar = "AWS_ACCESS_KEY" + + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSecretKeyEnvVar = "AWS_SECRET_KEY" + + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + + awsRegionEnvVar = "AWS_REGION" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + + awsProfileEnvVar = "AWS_PROFILE" + awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" + + awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" + + awsConfigFileEnvVar = "AWS_CONFIG_FILE" + + awsCustomCABundleEnvVar = "AWS_CA_BUNDLE" + + awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" + + awsRoleARNEnvVar = "AWS_ROLE_ARN" + awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME" + + awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY" + + awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION" + + awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" + + awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + + awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" + + awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT" + + awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT" + + awsDefaultMode = "AWS_DEFAULTS_MODE" + + awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" + awsRetryMode = "AWS_RETRY_MODE" +) + +var ( + credAccessEnvKeys = []string{ + awsAccessKeyIDEnvVar, + awsAccessKeyEnvVar, + } + credSecretEnvKeys = []string{ + awsSecretAccessKeyEnvVar, + awsSecretKeyEnvVar, + } + regionEnvKeys = []string{ + awsRegionEnvVar, + awsDefaultRegionEnvVar, + } + profileEnvKeys = []string{ + awsProfileEnvVar, + awsDefaultProfileEnvVar, + } +) + +// EnvConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type EnvConfig struct { + // Environment configuration values. 
If set, both Access Key ID and Secret Access
+	// Key must be provided. Session Token may optionally also be provided,
+	// but is not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Credentials aws.Credentials
+
+	// ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+	// using the endpointcreds.Provider
+	ContainerCredentialsEndpoint string
+
+	// ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+	// credentials from the container endpoint.
+	ContainerCredentialsRelativePath string
+
+	// ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+	// header when attempting to retrieve credentials from the container credentials endpoint.
+	ContainerAuthorizationToken string
+
+	// Region value will instruct the SDK where to make service API requests to. If it is
+	// not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-west-2
+	// AWS_DEFAULT_REGION=us-west-2
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	// AWS_DEFAULT_PROFILE=my_profile
+	SharedConfigProfile string
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the config, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	// AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion *bool
+
+	// Specifies if the EC2 IMDS service client is enabled.
+	//
+	// AWS_EC2_METADATA_DISABLED=true
+	EC2IMDSClientEnableState imds.ClientEnableState
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies if the S3 service should disable multi-region access points
+	// support.
+	//
+	// AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// AWS_USE_DUALSTACK_ENDPOINT=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// AWS_USE_FIPS_ENDPOINT=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies the SDK Defaults Mode used by services.
+	//
+	// AWS_DEFAULTS_MODE=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make calling
+	// an operation that fails with a retryable error.
+	//
+	// AWS_MAX_ATTEMPTS=3
+	RetryMaxAttempts int
+
+	// Specifies the retry model the API client will be created with.
+	//
+	// AWS_RETRY_MODE=standard
+	RetryMode aws.RetryMode
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables,
+// returning a Config typed EnvConfig to satisfy the loader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+	return NewEnvConfig()
+}
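Each paired variable above is read in a fixed order, with the primary name winning over the legacy alias (AWS_REGION over AWS_DEFAULT_REGION, AWS_ACCESS_KEY_ID over AWS_ACCESS_KEY, and so on). A small illustrative check of that precedence using the exported NewEnvConfig; the region values are placeholders:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// AWS_REGION takes precedence over AWS_DEFAULT_REGION, mirroring the
	// ordering of regionEnvKeys above. Values here are placeholders.
	os.Setenv("AWS_DEFAULT_REGION", "eu-west-1")
	os.Setenv("AWS_REGION", "us-east-1")

	envCfg, err := config.NewEnvConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(envCfg.Region) // us-east-1
}
```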
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+	var cfg EnvConfig
+
+	creds := aws.Credentials{
+		Source: CredentialsSourceName,
+	}
+	setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+	setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+	if creds.HasKeys() {
+		creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+		cfg.Credentials = creds
+	}
+
+	cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
+	cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
+	cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+
+	setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+	setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+	cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
+	cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+
+	cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+
+	cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
+
+	cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
+	cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
+
+	if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
+	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
+		return cfg, err
+	}
+	cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
+		return cfg, err
+	}
+
+	if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
+		return cfg, err
+	}
+	if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
+		return cfg, err
+	}
+
+	return cfg, nil
+}
+
+func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if it was
+// specified and is not 0.
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return c.RetryMaxAttempts, true, nil
+}
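The (value, found, error) triple used by these getters lets callers distinguish "unset" from a legitimate zero value, which is what allows later sources to fill in gaps. A brief illustrative use; the env value is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	os.Setenv("AWS_MAX_ATTEMPTS", "5") // placeholder value

	envCfg, err := config.NewEnvConfig()
	if err != nil {
		panic(err)
	}

	// found is false when the variable was absent or zero, so a caller can
	// fall back to other sources instead of treating 0 as a real setting.
	attempts, found, err := envCfg.GetRetryMaxAttempts(context.TODO())
	fmt.Println(attempts, found, err) // 5 true <nil>
}
```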
+// GetRetryMode returns the RetryMode from AWS_RETRY_MODE if it was specified
+// and is a valid value.
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return c.RetryMode, true, nil
+}
+
+func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		switch {
+		case strings.EqualFold(value, "true"):
+			*state = imds.ClientDisabled
+		case strings.EqualFold(value, "false"):
+			*state = imds.ClientEnabled
+		default:
+			continue
+		}
+		break
+	}
+}
+
+func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			if ok := mode.SetFromString(value); !ok {
+				return fmt.Errorf("invalid %s value: %s", k, value)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			*mode, err = aws.ParseRetryMode(value)
+			if err != nil {
+				return fmt.Errorf("invalid %s value, %w", k, err)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		if err := mode.SetFromString(value); err != nil {
+			return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+		}
+	}
+	return nil
+}
+
+// getRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(c.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedConfigFile; len(v) > 0 {
+		files = append(files, v)
+	}
+
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedCredentialsFile; len(v) > 0 {
+		files = append(files, v)
+	}
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file was set.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { + if c.S3UseARNRegion == nil { + return false, false, nil + } + + return *c.S3UseARNRegion, true, nil +} + +// GetS3DisableMultRegionAccessPoints returns whether to disable multi-region access point +// support for the S3 client. +func (c EnvConfig) GetS3DisableMultRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { + if c.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + + return *c.S3DisableMultiRegionAccessPoints, true, nil +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. +func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + + return c.UseDualStackEndpoint, true, nil +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. +func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + + return c.UseFIPSEndpoint, true, nil +} + +func setStringFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func setIntFromEnvVal(dst *int, keys []string) error { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %s=%s, %w", k, v, err) + } + *dst = int(i) + break + } + } + + return nil +} + +func setBoolPtrFromEnvVal(dst **bool, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + if *dst == nil { + *dst = new(bool) + } + + switch { + case strings.EqualFold(value, "false"): + **dst = false + case strings.EqualFold(value, "true"): + **dst = true + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + k, value) + } + break + } + + return nil +} + +func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, endpointDiscoveryDisabled): + *dst = aws.EndpointDiscoveryDisabled + case strings.EqualFold(value, endpointDiscoveryEnabled): + *dst = aws.EndpointDiscoveryEnabled + case strings.EqualFold(value, endpointDiscoveryAuto): + *dst = aws.EndpointDiscoveryAuto + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false or auto", + k, value) + } + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { + for _, k := range keys { + 
value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. +func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. +func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return c.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go new file mode 100644 index 000000000000..654a7a77fb7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go @@ -0,0 +1,4 @@ +package config + +//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go +//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go new file mode 100644 index 000000000000..d909ccfb3ef9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package config + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.15.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go new file mode 100644 index 000000000000..22e6019fbd00 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -0,0 +1,926 @@ +package config + +import ( + "context" + "io" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// LoadOptionsFunc is a type alias for LoadOptions functional option +type LoadOptionsFunc func(*LoadOptions) error + +// LoadOptions are discrete set of options that are valid for loading the +// configuration +type LoadOptions struct { + + // Region is the region to send requests to. + Region string + + // Credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // HTTPClient the SDK's API clients will use to invoke HTTP requests. + HTTPClient HTTPClient + + // EndpointResolver that can be used to provide or override an endpoint for + // the given service and region. + // + // See the `aws.EndpointResolver` documentation on usage. + // + // Deprecated: See EndpointResolverWithOptions + EndpointResolver aws.EndpointResolver + + // EndpointResolverWithOptions that can be used to provide or override an + // endpoint for the given service and region. + // + // See the `aws.EndpointResolverWithOptions` documentation on usage. + EndpointResolverWithOptions aws.EndpointResolverWithOptions + + // RetryMaxAttempts specifies the maximum number attempts an API client + // will call an operation that fails with a retryable error. + // + // This value will only be used if Retryer option is nil. + RetryMaxAttempts int + + // RetryMode specifies the retry model the API client will be created with. + // + // This value will only be used if Retryer option is nil. + RetryMode aws.RetryMode + + // Retryer is a function that provides a Retryer implementation. A Retryer + // guides how HTTP requests should be retried in case of recoverable + // failures. + // + // If not nil, RetryMaxAttempts, and RetryMode will be ignored. + Retryer func() aws.Retryer + + // APIOptions provides the set of middleware mutations modify how the API + // client requests will be handled. This is useful for adding additional + // tracing data to a request, or changing behavior of the SDK's client. + APIOptions []func(*middleware.Stack) error + + // Logger writer interface to write logging messages to. + Logger logging.Logger + + // ClientLogMode is used to configure the events that will be sent to the + // configured logger. This can be used to configure the logging of signing, + // retries, request, and responses of the SDK clients. + // + // See the ClientLogMode type documentation for the complete set of logging + // modes and available configuration. + ClientLogMode *aws.ClientLogMode + + // SharedConfigProfile is the profile to be used when loading the SharedConfig + SharedConfigProfile string + + // SharedConfigFiles is the slice of custom shared config files to use when + // loading the SharedConfig. 
A non-default profile used within a config file
+	// must have its name prefixed with 'profile '. eg [profile xyz]
+	// indicates a profile named 'xyz'. For more on the format of the
+	// config file, see the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+	//
+	// If duplicate profiles are provided within the same, or across multiple
+	// shared config files, the next parsed profile will override only the
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in shared credentials file
+	// take precedence.
+	SharedConfigFiles []string
+
+	// SharedCredentialsFiles is the slice of custom shared credentials files to
+	// use when loading the SharedConfig. The profile name used within a
+	// credentials file must not have the 'profile ' prefix. eg [xyz] indicates a
+	// profile named 'xyz'. A profile declared as [profile xyz] will be
+	// ignored. For more on the format of the credentials file, see
+	// the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+	//
+	// If duplicate profiles are provided within the same, or across multiple
+	// shared credentials files, the next parsed profile will override only
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in shared credentials file
+	// take precedence.
+	SharedCredentialsFiles []string
+
+	// CustomCABundle is a reader for custom CA bundle PEM bytes
+	CustomCABundle io.Reader
+
+	// DefaultRegion is the fallback region, used if a region was not resolved
+	// from other sources
+	DefaultRegion string
+
+	// UseEC2IMDSRegion indicates if the SDK should retrieve the region
+	// from the EC2 Metadata service
+	UseEC2IMDSRegion *UseEC2IMDSRegion
+
+	// CredentialsCacheOptions is a function for setting the
+	// aws.CredentialsCacheOptions
+	CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+
+	// ProcessCredentialOptions is a function for setting
+	// the processcreds.Options
+	ProcessCredentialOptions func(*processcreds.Options)
+
+	// EC2RoleCredentialOptions is a function for setting
+	// the ec2rolecreds.Options
+	EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+	// EndpointCredentialOptions is a function for setting
+	// the endpointcreds.Options
+	EndpointCredentialOptions func(*endpointcreds.Options)
+
+	// WebIdentityRoleCredentialOptions is a function for setting
+	// the stscreds.WebIdentityRoleOptions
+	WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+	// AssumeRoleCredentialOptions is a function for setting the
+	// stscreds.AssumeRoleOptions
+	AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+	// SSOProviderOptions is a function for setting
+	// the ssocreds.Options
+	SSOProviderOptions func(options *ssocreds.Options)
+
+	// LogConfigurationWarnings when set to true, enables logging
+	// configuration warnings
+	LogConfigurationWarnings *bool
+
+	// S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+	// the region the client's requests are sent to.
+	S3UseARNRegion *bool
+
+	// EnableEndpointDiscovery specifies if endpoint discovery is enabled for
+	// the client.
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies if the EC2 IMDS service client is enabled. + // + // AWS_EC2_METADATA_DISABLED=true + EC2IMDSClientEnableState imds.ClientEnableState + + // Specifies the EC2 Instance Metadata Service default endpoint selection + // mode (IPv4 or IPv6) + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If + // specified it overrides EC2IMDSEndpointMode. + EC2IMDSEndpoint string + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies the SDK configuration mode for defaults. + DefaultsModeOptions DefaultsModeOptions +} + +func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { + if len(o.DefaultsModeOptions.Mode) == 0 { + return "", false, nil + } + return o.DefaultsModeOptions.Mode, true, nil +} + +// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the +// LoadOptions and not 0. +func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if o.RetryMaxAttempts == 0 { + return 0, false, nil + } + return o.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the RetryMode specified in the LoadOptions. +func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if len(o.RetryMode) == 0 { + return "", false, nil + } + return o.RetryMode, true, nil +} + +func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) { + if o.DefaultsModeOptions.IMDSClient == nil { + return nil, false, nil + } + return o.DefaultsModeOptions.IMDSClient, true, nil +} + +// getRegion returns Region from config's LoadOptions +func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) { + if len(o.Region) == 0 { + return "", false, nil + } + + return o.Region, true, nil +} + +// WithRegion is a helper function to construct functional options +// that sets Region on config's LoadOptions. Setting the region to +// an empty string, will result in the region value being ignored. +// If multiple WithRegion calls are made, the last call overrides +// the previous call values. +func WithRegion(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Region = v + return nil + } +} + +// getDefaultRegion returns DefaultRegion from config's LoadOptions +func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { + if len(o.DefaultRegion) == 0 { + return "", false, nil + } + + return o.DefaultRegion, true, nil +} + +// WithDefaultRegion is a helper function to construct functional options +// that sets a DefaultRegion on config's LoadOptions. Setting the default +// region to an empty string, will result in the default region value +// being ignored. If multiple WithDefaultRegion calls are made, the last +// call overrides the previous call values. Note that both WithRegion and +// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call +// when resolving region. 
+func WithDefaultRegion(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.DefaultRegion = v + return nil + } +} + +// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions +func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { + if len(o.SharedConfigProfile) == 0 { + return "", false, nil + } + + return o.SharedConfigProfile, true, nil +} + +// WithSharedConfigProfile is a helper function to construct functional options +// that sets SharedConfigProfile on config's LoadOptions. Setting the shared +// config profile to an empty string, will result in the shared config profile +// value being ignored. +// If multiple WithSharedConfigProfile calls are made, the last call overrides +// the previous call values. +func WithSharedConfigProfile(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigProfile = v + return nil + } +} + +// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions +func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedConfigFiles == nil { + return nil, false, nil + } + + return o.SharedConfigFiles, true, nil +} + +// WithSharedConfigFiles is a helper function to construct functional options +// that sets slice of SharedConfigFiles on config's LoadOptions. +// Setting the shared config files to an nil string slice, will result in the +// shared config files value being ignored. +// If multiple WithSharedConfigFiles calls are made, the last call overrides +// the previous call values. +func WithSharedConfigFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigFiles = v + return nil + } +} + +// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions +func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedCredentialsFiles == nil { + return nil, false, nil + } + + return o.SharedCredentialsFiles, true, nil +} + +// WithSharedCredentialsFiles is a helper function to construct functional options +// that sets slice of SharedCredentialsFiles on config's LoadOptions. +// Setting the shared credentials files to an nil string slice, will result in the +// shared credentials files value being ignored. +// If multiple WithSharedCredentialsFiles calls are made, the last call overrides +// the previous call values. +func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedCredentialsFiles = v + return nil + } +} + +// getCustomCABundle returns CustomCABundle from LoadOptions +func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { + if o.CustomCABundle == nil { + return nil, false, nil + } + + return o.CustomCABundle, true, nil +} + +// WithCustomCABundle is a helper function to construct functional options +// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle +// to nil will result in custom CA Bundle value being ignored. +// If multiple WithCustomCABundle calls are made, the last call overrides the +// previous call values. +func WithCustomCABundle(v io.Reader) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.CustomCABundle = v + return nil + } +} + +// UseEC2IMDSRegion provides a regionProvider that retrieves the region +// from the EC2 Metadata service. +type UseEC2IMDSRegion struct { + // If unset will default to generic EC2 IMDS client. 
+ Client *imds.Client +} + +// getRegion attempts to retrieve the region from EC2 Metadata service. +func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { + if ctx == nil { + ctx = context.Background() + } + + client := p.Client + if client == nil { + client = imds.New(imds.Options{}) + } + + result, err := client.GetRegion(ctx, nil) + if err != nil { + return "", false, err + } + if len(result.Region) != 0 { + return result.Region, true, nil + } + return "", false, nil +} + +// getEC2IMDSRegion returns the value of EC2 IMDS region. +func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { + if o.UseEC2IMDSRegion == nil { + return "", false, nil + } + + return o.UseEC2IMDSRegion.getRegion(ctx) +} + +// WithEC2IMDSRegion is a helper function to construct functional options +// that enables resolving EC2IMDS region. The function takes +// in a UseEC2IMDSRegion functional option, and can be used to set the +// EC2IMDS client which will be used to resolve EC2IMDSRegion. +// If no functional option is provided, an EC2IMDS client is built and used +// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last +// call overrides the previous call values. Note that the WithRegion calls takes +// precedence over WithEC2IMDSRegion when resolving region. +func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseEC2IMDSRegion = &UseEC2IMDSRegion{} + + for _, fn := range fnOpts { + fn(o.UseEC2IMDSRegion) + } + return nil + } +} + +// getCredentialsProvider returns the credentials value +func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) { + if o.Credentials == nil { + return nil, false, nil + } + + return o.Credentials, true, nil +} + +// WithCredentialsProvider is a helper function to construct functional options +// that sets Credential provider value on config's LoadOptions. If credentials +// provider is set to nil, the credentials provider value will be ignored. +// If multiple WithCredentialsProvider calls are made, the last call overrides +// the previous call values. +func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Credentials = v + return nil + } +} + +// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions +func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) { + if o.CredentialsCacheOptions == nil { + return nil, false, nil + } + + return o.CredentialsCacheOptions, true, nil +} + +// WithCredentialsCacheOptions is a helper function to construct functional +// options that sets a function to modify the aws.CredentialsCacheOptions the +// aws.CredentialsCache will be configured with, if the CredentialsCache is used +// by the configuration loader. +// +// If multiple WithCredentialsCacheOptions calls are made, the last call +// overrides the previous call values. 
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.CredentialsCacheOptions = v + return nil + } +} + +// getProcessCredentialOptions returns the wrapped function to set processcreds.Options +func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { + if o.ProcessCredentialOptions == nil { + return nil, false, nil + } + + return o.ProcessCredentialOptions, true, nil +} + +// WithProcessCredentialOptions is a helper function to construct functional options +// that sets a function to use processcreds.Options on config's LoadOptions. +// If process credential options is set to nil, the process credential value will +// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ProcessCredentialOptions = v + return nil + } +} + +// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options +func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { + if o.EC2RoleCredentialOptions == nil { + return nil, false, nil + } + + return o.EC2RoleCredentialOptions, true, nil +} + +// WithEC2RoleCredentialOptions is a helper function to construct functional options +// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If +// EC2 role credential options is set to nil, the EC2 role credential options value +// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2RoleCredentialOptions = v + return nil + } +} + +// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options +func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { + if o.EndpointCredentialOptions == nil { + return nil, false, nil + } + + return o.EndpointCredentialOptions, true, nil +} + +// WithEndpointCredentialOptions is a helper function to construct functional options +// that sets a function to use endpointcreds.Options on config's LoadOptions. If +// endpoint credential options is set to nil, the endpoint credential options +// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointCredentialOptions = v + return nil + } +} + +// getWebIdentityRoleCredentialOptions returns the wrapped function +func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { + if o.WebIdentityRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.WebIdentityRoleCredentialOptions, true, nil +} + +// WithWebIdentityRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.WebIdentityRoleOptions +// on config's LoadOptions. If web identity role credentials options is set to nil, +// the web identity role credentials value will be ignored. 
If multiple +// WithWebIdentityRoleCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.WebIdentityRoleCredentialOptions = v + return nil + } +} + +// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { + if o.AssumeRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.AssumeRoleCredentialOptions, true, nil +} + +// WithAssumeRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.AssumeRoleOptions +// on config's LoadOptions. If assume role credentials options is set to nil, +// the assume role credentials value will be ignored. If multiple +// WithAssumeRoleCredentialOptions calls are made, the last call overrides +// the previous call values. +func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.AssumeRoleCredentialOptions = v + return nil + } +} + +func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { + if o.HTTPClient == nil { + return nil, false, nil + } + + return o.HTTPClient, true, nil +} + +// WithHTTPClient is a helper function to construct functional options +// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, +// the HTTPClient value will be ignored. +// If multiple WithHTTPClient calls are made, the last call overrides +// the previous call values. +func WithHTTPClient(v HTTPClient) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.HTTPClient = v + return nil + } +} + +func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { + if o.APIOptions == nil { + return nil, false, nil + } + + return o.APIOptions, true, nil +} + +// WithAPIOptions is a helper function to construct functional options +// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the +// APIOptions value is ignored. If multiple WithAPIOptions calls are +// made, the last call overrides the previous call values. +func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { + return func(o *LoadOptions) error { + if v == nil { + return nil + } + + o.APIOptions = append(o.APIOptions, v...) + return nil + } +} + +func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if o.RetryMaxAttempts == 0 { + return 0, false, nil + } + + return o.RetryMaxAttempts, true, nil +} + +// WithRetryMaxAttempts is a helper function to construct functional options that sets +// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is +// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides +// the previous call values. +// +// Will be ignored of LoadOptions.Retryer or WithRetryer are used. +func WithRetryMaxAttempts(v int) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.RetryMaxAttempts = v + return nil + } +} + +func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if o.RetryMode == "" { + return "", false, nil + } + + return o.RetryMode, true, nil +} + +// WithRetryMode is a helper function to construct functional options that sets +// RetryMode on LoadOptions. 
If RetryMode is unset, the RetryMode value is +// ignored. If multiple WithRetryMode calls are made, the last call overrides +// the previous call values. +// +// Will be ignored of LoadOptions.Retryer or WithRetryer are used. +func WithRetryMode(v aws.RetryMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.RetryMode = v + return nil + } +} + +func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) { + if o.Retryer == nil { + return nil, false, nil + } + + return o.Retryer, true, nil +} + +// WithRetryer is a helper function to construct functional options +// that sets Retryer on LoadOptions. If Retryer is set to nil, the +// Retryer value is ignored. If multiple WithRetryer calls are +// made, the last call overrides the previous call values. +func WithRetryer(v func() aws.Retryer) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Retryer = v + return nil + } +} + +func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) { + if o.EndpointResolver == nil { + return nil, false, nil + } + + return o.EndpointResolver, true, nil +} + +// WithEndpointResolver is a helper function to construct functional options +// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil, +// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls +// are made, the last call overrides the previous call values. +// +// Deprecated: See WithEndpointResolverWithOptions +func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointResolver = v + return nil + } +} + +func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) { + if o.EndpointResolverWithOptions == nil { + return nil, false, nil + } + + return o.EndpointResolverWithOptions, true, nil +} + +// WithEndpointResolverWithOptions is a helper function to construct functional options +// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, +// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls +// are made, the last call overrides the previous call values. +func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointResolverWithOptions = v + return nil + } +} + +func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { + if o.Logger == nil { + return nil, false, nil + } + + return o.Logger, true, nil +} + +// WithLogger is a helper function to construct functional options +// that sets Logger on LoadOptions. If Logger is set to nil, the +// Logger value will be ignored. If multiple WithLogger calls are made, +// the last call overrides the previous call values. +func WithLogger(v logging.Logger) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Logger = v + return nil + } +} + +func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { + if o.ClientLogMode == nil { + return 0, false, nil + } + + return *o.ClientLogMode, true, nil +} + +// WithClientLogMode is a helper function to construct functional options +// that sets client log mode on LoadOptions. If client log mode is set to nil, +// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, +// the last call overrides the previous call values. 
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.ClientLogMode = &v
+		return nil
+	}
+}
+
+func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) {
+	if o.LogConfigurationWarnings == nil {
+		return false, false, nil
+	}
+	return *o.LogConfigurationWarnings, true, nil
+}
+
+// WithLogConfigurationWarnings is a helper function to construct
+// functional options that can be used to set LogConfigurationWarnings
+// on LoadOptions.
+//
+// If multiple WithLogConfigurationWarnings calls are made, the last call
+// overrides the previous call values.
+func WithLogConfigurationWarnings(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.LogConfigurationWarnings = &v
+		return nil
+	}
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) {
+	if o.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+	return *o.S3UseARNRegion, true, nil
+}
+
+// WithS3UseARNRegion is a helper function to construct functional options
+// that can be used to set S3UseARNRegion on LoadOptions.
+// If multiple WithS3UseARNRegion calls are made, the last call overrides
+// the previous call values.
+func WithS3UseARNRegion(v bool) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.S3UseARNRegion = &v
+		return nil
+	}
+}
+
+// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set.
+func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+	return o.EnableEndpointDiscovery, true, nil
+}
+
+// WithEndpointDiscovery is a helper function to construct functional options
+// that can be used to enable endpoint discovery on LoadOptions for supported clients.
+// If multiple WithEndpointDiscovery calls are made, the last call overrides
+// the previous call values.
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EnableEndpointDiscovery = v
+		return nil
+	}
+}
+
+// getSSOProviderOptions returns SSOProviderOptions from LoadOptions
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+	if o.SSOProviderOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options is set to nil,
+// the sso provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SSOProviderOptions = v
+		return nil
+	}
+}
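All of these With* helpers share the LoadOptionsFunc shape, so callers can also write their own option that bundles several settings. A small sketch; `withTeamDefaults` is a hypothetical custom option and the field values are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

// withTeamDefaults is a hypothetical custom option bundling several
// LoadOptions settings; it composes like the package's own With* helpers.
func withTeamDefaults(o *config.LoadOptions) error {
	o.Region = "eu-central-1" // placeholder region
	o.RetryMaxAttempts = 5
	o.RetryMode = aws.RetryModeStandard
	return nil
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(), withTeamDefaults)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```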
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState options resolver interface.
+func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+	if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+		return imds.ClientDefaultEnableState, false, nil
+	}
+
+	return o.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return o.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(o.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return o.EC2IMDSEndpoint, true, nil
+}
+
+// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState.
+func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSClientEnableState = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode.
+func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpointMode = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint.
+func WithEC2IMDSEndpoint(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpoint = v
+		return nil
+	}
+}
+
+// WithUseDualStackEndpoint is a helper function to construct
+// functional options that can be used to set UseDualStackEndpoint on LoadOptions.
+func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseDualStackEndpoint = v
+		return nil
+	}
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+	return o.UseDualStackEndpoint, true, nil
+}
+
+// WithUseFIPSEndpoint is a helper function to construct
+// functional options that can be used to set UseFIPSEndpoint on LoadOptions.
+func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseFIPSEndpoint = v
+		return nil
+	}
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+	return o.UseFIPSEndpoint, true, nil
+}
+
+// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
+//
+// Zero or more functional options can be provided to configure environment
+// discovery when using aws.DefaultsModeAuto.
+func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
+	do := DefaultsModeOptions{
+		Mode: mode,
+	}
+	for _, fn := range optFns {
+		fn(&do)
+	}
+	return func(options *LoadOptions) error {
+		options.DefaultsModeOptions = do
+		return nil
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
new file mode 100644
index 000000000000..b629137c8218
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
@@ -0,0 +1,51 @@
+package config
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an IP address, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	if len(addrs) == 0 {
+		return false, fmt.Errorf("no addrs found for host, %s", host)
+	}
+
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func validateLocalURL(v string) error {
+	u, err := url.Parse(v)
+	if err != nil {
+		return err
+	}
+
+	host := u.Hostname()
+	if len(host) == 0 {
+		return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+	} else if isLoopback, err := isLoopbackHost(host); err != nil {
+		return fmt.Errorf("failed to resolve host %q, %v", host, err)
+	} else if !isLoopback {
+		return fmt.Errorf("invalid endpoint host, %q, only hosts resolving to loopback addresses are allowed", host)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
new file mode 100644
index 000000000000..3f12df1bfe2e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -0,0 +1,533 @@
+package config
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// sharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type sharedConfigProfileProvider interface {
+	getSharedConfigProfile(ctx context.Context) (string, bool, error)
+}
+
+// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigProfileProvider); ok {
+			value, found, err = p.getSharedConfigProfile(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// sharedConfigFilesProvider provides access to the shared config filenames
+// external configuration value.
+type sharedConfigFilesProvider interface {
+	getSharedConfigFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider
+// and returns the value if found.
+// Returns an error if a provider fails before a value is found.
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigFilesProvider); ok {
+			value, found, err = p.getSharedConfigFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// sharedCredentialsFilesProvider provides access to the shared credentials filenames
+// external configuration value.
+type sharedCredentialsFilesProvider interface {
+	getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedCredentialsFilesProvider); ok {
+			value, found, err = p.getSharedCredentialsFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// customCABundleProvider provides access to the custom CA bundle PEM bytes.
+type customCABundleProvider interface {
+	getCustomCABundle(ctx context.Context) (io.Reader, bool, error)
+}
+
+// getCustomCABundle searches the configs for a customCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(customCABundleProvider); ok {
+			value, found, err = p.getCustomCABundle(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// regionProvider provides access to the region external configuration value.
+type regionProvider interface {
+	getRegion(ctx context.Context) (string, bool, error)
+}
+
+// getRegion searches the configs for a regionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(regionProvider); ok {
+			value, found, err = p.getRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2IMDSRegionProvider provides access to the ec2 imds region
+// configuration value.
+type ec2IMDSRegionProvider interface {
+	getEC2IMDSRegion(ctx context.Context) (string, bool, error)
+}
+
+// getEC2IMDSRegion searches the configs for an ec2IMDSRegionProvider and
+// returns the value if found. Returns an error if a provider fails before
+// a value is found.
+func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(ec2IMDSRegionProvider); ok {
+			region, found, err = provider.getEC2IMDSRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsProviderProvider provides access to the credentials external
+// configuration value.
+type credentialsProviderProvider interface {
+	getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error)
+}
+
+// getCredentialsProvider searches the configs for a credentialsProviderProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
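+//
+// Editor's illustration (hedged): LoadOptions satisfies this interface when a
+// provider is supplied with the config package's WithCredentialsProvider
+// helper, e.g.
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithCredentialsProvider(
+//			credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")))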
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(credentialsProviderProvider); ok {
+			p, found, err = provider.getCredentialsProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+type credentialsCacheOptionsProvider interface {
+	getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
+}
+
+// getCredentialsCacheOptionsProvider searches the configs for a
+// credentialsCacheOptionsProvider and returns the first function found.
+func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
+	f func(*aws.CredentialsCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(credentialsCacheOptionsProvider); ok {
+			f, found, err = p.getCredentialsCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+	getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(processCredentialOptions); ok {
+			f, found, err = p.getProcessCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+	getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+			f, found, err = p.getEC2RoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+	getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(defaultRegionProvider); ok {
+			value, found, err = p.getDefaultRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
+// the endpointcreds.Options.
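+//
+// Editor's sketch (hedged): LoadOptions implements this through the
+// WithEndpointCredentialOptions helper, e.g. to supply an authorization token
+// for the HTTP credentials endpoint:
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithEndpointCredentialOptions(func(o *endpointcreds.Options) {
+//			o.AuthorizationToken = "token" // hypothetical value
+//		}))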
+type endpointCredentialOptionsProvider interface {
+	getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
+}
+
+// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(endpointCredentialOptionsProvider); ok {
+			f, found, err = p.getEndpointCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.WebIdentityRoleOptions.
+type webIdentityRoleCredentialOptionsProvider interface {
+	getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error)
+}
+
+// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found
+func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getWebIdentityRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.AssumeRoleOptions.
+type assumeRoleCredentialOptionsProvider interface {
+	getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error)
+}
+
+// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(assumeRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getAssumeRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// HTTPClient is an HTTP client implementation
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// httpClientProvider is an interface for retrieving HTTPClient
+type httpClientProvider interface {
+	getHTTPClient(ctx context.Context) (HTTPClient, bool, error)
+}
+
+// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs
+func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(httpClientProvider); ok {
+			client, found, err = p.getHTTPClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// apiOptionsProvider is an interface for retrieving APIOptions
+type apiOptionsProvider interface {
+	getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error)
+}
+
+// getAPIOptions searches the slice of configs and returns the APIOptions set on configs
+func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(apiOptionsProvider); ok {
+			// retrieve APIOptions from configs and set it on cfg
+			apiOptions, found, err = p.getAPIOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source
+type endpointResolverProvider interface {
+	getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error)
+}
+
+// getEndpointResolver searches the provided config sources for an EndpointResolverFunc that can be used
+// to configure the aws.Config.EndpointResolver value.
+func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverProvider); ok {
+			f, found, err = p.getEndpointResolver(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
+type endpointResolverWithOptionsProvider interface {
+	getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
+}
+
+// getEndpointResolverWithOptions searches the provided config sources for an aws.EndpointResolverWithOptions that can be used
+// to configure the aws.Config.EndpointResolverWithOptions value.
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+			f, found, err = p.getEndpointResolverWithOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+	getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(loggerProvider); ok {
+			l, found, err = p.getLogger(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(clientLogModeProvider); ok {
+			m, found, err = p.getClientLogMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// retryProvider is a configuration provider for a custom Retryer.
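+//
+// Editor's sketch (hedged; retry.NewStandard and retry.AddWithMaxAttempts live
+// in the aws/retry package): a custom retryer is typically supplied as
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithRetryer(func() aws.Retryer {
+//			return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
+//		}))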
+type retryProvider interface {
+	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryProvider); ok {
+			v, found, err = p.getRetryer(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// logConfigurationWarningsProvider is a configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources.
+type logConfigurationWarningsProvider interface {
+	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(logConfigurationWarningsProvider); ok {
+			v, found, err = p.getLogConfigurationWarnings(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
+type ssoCredentialOptionsProvider interface {
+	getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error)
+}
+
+func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(ssoCredentialOptionsProvider); ok {
+			v, found, err = p.getSSOProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeIMDSClientProvider interface {
+	getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error)
+}
+
+func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeIMDSClientProvider); ok {
+			v, found, err = p.getDefaultsModeIMDSClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeProvider interface {
+	getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error)
+}
+
+func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeProvider); ok {
+			v, found, err = p.getDefaultsMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryMaxAttemptsProvider interface {
+	GetRetryMaxAttempts(context.Context) (int, bool, error)
+}
+
+func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryMaxAttemptsProvider); ok {
+			v, found, err = p.GetRetryMaxAttempts(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryModeProvider interface {
+	GetRetryMode(context.Context) (aws.RetryMode, bool, error)
+}
+
+func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryModeProvider); ok {
+			v, found, err = p.GetRetryMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
new file mode 100644
index 000000000000..4428ba49c203
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -0,0 +1,307 @@
+package config
+
+import (
+	"context"
+	"crypto/tls"
"crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/smithy-go/logging" +) + +// resolveDefaultAWSConfig will write default configuration values into the cfg +// value. It will write the default values, overwriting any previous value. +// +// This should be used as the first resolver in the slice of resolvers when +// resolving external configuration. +func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { + var sources []interface{} + for _, s := range cfgs { + sources = append(sources, s) + } + + *cfg = aws.Config{ + Credentials: aws.AnonymousCredentials{}, + Logger: logging.NewStandardLogger(os.Stderr), + ConfigSources: sources, + } + return nil +} + +// resolveCustomCABundle extracts the first instance of a custom CA bundle filename +// from the external configurations. It will update the HTTP Client's builder +// to be configured with the custom CA bundle. +// +// Config provider used: +// * customCABundleProvider +func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { + pemCerts, found, err := getCustomCABundle(ctx, cfgs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + if cfg.HTTPClient == nil { + cfg.HTTPClient = awshttp.NewBuildableClient() + } + + trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ + "has no WithTransportOptions, %T", cfg.HTTPClient) + } + + var appendErr error + client := trOpts.WithTransportOptions(func(tr *http.Transport) { + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{} + } + if tr.TLSClientConfig.RootCAs == nil { + tr.TLSClientConfig.RootCAs = x509.NewCertPool() + } + + b, err := ioutil.ReadAll(pemCerts) + if err != nil { + appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") + } + + if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { + appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") + } + }) + if appendErr != nil { + return appendErr + } + + cfg.HTTPClient = client + return err +} + +// resolveRegion extracts the first instance of a Region from the configs slice. +// +// Config providers used: +// * regionProvider +func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + v, found, err := getRegion(ctx, configs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + cfg.Region = v + return nil +} + +// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default +// region if region had not been resolved from other sources. +func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + v, found, err := getDefaultRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = v + + return nil +} + +// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance +// if one has not been resolved from other sources. 
+func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
+	c, found, err := getHTTPClient(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.HTTPClient = c
+	return nil
+}
+
+// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
+// if one has not been resolved from other sources.
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	o, found, err := getAPIOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.APIOptions = o
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of an EndpointResolverFunc from the config slice
+// and sets the function's result on aws.Config.EndpointResolver
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolver(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolver = endpointResolver
+
+	return nil
+}
+
+// resolveEndpointResolverWithOptions extracts the first instance of an aws.EndpointResolverWithOptions from the config slice
+// and sets the function's result on aws.Config.EndpointResolverWithOptions
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolverWithOptions = endpointResolver
+
+	return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+	logger, found, err := getLogger(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Logger = logger
+
+	return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	mode, found, err := getClientLogMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.ClientLogMode = mode
+
+	return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryer, found, err := getRetryer(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		cfg.Retryer = retryer
+		return nil
+	}
+
+	// Only load the retry options if a custom retryer has not been specified.
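+	// For example (editor's note), a shared config profile may carry:
+	//   max_attempts = 3
+	//   retry_mode   = adaptive
+	// which the two resolvers below pick up.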
+	if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil {
+		return err
+	}
+	return resolveRetryMode(ctx, cfg, configs)
+}
+
+func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	if len(cfg.Region) > 0 {
+		return nil
+	}
+
+	region, found, err := getEC2IMDSRegion(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = region
+
+	return nil
+}
+
+func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	defaultsMode, found, err := getDefaultsMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		defaultsMode = aws.DefaultsModeLegacy
+	}
+
+	var environment aws.RuntimeEnvironment
+	if defaultsMode == aws.DefaultsModeAuto {
+		envConfig, _, _ := getAWSConfigSources(configs)
+
+		client, found, err := getDefaultsModeIMDSClient(ctx, configs)
+		if err != nil {
+			return err
+		}
+		if !found {
+			client = imds.NewFromConfig(*cfg)
+		}
+
+		environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client)
+		if err != nil {
+			return err
+		}
+	}
+
+	cfg.DefaultsMode = defaultsMode
+	cfg.RuntimeEnvironment = environment
+
+	return nil
+}
+
+func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error {
+	maxAttempts, found, err := getRetryMaxAttempts(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMaxAttempts = maxAttempts
+
+	return nil
+}
+
+func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryMode, found, err := getRetryMode(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMode = retryMode
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
new file mode 100644
index 000000000000..42904ed740de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -0,0 +1,470 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+const (
+	// valid credential source values
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+var (
+	ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
+)
+
+// resolveCredentials extracts a credential provider from the slice of config sources.
+//
+// If an explicit credential provider is not found, the resolver will fall back
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveCredentialProvider(ctx, cfg, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		return nil
+	}
+
+	err = resolveCredentialChain(ctx, cfg, configs)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// resolveCredentialProvider extracts the first instance of Credentials from the
+// config slices.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also allows the credential
+// provider to be used safely by concurrent callers.
+//
+// Config providers used:
+// * credentialsProviderProvider
+func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	credProvider, found, err := getCredentialsProvider(ctx, configs)
+	if err != nil {
+		return false, err
+	}
+	if !found {
+		return false, nil
+	}
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// resolveCredentialChain resolves a credential provider chain using EnvConfig
+// and SharedConfig if present in the slice of provided configs.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also allows the credential
+// provider to be used safely by concurrent callers.
+func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	envConfig, sharedConfig, other := getAWSConfigSources(configs)
+
+	// When checking if a profile was specified programmatically we should only consider the "other"
+	// configuration sources that have been provided. This ensures we correctly honor the expected credential
+	// hierarchy.
+	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case sharedProfileSet:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	case envConfig.Credentials.HasKeys():
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+	case len(envConfig.WebIdentityTokenFilePath) > 0:
+		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+	default:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Wrap the resolved provider in a cache so the SDK will cache credentials.
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
+
+	switch {
+	case sharedConfig.Source != nil:
+		// Assume IAM role with credentials source from a different profile.
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+	case sharedConfig.Credentials.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		cfg.Credentials = credentials.StaticCredentialsProvider{
+			Value: sharedConfig.Credentials,
+		}
+
+	case len(sharedConfig.CredentialSource) != 0:
+		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+	case len(sharedConfig.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
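+		// An illustrative profile (editor's note; the values are hypothetical):
+		//   [profile web-identity]
+		//   role_arn = arn:aws:iam::123456789012:role/example
+		//   web_identity_token_file = /var/run/secrets/token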
+		return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+	case sharedConfig.hasSSOConfiguration():
+		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(sharedConfig.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(envConfig.ContainerCredentialsEndpoint) != 0:
+		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+	case len(envConfig.ContainerCredentialsRelativePath) != 0:
+		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		err = resolveEC2RoleCredentials(ctx, cfg, configs)
+	}
+	if err != nil {
+		return err
+	}
+
+	if len(sharedConfig.RoleARN) > 0 {
+		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+	}
+
+	return nil
+}
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	if err := sharedConfig.validateSSOConfiguration(); err != nil {
+		return err
+	}
+
+	var options []func(*ssocreds.Options)
+	v, found, err := getSSOProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		options = append(options, v)
+	}
+
+	cfgCopy := cfg.Copy()
+	cfgCopy.Region = sharedConfig.SSORegion
+
+	cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
+
+	return nil
+}
+
+func ecsContainerURI(path string) string {
+	return fmt.Sprintf("%s%s", ecsContainerEndpoint, path)
+}
+
+func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	var opts []func(*processcreds.Options)
+
+	options, found, err := getProcessCredentialOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		opts = append(opts, options)
+	}
+
+	cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)
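+	// processcreds executes the command configured by credential_process (for
+	// example `credential_process = /opt/bin/fetch-creds`, a hypothetical path)
+	// and parses the JSON the command writes to stdout into credentials.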
+
+	return nil
+}
+
+func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
+	var resolveErr error
+
+	parsed, err := url.Parse(endpointURL)
+	if err != nil {
+		resolveErr = fmt.Errorf("invalid URL, %w", err)
+	} else {
+		host := parsed.Hostname()
+		if len(host) == 0 {
+			resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host)
+		}
+	}
+
+	if resolveErr != nil {
+		return resolveErr
+	}
+
+	return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs)
+}
+
+func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error {
+	optFns := []func(*endpointcreds.Options){
+		func(options *endpointcreds.Options) {
+			if len(authToken) != 0 {
+				options.AuthorizationToken = authToken
+			}
+			options.APIOptions = cfg.APIOptions
+			if cfg.Retryer != nil {
+				options.Retryer = cfg.Retryer()
+			}
+		},
+	}
+
+	optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	provider := endpointcreds.New(url, optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+		options.ExpiryWindow = 5 * time.Minute
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		return resolveEC2RoleCredentials(ctx, cfg, configs)
+
+	case credSourceEnvironment:
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+
+	case credSourceECSContainer:
+		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
+			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+		}
+		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+	}
+
+	return nil
+}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+	optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	optFns = append(optFns, func(o *ec2rolecreds.Options) {
+		// Only define a client from config if not already defined.
+		if o.Client == nil {
+			o.Client = imds.NewFromConfig(*cfg)
+		}
+	})
+
+	provider := ec2rolecreds.New(optFns...)
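+	// ec2rolecreds retrieves the role credentials from the EC2 Instance
+	// Metadata Service; the credentials cache wrapper below refreshes them as
+	// they expire (editor's note).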
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+	var (
+		envConfig    *EnvConfig
+		sharedConfig *SharedConfig
+		other        configs
+	)
+
+	for i := range cfgs {
+		switch c := cfgs[i].(type) {
+		case EnvConfig:
+			if envConfig == nil {
+				envConfig = &c
+			}
+		case *EnvConfig:
+			if envConfig == nil {
+				envConfig = c
+			}
+		case SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = &c
+			}
+		case *SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = c
+			}
+		default:
+			other = append(other, c)
+		}
+	}
+
+	if envConfig == nil {
+		envConfig = &EnvConfig{}
+	}
+
+	if sharedConfig == nil {
+		sharedConfig = &SharedConfig{}
+	}
+
+	return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session from shared config that is configured to assume a role with an MFA
+// token, but the MFAToken option was not set.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Error is the error message
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error {
+	if len(filepath) == 0 {
+		return fmt.Errorf("token file path is not set")
+	}
+
+	if len(roleARN) == 0 {
+		return fmt.Errorf("role ARN is not set")
+	}
+
+	optFns := []func(*stscreds.WebIdentityRoleOptions){
+		func(options *stscreds.WebIdentityRoleOptions) {
+			options.RoleSessionName = sessionName
+		},
+	}
+
+	optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
+
+	cfg.Credentials = provider
+
+	return nil
+}
+
+func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
+	optFns := []func(*stscreds.AssumeRoleOptions){
+		func(options *stscreds.AssumeRoleOptions) {
+			options.RoleSessionName = sharedCfg.RoleSessionName
+			if sharedCfg.RoleDurationSeconds != nil {
+				if *sharedCfg.RoleDurationSeconds/time.Minute > 15 {
+					options.Duration = *sharedCfg.RoleDurationSeconds
+				}
+			}
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				options.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) != 0 {
+				options.SerialNumber = aws.String(sharedCfg.MFASerial)
+			}
+		},
+	}
+
+	optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	{
+		// Synthesize options early to validate configuration errors sooner to ensure a token provider
+		// is present if the SerialNumber was set.
+		var o stscreds.AssumeRoleOptions
+		for _, fn := range optFns {
+			fn(&o)
+		}
+		if o.TokenProvider == nil && o.SerialNumber != nil {
+			return AssumeRoleTokenProviderNotSetError{}
+		}
+	}
+
+	cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)
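+	// For reference (editor's note; profile name and ARN are hypothetical),
+	// this corresponds to a shared config profile such as:
+	//   [profile assume]
+	//   role_arn       = arn:aws:iam::123456789012:role/example
+	//   source_profile = default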
+
+	return nil
+}
+
+// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache
+// with the provided options if the provider is not already an
+// aws.CredentialsCache.
+func wrapWithCredentialsCache(
+	ctx context.Context,
+	cfgs configs,
+	provider aws.CredentialsProvider,
+	optFns ...func(options *aws.CredentialsCacheOptions),
+) (aws.CredentialsProvider, error) {
+	_, ok := provider.(*aws.CredentialsCache)
+	if ok {
+		return provider, nil
+	}
+
+	credCacheOptions, found, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
+	if err != nil {
+		return nil, err
+	}
+
+	// force allocation of a new slice if the additional options are
+	// needed, to prevent overwriting the passed in slice of options.
+	optFns = optFns[:len(optFns):len(optFns)]
+	if found {
+		optFns = append(optFns, credCacheOptions)
+	}
+
+	return aws.NewCredentialsCache(provider, optFns...), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
new file mode 100644
index 000000000000..4c43a165d46d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -0,0 +1,1265 @@
+package config
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/internal/ini"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	// Prefix to use for filtering profiles
+	profilePrefix = `profile `
+
+	// string equivalents for boolean values
+	endpointDiscoveryDisabled = `false`
+	endpointDiscoveryEnabled  = `true`
+	endpointDiscoveryAuto     = `auto`
+
+	// Static Credentials group
+	accessKeyIDKey  = `aws_access_key_id`     // group required
+	secretAccessKey = `aws_secret_access_key` // group required
+	sessionTokenKey = `aws_session_token`     // optional
+
+	// Assume Role Credentials group
+	roleArnKey             = `role_arn`          // group required
+	sourceProfileKey       = `source_profile`    // group required
+	credentialSourceKey    = `credential_source` // group required (or source_profile)
+	externalIDKey          = `external_id`       // optional
+	mfaSerialKey           = `mfa_serial`        // optional
+	roleSessionNameKey     = `role_session_name` // optional
+	roleDurationSecondsKey = "duration_seconds"  // optional
+
+	// AWS Single Sign-On (AWS SSO) group
+	ssoAccountIDKey = "sso_account_id"
+	ssoRegionKey    = "sso_region"
+	ssoRoleNameKey  = "sso_role_name"
+	ssoStartURL     = "sso_start_url"
+
+	// Additional Config fields
+	regionKey = `region`
+
+	// endpoint discovery group
+	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+	// External Credential process
+	credentialProcessKey = `credential_process` // optional
+
+	// Web Identity Token File
+	webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+	// S3 ARN Region Usage
+	s3UseARNRegionKey = "s3_use_arn_region"
+
+	ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+	ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+	// Use DualStack Endpoint Resolution
+	useDualStackEndpoint = "use_dualstack_endpoint"
+
+	// DefaultSharedConfigProfile is the default profile to be used when
+	// loading configuration from the config files if another profile name
+	// is not provided.
+	DefaultSharedConfigProfile = `default`
+
+	// S3 Disable Multi-Region AccessPoints
+	s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+	useFIPSEndpointKey = "use_fips_endpoint"
+
+	defaultsModeKey = "defaults_mode"
+
+	// Retry options
+	retryMaxAttemptsKey = "max_attempts"
+	retryModeKey        = "retry_mode"
+
+	caBundleKey = "ca_bundle"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+	return filepath.Join(userHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+	return filepath.Join(userHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+	DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+	DefaultSharedCredentialsFilename(),
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+	//
+	//	aws_access_key_id
+	//	aws_secret_access_key
+	//	aws_session_token
+	Credentials aws.Credentials
+
+	CredentialSource     string
+	CredentialProcess    string
+	WebIdentityTokenFile string
+
+	SSOAccountID string
+	SSORegion    string
+	SSORoleName  string
+	SSOStartURL  string
+
+	RoleARN             string
+	ExternalID          string
+	MFASerial           string
+	RoleSessionName     string
+	RoleDurationSeconds *time.Duration
+
+	SourceProfileName string
+	Source            *SharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service endpoints
+	// and signing requests.
+	//
+	//	region = us-west-2
+	Region string
+
+	// EnableEndpointDiscovery can be enabled or disabled in the shared config
+	// by setting endpoint_discovery_enabled to true, or false respectively.
+	//
+	//	endpoint_discovery_enabled = true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	//	s3_use_arn_region=true
+	S3UseARNRegion *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6)
+	//
+	//	ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	//
+	//	ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies if the S3 service should disable support for Multi-Region
+	// access-points
+	//
+	//	s3_disable_multiregion_access_points=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	//	use_dualstack_endpoint=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	//	use_fips_endpoint=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies which defaults mode should be used by services.
+	//
+	//	defaults_mode=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make for
+	// an operation that fails with a retryable error.
+	//
+	//	max_attempts=3
+	RetryMaxAttempts int
+
+	// Specifies the retry mode the API client will be created with.
+	//
+	//	retry_mode=standard
+	RetryMode aws.RetryMode
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle. Only use
+	// this if you want to configure the SDK to use a custom set of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's
+	// HTTP client. If the client's Transport is not a http.Transport an error
+	// will be returned. If the Transport's TLS config is set this option will
+	// cause the SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this
+	// setting. To use this option and a custom HTTP client, the HTTP client
+	// needs to be provided when creating the config. Not the service client.
+	//
+	//	ca_bundle=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+}
+
+func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the maximum number of attempts an API client
+// created Retryer should make for an operation call before failing.
+func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the retry mode the API client should create its
+// Retryer in.
+func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.RetryMode, true, nil
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set.
+func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region
+// access-points.
+func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// getRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+	return c.Credentials, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path
+// was set.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+	cfg, err := loadSharedConfig(ctx, configs)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistError); ok {
+			return SharedConfig{}, nil
+		}
+		return nil, err
+	}
+
+	return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file.
+// The file names and profile name are sourced from the configs.
+//
+// If a profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+	var profile string
+	var configFiles []string
+	var credentialsFiles []string
+	var ok bool
+	var err error
+
+	profile, ok, err = getSharedConfigProfile(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		profile = defaultSharedConfigProfile
+	}
+
+	configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// set up the logger if the log configuration warnings option is set
+	var logger logging.Logger
+	logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+	if found && logWarnings {
+		logger, found, err = getLogger(ctx, configs)
+		if err != nil {
+			return SharedConfig{}, err
+		}
+		if !found {
+			logger = logging.NewStandardLogger(os.Stderr)
+		}
+	}
+
+	return LoadSharedConfigProfile(ctx, profile,
+		func(o *LoadSharedConfigOptions) {
+			o.Logger = logger
+			o.ConfigFiles = configFiles
+			o.CredentialsFiles = credentialsFiles
+		},
+	)
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+	// CredentialsFiles are the shared credentials files
+	CredentialsFiles []string
+
+	// ConfigFiles are the shared config files
+	ConfigFiles []string
+
+	// Logger is the logger used to log shared config behavior
+	Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, where both define credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// If config files are not set, the SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, the SDK will default to using a file at location `.aws/credentials` if present.
+// No default files are used if the file options are set to an empty slice.
+// +// You can read more about shared config and credentials file location at +// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location +// +func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { + var option LoadSharedConfigOptions + for _, fn := range optFns { + fn(&option) + } + + if option.ConfigFiles == nil { + option.ConfigFiles = DefaultSharedConfigFiles + } + + if option.CredentialsFiles == nil { + option.CredentialsFiles = DefaultSharedCredentialsFiles + } + + // load shared configuration sections from shared configuration INI options + configSections, err := loadIniFiles(option.ConfigFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processConfigSections(ctx, configSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + // load shared credentials sections from shared credentials INI options + credentialsSections, err := loadIniFiles(option.CredentialsFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processCredentialsSections(ctx, credentialsSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + err = mergeSections(configSections, credentialsSections) + if err != nil { + return SharedConfig{}, err + } + + cfg := SharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { + return SharedConfig{}, err + } + + return cfg, nil +} + +func processConfigSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error { + for _, section := range sections.List() { + // drop profiles without prefix for config files + if !strings.HasPrefix(section, profilePrefix) && !strings.EqualFold(section, "default") { + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, + "A profile defined with name `%v` is ignored. For use within a shared configuration file, "+ + "a non-default profile must have `profile ` prefixed to the profile name.\n", + section, + ) + } + } + } + + // rename sections to remove `profile ` prefixing to match with credentials file. + // if default is already present, it will be dropped. + for _, section := range sections.List() { + if strings.HasPrefix(section, profilePrefix) { + v, ok := sections.GetSection(section) + if !ok { + return fmt.Errorf("error processing profiles within the shared configuration files") + } + + // delete section with profile as prefix + sections.DeleteSection(section) + + // set the value to non-prefixed name in sections. 
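+			// e.g. a config-file section "[profile dev]" is renamed here to
+			// "dev" so it lines up with the credentials file's "[dev]"
+			// section (profile name illustrative).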
+			section = strings.TrimPrefix(section, profilePrefix)
+			if sections.HasSection(section) {
+				oldSection, _ := sections.GetSection(section)
+				v.Logs = append(v.Logs,
+					fmt.Sprintf("A default profile prefixed with `profile ` found in %s, "+
+						"overriding non-prefixed default profile from %s", v.SourceFile, oldSection.SourceFile))
+			}
+
+			// assign non-prefixed name to section
+			v.Name = section
+			sections.SetSection(section, v)
+		}
+	}
+	return nil
+}
+
+func processCredentialsSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
+	for _, section := range sections.List() {
+		// drop profiles with prefix for credential files
+		if strings.HasPrefix(section, profilePrefix) {
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug,
+					"The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+						"for the shared credentials file.\n",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+	mergedSections := ini.NewSections()
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		var v *ini.UnableToReadFile
+		if ok := errors.As(err, &v); ok {
+			// Skip files which can't be opened and read for whatever reason.
+			// We treat such files as empty, and do not fall back to other locations.
+			continue
+		} else if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		// merge this file's sections into mergedSections
+		err = mergeSections(mergedSections, sections)
+		if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+	}
+
+	return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst, src ini.Sections) error {
+	for _, sectionName := range src.List() {
+		srcSection, _ := src.GetSection(sectionName)
+
+		if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+			(srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+			srcSection.Errors = append(srcSection.Errors,
+				fmt.Errorf("partial credentials found for profile %v", sectionName))
+		}
+
+		if !dst.HasSection(sectionName) {
+			dst.SetSection(sectionName, srcSection)
+			continue
+		}
+
+		// merge into the existing destination section
+		dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden if any
+		dstSection.Errors = srcSection.Errors
+
+		// Access key id update
+		if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+			accessKey := srcSection.String(accessKeyIDKey)
+			secretKey := srcSection.String(secretAccessKey)
+
+			if dstSection.Has(accessKeyIDKey) {
+				dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+					dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+			}
+
+			// update access key
+			v, err := ini.NewStringValue(accessKey)
+			if err != nil {
+				return fmt.Errorf("error merging access key, %w", err)
+			}
+			dstSection.UpdateValue(accessKeyIDKey, v)
+
+			// update secret key
+			v, err = ini.NewStringValue(secretKey)
+			if err != nil {
+				return fmt.Errorf("error merging secret key, %w", err)
+			}
+			dstSection.UpdateValue(secretAccessKey, v)
+
+			// update session token
+			if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+				return err
+			}
+
+			// update source file to reflect where the static creds came from
			dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+			dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+		}
+
+		stringKeys := []string{
+			roleArnKey,
+			sourceProfileKey,
+			credentialSourceKey,
+			externalIDKey,
+			mfaSerialKey,
+			roleSessionNameKey,
+			regionKey,
+			enableEndpointDiscoveryKey,
+			credentialProcessKey,
+			webIdentityTokenFileKey,
+			s3UseARNRegionKey,
+			s3DisableMultiRegionAccessPointsKey,
+			ec2MetadataServiceEndpointModeKey,
+			ec2MetadataServiceEndpointKey,
+			useDualStackEndpoint,
+			useFIPSEndpointKey,
+			defaultsModeKey,
+			retryModeKey,
+		}
+		for i := range stringKeys {
+			if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		intKeys := []string{
+			roleDurationSecondsKey,
+			retryMaxAttemptsKey,
+		}
+		for i := range intKeys {
+			if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		// set the merged section back on dst
+		dst = dst.SetSection(sectionName, dstSection)
+	}
+
+	return nil
+}
+
+func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.String(key)
+		val, err := ini.NewStringValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, val)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.Int(key)
+		v, err := ini.NewIntValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, v)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
+	return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+		"with a %v value found in a duplicate profile defined at file %v.\n",
+		sectionName, key, dstSourceFile, key, srcSourceFile)
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+	sections ini.Sections, logger logging.Logger) error {
+	c.Profile = profile
+
+	section, ok := sections.GetSection(profile)
+	if !ok {
+		return SharedConfigProfileNotExistError{
+			Profile: profile,
+		}
+	}
+
+	// if logs are appended to the section, log them
+	if section.Logs != nil && logger != nil {
+		for _, log := range section.Logs {
+			logger.Logf(logging.Debug, log)
+		}
+	}
+
+	// set config from the provided ini section
+	err := c.setFromIniSection(profile, section)
+	if err != nil {
+		return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// if this is the second instance of the profile the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
+		c.clearAssumeRoleOptions()
+	} else {
+		// The first time a profile is seen it must either be an assume role
+		// credential, or SSO. Assert that if the credential type requires a
+		// role ARN, the ARN is also set, or validate that the SSO
+		// configuration is complete.
+		if err := c.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+
+	// if not top level profile and has credentials, return with credentials.
+	if len(profiles) != 0 && c.Credentials.HasKeys() {
+		return nil
+	}
+
+	profiles[profile] = struct{}{}
+
+	// validate no colliding credential types are present
+	if err := c.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(c.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		c.clearCredentialOptions()
+
+		srcCfg := &SharedConfig{}
+		err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger)
+		if err != nil {
+			// A SourceProfileName that doesn't exist is an error in configuration.
+			if _, ok := err.(SharedConfigProfileNotExistError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN: c.RoleARN,
+					Profile: c.SourceProfileName,
+					Err:     err,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN: c.RoleARN,
+				Profile: c.SourceProfileName,
+			}
+		}
+
+		c.Source = srcCfg
+	}
+
+	return nil
+}
+
+// setFromIniSection loads the configuration from the profile section defined in
+// the provided ini file. A SharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key,
+// the aws_access_key_id will be ignored.
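+//
+// For example, a profile like the following (values illustrative) would have
+// its lone access key ignored:
+//
+//	[profile partial]
+//	aws_access_key_id = AKIAEXAMPLE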
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+	if len(section.Name) == 0 {
+		sources := make([]string, 0)
+		for _, v := range section.SourceFile {
+			sources = append(sources, v)
+		}
+
+		return fmt.Errorf("parsing error: could not find profile section name after processing files: %v", sources)
+	}
+
+	if len(section.Errors) != 0 {
+		var errStatement string
+		for i, e := range section.Errors {
+			errStatement += fmt.Sprintf("%d, %v\n", i+1, e.Error())
+		}
+		return fmt.Errorf("error using profile:\n%v", errStatement)
+	}
+
+	// Assume Role
+	updateString(&c.RoleARN, section, roleArnKey)
+	updateString(&c.ExternalID, section, externalIDKey)
+	updateString(&c.MFASerial, section, mfaSerialKey)
+	updateString(&c.RoleSessionName, section, roleSessionNameKey)
+	updateString(&c.SourceProfileName, section, sourceProfileKey)
+	updateString(&c.CredentialSource, section, credentialSourceKey)
+	updateString(&c.Region, section, regionKey)
+
+	// AWS Single Sign-On (AWS SSO)
+	updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+	updateString(&c.SSORegion, section, ssoRegionKey)
+	updateString(&c.SSORoleName, section, ssoRoleNameKey)
+	updateString(&c.SSOStartURL, section, ssoStartURL)
+
+	if section.Has(roleDurationSecondsKey) {
+		d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+		c.RoleDurationSeconds = &d
+	}
+
+	updateString(&c.CredentialProcess, section, credentialProcessKey)
+	updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+	updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+	updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+
+	if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+	}
+	updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+
+	updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+	updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+	if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+	}
+
+	if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+	}
+	if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+	}
+
+	updateString(&c.CustomCABundle, section, caBundleKey)
+
+	// Shared Credentials
+	creds := aws.Credentials{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		Source:          fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+	}
+
+	if creds.HasKeys() {
+		c.Credentials = creds
+	}
+
+	return nil
+}
+
+func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if ok := mode.SetFromString(value); !ok {
+		return fmt.Errorf("invalid value: %s", value)
+	}
+	return nil
+}
+
+func updateRetryMode(mode *aws.RetryMode, section
ini.Section, key string) (err error) {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if *mode, err = aws.ParseRetryMode(value); err != nil {
+		return err
+	}
+	return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	return endpointMode.SetFromString(value)
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+	if err := c.validateCredentialsRequireARN(profile); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+	var credSource string
+
+	switch {
+	case len(c.SourceProfileName) != 0:
+		credSource = sourceProfileKey
+	case len(c.CredentialSource) != 0:
+		credSource = credentialSourceKey
+	case len(c.WebIdentityTokenFile) != 0:
+		credSource = webIdentityTokenFileKey
+	}
+
+	if len(credSource) != 0 && len(c.RoleARN) == 0 {
+		return CredentialRequiresARNError{
+			Type:    credSource,
+			Profile: profile,
+		}
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
+	if !oneOrNone(
+		len(c.SourceProfileName) != 0,
+		len(c.CredentialSource) != 0,
+		len(c.CredentialProcess) != 0,
+		len(c.WebIdentityTokenFile) != 0,
+	) {
+		return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso")
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+	if !c.hasSSOConfiguration() {
+		return nil
+	}
+
+	var missing []string
+	if len(c.SSOAccountID) == 0 {
+		missing = append(missing, ssoAccountIDKey)
+	}
+
+	if len(c.SSORegion) == 0 {
+		missing = append(missing, ssoRegionKey)
+	}
+
+	if len(c.SSORoleName) == 0 {
+		missing = append(missing, ssoRoleNameKey)
+	}
+
+	if len(c.SSOStartURL) == 0 {
+		missing = append(missing, ssoStartURL)
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+	switch {
+	case len(c.SourceProfileName) != 0:
+	case len(c.CredentialSource) != 0:
+	case len(c.CredentialProcess) != 0:
+	case len(c.WebIdentityTokenFile) != 0:
+	case c.hasSSOConfiguration():
+	case c.Credentials.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+	switch {
+	case len(c.SSOAccountID) != 0:
+	case len(c.SSORegion) != 0:
+	case len(c.SSORoleName) != 0:
+	case len(c.SSOStartURL) != 0:
+	default:
+		return false
+	}
+	return true
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+	c.RoleARN = ""
+	c.ExternalID = ""
+	c.MFASerial = ""
+	c.RoleSessionName = ""
+	c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+	c.CredentialSource = ""
+	c.CredentialProcess = ""
+	c.WebIdentityTokenFile = ""
+	c.Credentials = aws.Credentials{}
+	c.SSOAccountID = ""
+	c.SSORegion = ""
+	c.SSORoleName = ""
+	c.SSOStartURL = ""
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+	return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+	Filename []string
+	Profile  string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+	return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	Profile string
+	RoleARN string
+	Err     error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+	return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+		e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// Type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+func userHomeDir() string {
+	// Ignore errors since we only care about Windows and *nix.
+	homedir, _ := os.UserHomeDir()
+	return homedir
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section, if the
+// key is present in the section.
+//
+// Down casts the INI integer value from an int64 to an int, whose size could
+// differ depending on the platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	if vt, _ := section.ValueType(key); vt != ini.IntegerType {
+		return fmt.Errorf("invalid value %s=%s, expect integer",
+			key, section.String(key))
+	}
+	*dst = int(section.Int(key))
+	return nil
+}
+
+// updateBool will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section, if
+// the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = new(bool)
+	**dst = section.Bool(key)
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	value := section.String(key)
+	if len(value) == 0 {
+		return
+	}
+
+	switch {
+	case strings.EqualFold(value, endpointDiscoveryDisabled):
+		*dst = aws.EndpointDiscoveryDisabled
+	case strings.EqualFold(value, endpointDiscoveryEnabled):
+		*dst = aws.EndpointDiscoveryEnabled
+	case strings.EqualFold(value, endpointDiscoveryAuto):
+		*dst = aws.EndpointDiscoveryAuto
+	}
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	if section.Bool(key) {
+		*dst = aws.DualStackEndpointStateEnabled
+	} else {
+		*dst = aws.DualStackEndpointStateDisabled
+	}
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding FIPSEndpointState is found.
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	if section.Bool(key) {
+		*dst = aws.FIPSEndpointStateEnabled
+	} else {
+		*dst = aws.FIPSEndpointStateDisabled
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
new file mode 100644
index 000000000000..9783fe3133ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -0,0 +1,117 @@
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-02-24)
+
+* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588).
Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.5 (2021-12-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.4 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.3 (2021-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-10) + +* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. + +# v1.4.0 (2021-08-27) + +* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723 +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 000000000000..f6e2873ab906
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 000000000000..ae25c3a489c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
+// role for credentials, you specify a `credential_source` property in the config
+// profile the SDK will load.
+//
+//	[default]
+//	credential_source = Ec2InstanceMetadata
+//
+// Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider, and wraps it with the
+// CredentialsCache before assigning the provider to the Amazon S3 API client's
+// Credentials option.
+//
+//	provider := ec2rolecreds.New()
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set the configuration options on the
+// credentials provider using this package's Options type to configure the
+// EC2 IMDS API client used to retrieve the credentials.
+//
+//	provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		// See imds.Options type's documentation for more options available.
+//		o.Client = imds.New(imds.Options{
+//			HTTPClient: customHTTPClient,
+//		})
+//	})
+//
+// EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
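+//
+// A usage sketch, retrieving credentials once through the cache created in
+// the example above:
+//
+//	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())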
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 000000000000..aeb79ac3c97d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,229 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides the name of the EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+	GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+//
+// The New function must be used to create the Provider with a custom EC2 IMDS
+// client.
+//
+//	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		o.Client = imds.New(imds.Options{/* custom options */})
+//	})
type Provider struct {
+	options Options
+}
+
+// Options is a list of user-settable options for setting the behavior of the Provider.
+type Options struct {
+	// The API client that will be used by the provider to make GetMetadata API
+	// calls to EC2 IMDS.
+	//
+	// If nil, the provider will default to the EC2 IMDS client.
+	Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from the EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+	options := Options{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.Client == nil {
+		options.Client = imds.New(imds.Options{})
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails, or the desired credentials cannot be
+// extracted.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	credsList, err := requestCredList(ctx, p.options.Client)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return aws.Credentials{Source: ProviderName},
+			fmt.Errorf("unexpected empty EC2 IMDS role list")
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   roleCreds.Expiration,
+	}
+
+	// Cap role credentials Expires to 1 hour so they can be refreshed more
+	// often. Jitter will be applied by the credentials cache if it is being used.
+	if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+		creds.Expires = anHour
+	}
+
+	return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+	aws.Credentials, error,
+) {
+	if !prevCreds.CanExpire {
+		return aws.Credentials{}, err
+	}
+
+	if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+		return prevCreds, nil
+	}
+
+	newCreds := prevCreds
+	randFloat64, err := sdkrand.CryptoRandFloat64()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+	}
+
+	// Random distribution of [5,15) minutes.
+	expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+	newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+	logger := middleware.GetLogger(ctx)
+	logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+	return newCreds, nil
+}
+
+// AdjustExpiresBy adds the passed-in duration to the passed-in credential's
+// Expires time, unless the time until Expires is less than 15 minutes. Returns
+// the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+	aws.Credentials, error,
+) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+	if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: iamSecurityCredsPath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+	}
+	defer resp.Content.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Content)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: path.Join(iamSecurityCredsPath, credsName),
+	})
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+	defer resp.Content.Close()
+
+	var respCreds ec2RoleCredRespBody
+	if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+
+	if !strings.EqualFold(respCreds.Code, "Success") {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName,
+				&smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 000000000000..60b8298f86fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,148 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifier
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the set of configurable options for the endpoint client
+type Options struct {
+	// The endpoint to retrieve credentials from
+	Endpoint string
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the client's options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+	return to
+}
+
+// Client is a client for retrieving AWS credentials from an endpoint
+type Client struct {
+	options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	if options.HTTPClient == nil {
+		options.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+// GetCredentialsInput is the input to send to the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+	AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from the credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+	stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+	smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, err
+		}
+	}
+
+	handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, _, err := handler.Handle(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+	Code    string            `json:"code"`
+	Message string            `json:"message"`
+	Fault   smithy.ErrorFault `json:"-"`
+}
+
+// Error is the error message string
+func (e *EndpointError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+	return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+	return e.Message
+}
+
+// ErrorFault indicates the error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+	return e.Fault
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 000000000000..40747a53c18f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type buildEndpoint struct {
+	Endpoint string
+}
+
+func (b *buildEndpoint) ID() string {
+	return "BuildEndpoint"
+}
+
+func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) (
+	out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport, %T", in.Request)
+	}
+
+	if len(b.Endpoint) == 0 {
+		return out, metadata, fmt.Errorf("endpoint not provided")
+	}
+
+	parsed, err := url.Parse(b.Endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err)
+	}
+
+	request.URL = parsed
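+	// the request is now addressed to the configured endpoint; scheme, host,
+	// and path all come from the parsed Endpoint value above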
+ + return next.HandleBuild(ctx, in) +} + +type serializeOpGetCredential struct{} + +func (s *serializeOpGetCredential) ID() string { + return "OperationSerializer" +} + +func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( + out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) + } + + params, ok := in.Parameters.(*GetCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) + } + + const acceptHeader = "Accept" + request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") + + if len(params.AuthorizationToken) > 0 { + const authHeader = "Authorization" + request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) + } + + return next.HandleSerialize(ctx, in) +} + +type deserializeOpGetCredential struct{} + +func (d *deserializeOpGetCredential) ID() string { + return "OperationDeserializer" +} + +func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( + out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, deserializeError(response) + } + + var shape *GetCredentialsOutput + if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} + } + + out.Result = shape + return out, metadata, err +} + +func deserializeError(response *smithyhttp.Response) error { + var errShape *EndpointError + err := json.NewDecoder(response.Body).Decode(&errShape) + if err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)} + } + + if response.StatusCode >= 500 { + errShape.Fault = smithy.FaultServer + } else { + errShape.Fault = smithy.FaultClient + } + + return errShape +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go new file mode 100644 index 000000000000..40cd7addb374 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -0,0 +1,133 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. 
+// The format of the refreshable credentials response:
+//
+//	{
+//		"AccessKeyId" : "MUA...",
+//		"SecretAccessKey" : "/7PC5om....",
+//		"Token" : "AQoDY....=",
+//		"Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+//	{
+//		"code": "ErrorCode",
+//		"message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+	GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	// The AWS Client to make HTTP requests to the endpoint with. The endpoint
+	// the request will be made to is provided by the aws.Config's
+	// EndpointResolver.
+	client getCredentialsAPIClient
+
+	options Options
+}
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is a structure of configurable options for Provider
+type Options struct {
+	// Endpoint to retrieve credentials from. Required
+	Endpoint string
+
+	// HTTPClient to handle sending HTTP requests to the target endpoint.
+	HTTPClient HTTPClient
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*middleware.Stack) error
+
+	// The Retryer to be used for determining whether a failed request should be retried
+	Retryer aws.Retryer
+
+	// Optional authorization token value which, if set, will be used as the value of
+	// the Authorization header of the endpoint credential request.
+	AuthorizationToken string
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+	o := Options{
+		Endpoint: endpoint,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	p := &Provider{
+		client: client.New(client.Options{
+			HTTPClient: o.HTTPClient,
+			Endpoint:   o.Endpoint,
+			APIOptions: o.APIOptions,
+			Retryer:    o.Retryer,
+		}),
+		options: o,
+	}
+
+	return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
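+//
+// A minimal usage sketch (the endpoint URL is hypothetical):
+//
+//	provider := endpointcreds.New("http://127.0.0.1:8080/credentials")
+//	cfg.Credentials = aws.NewCredentialsCache(provider)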
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		Source:          ProviderName,
+	}
+
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+	return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
new file mode 100644
index 000000000000..5e2c98bd26a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.12.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 000000000000..d56dd8260d78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from an
+// external CLI invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// Loading credentials with the SDK's AWS Config
+//
+// You can use credentials from an AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to set up your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+//	[default]
+//	credential_process = /command/to/call
+//
+// Loading configuration via LoadDefaultConfig will use the credential process to
+// retrieve credentials. NOTE: If there are credentials in the profile you are
+// using, the credential process will not be used.
+//
+//	// Initialize a session to load credentials.
+//	cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+//	// Create S3 service client to use the credentials.
+//	svc := s3.NewFromConfig(cfg)
+//
+// Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	// Create credentials using the Provider.
+//	provider := processcreds.NewProvider("/path/to/command")
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+//	provider := processcreds.NewProvider("/path/to/command",
+//		func(o *processcreds.Options) {
+//			// Override the provider's default timeout
+//			o.Timeout = 2 * time.Minute
+//		})
+//
+// You can also use your own `exec.Cmd` value by providing a value that satisfies
+// the `NewCommandBuilder` interface to the `NewProviderCommand` constructor.
+//
+//	// Create an exec.Cmd
+//	cmdBuilder := processcreds.NewCommandBuilderFunc(
+//		func(ctx context.Context) (*exec.Cmd, error) {
+//			cmd := exec.CommandContext(ctx,
+//				"customCLICommand",
+//				"-a", "argument",
+//			)
+//			cmd.Env = []string{
+//				"ENV_VAR_FOO=value",
+//				"ENV_VAR_BAR=other_value",
+//			}
+//
+//			return cmd, nil
+//		},
+//	)
+//
+//	// Create credentials using your exec.Cmd and custom timeout
+//	provider := processcreds.NewProviderCommand(cmdBuilder,
+//		func(opt *processcreds.Options) {
+//			// optionally override the provider's default timeout
+//			opt.Timeout = 1 * time.Second
+//		})
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 000000000000..3921da34cd7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,269 @@
+package processcreds
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// DefaultTimeout default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+	Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+	return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+	return e.Err
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+	// Provides a constructor for the exec.Cmd values that are invoked by the
+	// provider for retrieving credentials. Use this to provide custom creation
+	// of exec.Cmd with things like environment variables, or other configuration.
+	//
+	// The provider defaults to the DefaultNewCommandBuilder.
+	commandBuilder NewCommandBuilder
+
+	options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+	// Timeout limits the time a process can run.
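+	// Defaults to DefaultTimeout (one minute) when the Provider is created
+	// via NewProvider or NewProviderCommand.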
+	Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how the command that
+// the Provider uses to retrieve credentials will be created.
+type NewCommandBuilder interface {
+	NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+	Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process's environment
+// variables, stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(b.Args) == 0 {
+		return nil, &ProviderError{
+			Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+		}
+	}
+
+	cmdArgs = append(cmdArgs, b.Args...)
+	cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+	cmd.Env = os.Environ()
+
+	cmd.Stderr = os.Stderr // display stderr on console for MFA
+	cmd.Stdin = os.Stdin   // enable stdin for MFA
+
+	return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Provider wrapping the given command.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating the
+// command the Provider will use to retrieve credentials.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+	var args []string
+
+	// Ensure that the command arguments are not set if the provided command is
+	// empty. This will error out when the command is executed since no
+	// arguments are specified.
+	if len(command) > 0 {
+		args = []string{command}
+	}
+
+	commandBuilder := DefaultNewCommandBuilder{
+		Args: args,
+	}
+	return NewProviderCommand(commandBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Provider with the specified
+// command builder, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+	p := &Provider{
+		commandBuilder: builder,
+		options: Options{
+			Timeout: DefaultTimeout,
+		},
+	}
+
+	for _, option := range options {
+		option(&p.options)
+	}
+
+	return p
+}
+
+type credentialProcessResponse struct {
+	Version         int
+	AccessKeyID     string `json:"AccessKeyId"`
+	SecretAccessKey string
+	SessionToken    string
+	Expiration      *time.Time
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or error if the command fails.
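+//
+// A sketch of the JSON the process is expected to print on stdout (field
+// values are illustrative):
+//
+//	{
+//		"Version": 1,
+//		"AccessKeyId": "AKID...",
+//		"SecretAccessKey": "SECRET...",
+//		"SessionToken": "TOKEN...",
+//		"Expiration": "2019-05-29T00:21:43Z"
+//	}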
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	out, err := p.executeCredentialProcess(ctx)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	// Parse and validate the response
+	resp := &credentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),
+		}
+	}
+
+	if resp.Version != 1 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("wrong version in process output (not 1)"),
+		}
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing AccessKeyId in process output"),
+		}
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing SecretAccessKey in process output"),
+		}
+	}
+
+	creds := aws.Credentials{
+		Source:          ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+	}
+
+	// Handle expiration
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+	if p.options.Timeout >= 0 {
+		var cancelFunc func()
+		ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+		defer cancelFunc()
+	}
+
+	cmd, err := p.commandBuilder.NewCommand(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// get creds json on process's stdout
+	output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+	if cmd.Stdout != nil {
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+	} else {
+		cmd.Stdout = output
+	}
+
+	execCh := make(chan error, 1)
+	go executeCommand(cmd, execCh)
+
+	select {
+	case execError := <-execCh:
+		if execError == nil {
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("credential process timed out: %w", execError),
+			}
+		default:
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("error in credential_process: %w", execError),
+			}
+		}
+	}
+
+	out := output.Bytes()
+	if runtime.GOOS == "windows" {
+		// windows adds slashes to quotes
+		out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+	}
+
+	return out, nil
+}
+
+func executeCommand(cmd *exec.Cmd, exec chan error) {
+	// Start the command
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	exec <- err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
new file mode 100644
index 000000000000..2f396c0a1180
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -0,0 +1,63 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using the AWS CLI's "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache.
+// If a cached token is not found, has expired, or the file is malformed, an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+//	sso_account_id
+//	sso_region
+//	sso_role_name
+//	sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_role_name = SSOReadOnlyRole
+//	sso_region = us-east-1
+//	sso_account_id = 123456789012
+//
+// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+//	config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+//	if err != nil {
+//		return err
+//	}
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+//
+//	client := sso.NewFromConfig(cfg)
+//
+//	var provider aws.CredentialsProvider
+//	provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start")
+//
+//	// Wrap the provider with aws.CredentialsCache to cache the credentials until their expiry time
+//	provider = aws.NewCredentialsCache(provider)
+//
+//	credentials, err := provider.Retrieve(context.TODO())
+//	if err != nil {
+//		return err
+//	}
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you are programmatically constructing the
+// provider directly. This prevents your application from accessing the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go
new file mode 100644
index 000000000000..d4df39a7a229
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go
@@ -0,0 +1,10 @@
+//go:build !windows
+// +build !windows
+
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+	return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go
new file mode 100644
index 000000000000..eb48f61e5bc8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go
@@ -0,0 +1,7 @@
+package ssocreds
+
+import "os"
+
+func getHomeDirectory() string {
+	return os.Getenv("USERPROFILE")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go
new file mode 100644
index 000000000000..279df7a1318a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go
@@ -0,0 +1,184 @@
+package ssocreds
+
+import (
+	"context"
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of credentials.
+const ProviderName = "SSOProvider"
+
+var defaultCacheLocation func() string
+
+func defaultCacheLocationImpl() string {
+	return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
+}
+
+func init() {
+	defaultCacheLocation = defaultCacheLocationImpl
+}
+
+// GetRoleCredentialsAPIClient is an API client that implements the GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+	GetRoleCredentials(ctx context.Context, params *sso.GetRoleCredentialsInput, optFns ...func(*sso.Options)) (*sso.GetRoleCredentialsOutput, error)
+}
+
+// Options is the Provider options structure.
+type Options struct {
+	// The Client which is configured for the AWS Region where the AWS SSO user portal is located.
+	Client GetRoleCredentialsAPIClient
+
+	// The AWS account that is assigned to the user.
+	AccountID string
+
+	// The role name that is assigned to the user.
+	RoleName string
+
+	// The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
+	StartURL string
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
+type Provider struct {
+	options Options
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+	options := Options{
+		Client:    client,
+		AccountID: accountID,
+		RoleName:  roleName,
+		StartURL:  startURL,
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	tokenFile, err := loadTokenFile(p.options.StartURL)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+		AccessToken: &tokenFile.AccessToken,
+		AccountId:   &p.options.AccountID,
+		RoleName:    &p.options.RoleName,
+	})
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     aws.ToString(output.RoleCredentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+		SessionToken:    aws.ToString(output.RoleCredentials.SessionToken),
+		Expires:         time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+		CanExpire:       true,
+		Source:          ProviderName,
+	}, nil
+}
+
+func getCacheFileName(url string) (string, error) {
+	hash := sha1.New()
+	_, err := hash.Write([]byte(url))
+	if err != nil {
+		return "", err
+	}
+	return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
+}
+
+type rfc3339 time.Time
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+	var value string
+
+	if err := json.Unmarshal(bytes, &value); err != nil {
+		return err
+	}
+
+	parse, err := time.Parse(time.RFC3339, value)
+	if err != nil {
+		return fmt.Errorf("expected RFC3339 timestamp: %w", err)
+	}
+
+	*r = rfc3339(parse)
+
+	return nil
+}
+
+type token struct {
+	AccessToken string  `json:"accessToken"`
+	ExpiresAt   rfc3339 `json:"expiresAt"`
+	Region      string  `json:"region,omitempty"`
+	StartURL    string  `json:"startUrl,omitempty"`
+}
+
+func (t token) Expired() bool {
+	return sdk.NowTime().Round(0).After(time.Time(t.ExpiresAt))
+}
+
+// InvalidTokenError is the error type that is returned if the loaded token has expired or is otherwise invalid.
+// To refresh the SSO session, run aws sso login with the corresponding profile.
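+//
+// A sketch of detecting this error with errors.As (the profile name is
+// illustrative):
+//
+//	var invalidTokenErr *ssocreds.InvalidTokenError
+//	if errors.As(err, &invalidTokenErr) {
+//		// ask the user to run: aws sso login --profile my-profile
+//	}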
+type InvalidTokenError struct {
+	Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+	return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+	const msg = "the SSO session has expired or is invalid"
+	if i.Err == nil {
+		return msg
+	}
+	return msg + ": " + i.Err.Error()
+}
+
+func loadTokenFile(startURL string) (t token, err error) {
+	key, err := getCacheFileName(startURL)
+	if err != nil {
+		return token{}, &InvalidTokenError{Err: err}
+	}
+
+	fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+	if err != nil {
+		return token{}, &InvalidTokenError{Err: err}
+	}
+
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, &InvalidTokenError{Err: err}
+	}
+
+	if len(t.AccessToken) == 0 {
+		return token{}, &InvalidTokenError{}
+	}
+
+	if t.Expired() {
+		return token{}, &InvalidTokenError{Err: fmt.Errorf("access token is expired")}
+	}
+
+	return t, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
new file mode 100644
index 000000000000..d525cac09601
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
@@ -0,0 +1,53 @@
+package credentials
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+	// StaticCredentialsName provides a name of Static provider
+	StaticCredentialsName = "StaticCredentials"
+)
+
+// StaticCredentialsEmptyError is emitted when static credentials are empty.
+type StaticCredentialsEmptyError struct{}
+
+func (*StaticCredentialsEmptyError) Error() string {
+	return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+	Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized with the AWS
+// credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+	return StaticCredentialsProvider{
+		Value: aws.Credentials{
+			AccessKeyID:     key,
+			SecretAccessKey: secret,
+			SessionToken:    session,
+		},
+	}
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+	v := s.Value
+	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+		return aws.Credentials{
+			Source: StaticCredentialsName,
+		}, &StaticCredentialsEmptyError{}
+	}
+
+	if len(v.Source) == 0 {
+		v.Source = StaticCredentialsName
+	}
+
+	return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000000..314fbd8bc4de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,320 @@
+// Package stscreds provides credential Providers for retrieving STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that, per instance of credentials.Credentials, all requests
+// to refresh the credentials are synchronized. But the SDK is unable to
+// ensure synchronous usage of the AssumeRoleProvider if the value is shared
+// between multiple Credentials or service clients.
+//
+// Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDK's stscreds package.
+//
+//	// Initial credentials loaded from SDK's default credential chain. Such as
+//	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+//	// Role. These credentials will be used to make the STS Assume Role API call.
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN.
+//	stsSvc := sts.NewFromConfig(cfg)
+//	creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with a MFA token you can either specify a custom MFA
+// token provider or use the SDK's built-in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	staticTokenProvider := func() (string, error) {
+//		return someTokenCode, nil
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = staticTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for a new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = stscreds.StdinTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface {
+	AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the
+// credentials will be valid for. This value is only used by AssumeRoleProvider
+// for specifying the default expiry duration of an assume role.
+//
+// Other providers such as WebIdentityRoleProvider do not use this value, and
+// instead rely on STS API's default parameter handling to assign a default
+// value.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	options AssumeRoleOptions
+}
+
+// AssumeRoleOptions is the configurable options for AssumeRoleProvider
+type AssumeRoleOptions struct {
+	// Client implementation of the AssumeRole operation. Required
+	Client AssumeRoleAPIClient
+
+	// IAM Role ARN to be assumed. Required
+	RoleARN string
+
+	// Session name, if you wish to uniquely identify this session.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyARNs []types.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation. You can require users to specify a source identity when they assume a
+	// role. You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. You can use source identity information in CloudTrail logs to determine
+	// who took actions with a role. You can use the aws:SourceIdentity condition key
+	// to further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see Monitor
+	// and control actions taken with assumed roles
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+	// in the IAM User Guide.
+	SourceIdentity *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed and SerialNumber is set.
+	TokenProvider func() (string, error)
+
+	// A list of session tags that you want to pass. Each session tag consists of a key
+	// name and an associated value.
+	// For more information about session tags, see
+	// Tagging STS Sessions
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+	// IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+	Tags []types.Tag
+
+	// A list of keys for session tags that you want to set as transitive. If you set a
+	// tag key as transitive, the corresponding key and value passes to subsequent
+	// sessions in a role chain. For more information, see Chaining Roles with Session
+	// Tags
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+	// in the IAM User Guide. This parameter is optional.
+	TransitiveTagKeys []string
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+	o := AssumeRoleOptions{
+		Client:  client,
+		RoleARN: roleARN,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AssumeRoleProvider{
+		options: o,
+	}
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	// Apply defaults where parameters are not set.
+	if len(p.options.RoleSessionName) == 0 {
+		// Try to work out a role name that will hopefully end up unique.
+		p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano())
+	}
+	if p.options.Duration == 0 {
+		// Expire as often as AWS permits.
+		p.options.Duration = DefaultDuration
+	}
+	input := &sts.AssumeRoleInput{
+		DurationSeconds:   aws.Int32(int32(p.options.Duration / time.Second)),
+		PolicyArns:        p.options.PolicyARNs,
+		RoleArn:           aws.String(p.options.RoleARN),
+		RoleSessionName:   aws.String(p.options.RoleSessionName),
+		ExternalId:        p.options.ExternalID,
+		SourceIdentity:    p.options.SourceIdentity,
+		Tags:              p.options.Tags,
+		TransitiveTagKeys: p.options.TransitiveTagKeys,
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+	if p.options.SerialNumber != nil {
+		if p.options.TokenProvider != nil {
+			input.SerialNumber = p.options.SerialNumber
+			code, err := p.options.TokenProvider()
+			if err != nil {
+				return aws.Credentials{}, err
+			}
+			input.TokenCode = aws.String(code)
+		} else {
+			return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set")
+		}
+	}
+
+	resp, err := p.options.Client.AssumeRole(ctx, input)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     *resp.Credentials.AccessKeyId,
+		SecretAccessKey: *resp.Credentials.SecretAccessKey,
+		SessionToken:    *resp.Credentials.SessionToken,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   *resp.Credentials.Expiration,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000000..ddaf6df6ce11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,150 @@
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/aws-sdk-go-v2/service/sts/types" +) + +var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() + +const ( + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. +type AssumeRoleWithWebIdentityAPIClient interface { + AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + options WebIdentityRoleOptions +} + +// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider +type WebIdentityRoleOptions struct { + // Client implementation of the AssumeRoleWithWebIdentity operation. Required + Client AssumeRoleWithWebIdentityAPIClient + + // JWT Token Provider. Required + TokenRetriever IdentityTokenRetriever + + // IAM Role ARN to assume. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. STS will assign a default expiry + // duration if this value is unset. This is different from the Duration + // option of AssumeRoleProvider, which automatically assigns 15 minutes if + // Duration is unset. + // + // See the STS AssumeRoleWithWebIdentity API reference guide for more + // information on defaults. + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + Duration time.Duration + + // An IAM policy in JSON format that you want to use as an inline session policy. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you + // want to use as managed session policies. The policies must exist in the + // same account as the role. + PolicyARNs []types.PolicyDescriptorType +} + +// IdentityTokenRetriever is an interface for retrieving a JWT +type IdentityTokenRetriever interface { + GetIdentityToken() ([]byte, error) +} + +// IdentityTokenFile is for retrieving an identity token from the given file name +type IdentityTokenFile string + +// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte +func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { + b, err := ioutil.ReadFile(string(j)) + if err != nil { + return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) + } + + return b, nil +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.ClientAPI +func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider { + o := WebIdentityRoleOptions{ + Client: client, + RoleARN: roleARN, + TokenRetriever: tokenRetriever, + } + + for _, fn := range optFns { + fn(&o) + } + + return &WebIdentityRoleProvider{options: o} +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. 
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	b, err := p.options.TokenRetriever.GetIdentityToken()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provided source, %w", err)
+	}
+
+	sessionName := p.options.RoleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	}
+	input := &sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.options.PolicyARNs,
+		RoleArn:          &p.options.RoleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	}
+	if p.options.Duration != 0 {
+		// If set, use the value; otherwise STS will assign a default expiration duration.
+		input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+
+	// InvalidIdentityToken is a temporary error that can occur when assuming
+	// a role with a JWT web identity token, so it is added to the retryable
+	// error codes for this call.
+	resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+		options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+	})
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+	}
+
+	value := aws.Credentials{
+		AccessKeyID:     aws.ToString(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.ToString(resp.Credentials.SessionToken),
+		Source:          WebIdentityProviderName,
+		CanExpire:       true,
+		Expires:         *resp.Credentials.Expiration,
+	}
+	return value, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/doc.go
new file mode 100644
index 000000000000..81644bf8b7a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/doc.go
@@ -0,0 +1,58 @@
+// Package sdk is the official AWS SDK v2 for the Go programming language.
+//
+// aws-sdk-go-v2 is the v2 of the AWS SDK for the Go programming language.
+//
+// Getting started
+//
+// The best way to get started working with the SDK is to use `go get` to add the
+// SDK and desired service clients to your Go dependencies explicitly.
+//
+//	go get github.com/aws/aws-sdk-go-v2
+//	go get github.com/aws/aws-sdk-go-v2/config
+//	go get github.com/aws/aws-sdk-go-v2/service/dynamodb
+//
+// Hello AWS
+//
+// This example shows how you can use the v2 SDK to make an API request using the
+// SDK's Amazon DynamoDB client.
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//		"log"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+//	)
+//
+//	func main() {
+//		// Using the SDK's default configuration, loading additional config
+//		// and credentials values from the environment variables, shared
+//		// credentials, and shared configuration files
+//		cfg, err := config.LoadDefaultConfig(context.TODO(),
+//			config.WithRegion("us-west-2"),
+//		)
+//		if err != nil {
+//			log.Fatalf("unable to load SDK config, %v", err)
+//		}
+//
+//		// Using the Config value, create the DynamoDB client
+//		svc := dynamodb.NewFromConfig(cfg)
+//
+//		// Build the request with its input parameters
+//		resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
+//			Limit: aws.Int32(5),
+//		})
+//		if err != nil {
+//			log.Fatalf("failed to list tables, %v", err)
+//		}
+//
+//		fmt.Println("Tables:")
+//		for _, tableName := range resp.TableNames {
+//			fmt.Println(tableName)
+//		}
+//	}
+package sdk
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
new file mode 100644
index 000000000000..6f36dfd43057
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -0,0 +1,99 @@
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If a Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout.
+* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go new file mode 100644 index 000000000000..53f3d3c781ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -0,0 +1,318 @@ +package imds + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID provides the unique name of this API client +const ServiceID = "ec2imds" + +// Client provides the API client for interacting with the Amazon EC2 Instance +// Metadata Service API. 
+type Client struct {
+    options Options
+}
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState = internalconfig.ClientEnableState
+
+// Enumeration values for ClientEnableState
+const (
+    ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior
+    ClientDisabled           ClientEnableState = internalconfig.ClientDisabled           // client disabled
+    ClientEnabled            ClientEnableState = internalconfig.ClientEnabled            // client enabled
+)
+
+// EndpointModeState is an enum configuration variable describing the client endpoint mode.
+// Not configurable directly, but used by NewFromConfig.
+type EndpointModeState = internalconfig.EndpointModeState
+
+// Enumeration values for EndpointModeState
+const (
+    EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset
+    EndpointModeStateIPv4  EndpointModeState = internalconfig.EndpointModeStateIPv4
+    EndpointModeStateIPv6  EndpointModeState = internalconfig.EndpointModeStateIPv6
+)
+
+const (
+    disableClientEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+    // Client endpoint options
+    endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+    defaultIPv4Endpoint = "http://169.254.169.254"
+    defaultIPv6Endpoint = "http://[fd00:ec2::254]"
+)
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+    options = options.Copy()
+
+    for _, fn := range optFns {
+        fn(&options)
+    }
+
+    options.HTTPClient = resolveHTTPClient(options.HTTPClient)
+
+    if options.Retryer == nil {
+        options.Retryer = retry.NewStandard()
+    }
+    options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second)
+
+    if options.ClientEnableState == ClientDefaultEnableState {
+        if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") {
+            options.ClientEnableState = ClientDisabled
+        }
+    }
+
+    if len(options.Endpoint) == 0 {
+        if v := os.Getenv(endpointEnvVar); len(v) != 0 {
+            options.Endpoint = v
+        }
+    }
+
+    client := &Client{
+        options: options,
+    }
+
+    if client.options.tokenProvider == nil && !client.options.disableAPIToken {
+        client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL)
+    }
+
+    return client
+}
+
+// NewFromConfig returns an initialized Client based on the AWS SDK config, and
+// functional options. Provide additional functional options to further
+// configure the behavior of the client, such as changing the client's endpoint
+// or adding custom middleware behavior.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+    opts := Options{
+        APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+        HTTPClient: cfg.HTTPClient,
+    }
+
+    if cfg.Retryer != nil {
+        opts.Retryer = cfg.Retryer()
+    }
+
+    resolveClientEnableState(cfg, &opts)
+    resolveEndpointConfig(cfg, &opts)
+    resolveEndpointModeConfig(cfg, &opts)
+
+    return New(opts, optFns...)
+}
+
+// Options provides the fields for configuring the API client's behavior.
+type Options struct {
+    // Set of options to modify how an operation is invoked. These apply to all
+    // operations invoked for this client. Use functional options on operation
+    // call to modify this list for per operation behavior.
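As an illustrative aside, here is a minimal, hypothetical usage sketch of the `New` and `NewFromConfig` constructors above; the `config.LoadDefaultConfig` call, the endpoint value, and the error handling are assumptions, not part of the vendored source (the Options struct continues below):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Load the ambient AWS configuration (shared config files, env vars).
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// NewFromConfig copies API options, the HTTP client, and the retryer from
	// cfg; the functional option overrides the endpoint (assumed value).
	client := imds.NewFromConfig(cfg, func(o *imds.Options) {
		o.Endpoint = "http://169.254.169.254"
	})
	_ = client
}
```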
+    APIOptions []func(*middleware.Stack) error
+
+    // The endpoint the client will use to retrieve EC2 instance metadata.
+    //
+    // Specifies the EC2 Instance Metadata Service endpoint to use. If specified, it overrides EndpointMode.
+    //
+    // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT
+    // has a value, the client will use the value of the environment variable as
+    // the endpoint for operation calls.
+    //
+    // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+    Endpoint string
+
+    // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field.
+    //
+    // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+    // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+    //
+    // By default, if EndpointMode is not set (EndpointModeStateUnset), the client
+    // uses the default endpoint selection mode EndpointModeStateIPv4.
+    EndpointMode EndpointModeState
+
+    // The HTTP client to invoke API calls with. Defaults to client's default
+    // HTTP implementation if nil.
+    HTTPClient HTTPClient
+
+    // Retryer guides how HTTP requests should be retried in case of recoverable
+    // failures. When nil the API client will use a default retryer.
+    Retryer aws.Retryer
+
+    // Changes if the EC2 Instance Metadata client is enabled or not. Client
+    // will default to enabled if not set to ClientDisabled. When the client is
+    // disabled it will return an error for all operation calls.
+    //
+    // If ClientEnableState value is ClientDefaultEnableState (default value),
+    // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+    // "true", the client will be disabled.
+    //
+    // AWS_EC2_METADATA_DISABLED=true
+    ClientEnableState ClientEnableState
+
+    // Configures the events that will be sent to the configured logger.
+    ClientLogMode aws.ClientLogMode
+
+    // The logger writer interface to write logging messages to.
+    Logger logging.Logger
+
+    // provides the caching of API tokens used for operation calls. If unset,
+    // the API token will not be retrieved for the operation.
+    tokenProvider *tokenProvider
+
+    // option to disable the API token provider for testing.
+    disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+    Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+    to := o
+    to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+    return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+    return func(o *Options) {
+        o.APIOptions = append(o.APIOptions, optFns...)
+ } +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), + stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + if options.ClientEnableState == ClientDisabled { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: fmt.Errorf( + "access disabled to EC2 IMDS via client option, or %q environment variable", + disableClientEnvVar), + } + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + return result, metadata, err +} + +const ( + // HTTP client constants + defaultDialerTimeout = 250 * time.Millisecond + defaultResponseHeaderTimeout = 500 * time.Millisecond +) + +func resolveHTTPClient(client HTTPClient) HTTPClient { + if client == nil { + client = awshttp.NewBuildableClient() + } + + if c, ok := client.(*awshttp.BuildableClient); ok { + client = c. + WithDialerOptions(func(d *net.Dialer) { + // Use a custom Dial timeout for the EC2 Metadata service to account + // for the possibility the application might not be running in an + // environment with the service present. The client should fail fast in + // this case. + d.Timeout = defaultDialerTimeout + }). + WithTransportOptions(func(tr *http.Transport) { + // Use a custom Transport timeout for the EC2 Metadata service to + // account for the possibility that the application might be running in + // a container, and EC2Metadata service drops the connection after a + // single IP Hop. The client should fail fast in this case. 
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout + }) + } + + return client +} + +func resolveClientEnableState(cfg aws.Config, options *Options) error { + if options.ClientEnableState != ClientDefaultEnableState { + return nil + } + value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.ClientEnableState = value + return nil +} + +func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { + if options.EndpointMode != EndpointModeStateUnset { + return nil + } + value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.EndpointMode = value + return nil +} + +func resolveEndpointConfig(cfg aws.Config, options *Options) error { + if len(options.Endpoint) != 0 { + return nil + } + value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.Endpoint = value + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go new file mode 100644 index 000000000000..9e3bdb0e66e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getDynamicDataPath = "/latest/dynamic" + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { + if params == nil { + params = &GetDynamicDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, + addGetDynamicDataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetDynamicDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetDynamicDataInput provides the input parameters for the GetDynamicData +// operation. +type GetDynamicDataInput struct { + // The relative dynamic data path to retrieve. Can be empty string to + // retrieve a response containing a new line separated list of dynamic data + // resources available. + // + // Must not include the dynamic data base path. + // + // May include leading slash. If Path includes trailing slash the trailing + // slash will be included in the request for the resource. + Path string +} + +// GetDynamicDataOutput provides the output parameters for the GetDynamicData +// operation. 
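A hedged usage sketch for GetDynamicData and the Path semantics documented above; client construction and the chosen path are assumptions, not part of the vendored source:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	client := imds.New(imds.Options{})

	// "instance-identity/document" is joined onto the "/latest/dynamic" base
	// path; a leading or trailing slash on Path is honored as documented above.
	out, err := client.GetDynamicData(context.TODO(), &imds.GetDynamicDataInput{
		Path: "instance-identity/document",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Content.Close()

	body, err := io.ReadAll(out.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```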
+type GetDynamicDataOutput struct {
+    Content io.ReadCloser
+
+    ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+    return addAPIRequestMiddleware(stack,
+        options,
+        buildGetDynamicDataPath,
+        buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+    p, ok := params.(*GetDynamicDataInput)
+    if !ok {
+        return "", fmt.Errorf("unknown parameter type %T", params)
+    }
+
+    return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+    return &GetDynamicDataOutput{
+        Content: resp.Body,
+    }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 000000000000..24845dccd6d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,102 @@
+package imds
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "strings"
+    "time"
+
+    "github.com/aws/smithy-go"
+    smithyio "github.com/aws/smithy-go/io"
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves the IAM information associated with the instance. An
+// error is returned if the request fails or the response cannot be parsed.
+func (c *Client) GetIAMInfo(
+    ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+    *GetIAMInfoOutput, error,
+) {
+    if params == nil {
+        params = &GetIAMInfoInput{}
+    }
+
+    result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+        addGetIAMInfoMiddleware,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    out := result.(*GetIAMInfoOutput)
+    out.ResultMetadata = metadata
+    return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+    IAMInfo
+
+    ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+    return addAPIRequestMiddleware(stack,
+        options,
+        buildGetIAMInfoPath,
+        buildGetIAMInfoOutput,
+    )
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+    return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+    defer func() {
+        closeErr := resp.Body.Close()
+        if err == nil {
+            err = closeErr
+        } else if closeErr != nil {
+            err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+        }
+    }()
+
+    var buff [1024]byte
+    ringBuffer := smithyio.NewRingBuffer(buff[:])
+    body := io.TeeReader(resp.Body, ringBuffer)
+
+    imdsResult := &GetIAMInfoOutput{}
+    if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+        return nil, &smithy.DeserializationError{
+            Err:      fmt.Errorf("failed to decode instance identity document, %w", err),
+            Snapshot: ringBuffer.Bytes(),
+        }
+    }
+    // Any code other than "success" is an error
+    if !strings.EqualFold(imdsResult.Code, "success") {
+        return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+            imdsResult.Code)
+    }
+
+    return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling IAM info from the metadata
+// API.
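A hypothetical call-site sketch for GetIAMInfo above; client construction and error handling are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	client := imds.New(imds.Options{})

	out, err := client.GetIAMInfo(context.TODO(), &imds.GetIAMInfoInput{})
	if err != nil {
		log.Fatal(err) // a non-"Success" Code also surfaces here as an error
	}
	fmt.Println(out.InstanceProfileArn, out.LastUpdated)
}
```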
+type IAMInfo struct {
+    Code               string
+    LastUpdated        time.Time
+    InstanceProfileArn string
+    InstanceProfileID  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
new file mode 100644
index 000000000000..a87758ed302d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -0,0 +1,109 @@
+package imds
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "time"
+
+    "github.com/aws/smithy-go"
+    smithyio "github.com/aws/smithy-go/io"
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document"
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *Client) GetInstanceIdentityDocument(
+    ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options),
+) (
+    *GetInstanceIdentityDocumentOutput, error,
+) {
+    if params == nil {
+        params = &GetInstanceIdentityDocumentInput{}
+    }
+
+    result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns,
+        addGetInstanceIdentityDocumentMiddleware,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    out := result.(*GetInstanceIdentityDocumentOutput)
+    out.ResultMetadata = metadata
+    return out, nil
+}
+
+// GetInstanceIdentityDocumentInput provides the input parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentInput struct{}
+
+// GetInstanceIdentityDocumentOutput provides the output parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentOutput struct { + InstanceIdentityDocument + + ResultMetadata middleware.Metadata +} + +func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetInstanceIdentityDocumentOutput, + ) +} + +func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { + return getInstanceIdentityDocumentPath, nil +} + +func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(resp.Body, ringBuffer) + + output := &GetInstanceIdentityDocumentOutput{} + if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { + return nil, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode instance identity document, %w", err), + Snapshot: ringBuffer.Bytes(), + } + } + + return output, nil +} + +// InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go new file mode 100644 index 000000000000..cb0ce4c0004d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getMetadataPath = "/latest/meta-data" + +// GetMetadata uses the path provided to request information from the Amazon +// EC2 Instance Metadata Service. The content will be returned as a string, or +// error if the request failed. +func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { + if params == nil { + params = &GetMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, + addGetMetadataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetMetadataInput provides the input parameters for the GetMetadata +// operation. +type GetMetadataInput struct { + // The relative metadata path to retrieve. Can be empty string to retrieve + // a response containing a new line separated list of metadata resources + // available. + // + // Must not include the metadata base path. 
+    //
+    // May include leading slash. If Path includes a trailing slash, the trailing
+    // slash will be included in the request for the resource.
+    Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+    Content io.ReadCloser
+
+    ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+    return addAPIRequestMiddleware(stack,
+        options,
+        buildGetMetadataPath,
+        buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+    p, ok := params.(*GetMetadataInput)
+    if !ok {
+        return "", fmt.Errorf("unknown parameter type %T", params)
+    }
+
+    return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+    return &GetMetadataOutput{
+        Content: resp.Body,
+    }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 000000000000..7b9b48912af3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,72 @@
+package imds
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the AWS region the instance is running in, from the
+// instance's identity document. An error is returned if the request fails or
+// the response cannot be parsed.
+func (c *Client) GetRegion(
+    ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+    *GetRegionOutput, error,
+) {
+    if params == nil {
+        params = &GetRegionInput{}
+    }
+
+    result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+        addGetRegionMiddleware,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    out := result.(*GetRegionOutput)
+    out.ResultMetadata = metadata
+    return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
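A hedged usage sketch for the GetRegion operation above, assuming an environment where IMDS is reachable:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	client := imds.New(imds.Options{})

	out, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
	if err != nil {
		log.Fatal(err)
	}
	// The region is parsed out of the instance identity document, as the
	// middleware wiring below shows.
	fmt.Println("region:", out.Region)
}
```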
+type GetRegionOutput struct { + Region string + + ResultMetadata middleware.Metadata +} + +func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetRegionOutput, + ) +} + +func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { + out, err := buildGetInstanceIdentityDocumentOutput(resp) + if err != nil { + return nil, err + } + + result, ok := out.(*GetInstanceIdentityDocumentOutput) + if !ok { + return nil, fmt.Errorf("unexpected instance identity document type, %T", out) + } + + region := result.Region + if len(region) == 0 { + return "", fmt.Errorf("instance metadata did not return a region value") + } + + return &GetRegionOutput{ + Region: region, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go new file mode 100644 index 000000000000..841f802c1a36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -0,0 +1,118 @@ +package imds + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getTokenPath = "/latest/api/token" +const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" + +// getToken uses the duration to return a token for EC2 IMDS, or an error if +// the request failed. +func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { + if params == nil { + params = &getTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, + addGetTokenMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*getTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type getTokenInput struct { + TokenTTL time.Duration +} + +type getTokenOutput struct { + Token string + TokenTTL time.Duration + + ResultMetadata middleware.Metadata +} + +func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { + err := addRequestMiddleware(stack, + options, + "PUT", + buildGetTokenPath, + buildGetTokenOutput) + if err != nil { + return err + } + + err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) + if err != nil { + return err + } + + return nil +} + +func buildGetTokenPath(interface{}) (string, error) { + return getTokenPath, nil +} + +func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + ttlHeader := resp.Header.Get(tokenTTLHeader) + tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse API token, %w", err) + } + + var token strings.Builder + if _, err = io.Copy(&token, resp.Body); err != nil { + return nil, fmt.Errorf("unable to read API token, %w", err) + } + + return &getTokenOutput{ + Token: token.String(), + TokenTTL: time.Duration(tokenTTL) * time.Second, + }, nil +} + +type tokenTTLRequestHeader struct{} + +func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } +func (*tokenTTLRequestHeader) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+    req, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+    }
+
+    input, ok := in.Parameters.(*getTokenInput)
+    if !ok {
+        return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+    }
+
+    req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+    return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 000000000000..88aa61e9ad95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,60 @@
+package imds
+
+import (
+    "context"
+    "io"
+
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData requests the EC2 instance's user data from the EC2 instance
+// metadata service. The content is returned as a stream; an error is returned
+// if the request fails.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+    if params == nil {
+        params = &GetUserDataInput{}
+    }
+
+    result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+        addGetUserDataMiddleware,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    out := result.(*GetUserDataOutput)
+    out.ResultMetadata = metadata
+    return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+    Content io.ReadCloser
+
+    ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+    return addAPIRequestMiddleware(stack,
+        options,
+        buildGetUserDataPath,
+        buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+    return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+    return &GetUserDataOutput{
+        Content: resp.Body,
+    }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 000000000000..bacdb5d21f26
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,11 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden by providing a Context with a timeout or deadline
+// when calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.
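To make the default-timeout behavior described in this package comment concrete, a minimal sketch follows; the 10-second deadline is an arbitrary assumption:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	client := imds.New(imds.Options{})

	// A context deadline overrides the client's default operation timeout
	// (see operationTimeout in request_middleware.go below).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if _, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"}); err != nil {
		log.Fatal(err)
	}
}
```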
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
new file mode 100644
index 000000000000..249787c83b1b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.12.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
new file mode 100644
index 000000000000..d72fcb5626f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -0,0 +1,98 @@
+package config
+
+import (
+    "fmt"
+    "strings"
+)
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+    ClientDefaultEnableState ClientEnableState = iota
+    ClientDisabled
+    ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for EndpointModeState
+const (
+    EndpointModeStateUnset EndpointModeState = iota
+    EndpointModeStateIPv4
+    EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string value.
+// An empty value maps to EndpointModeStateUnset; any other unknown value
+// returns an error.
+func (e *EndpointModeState) SetFromString(v string) error {
+    v = strings.TrimSpace(v)
+
+    switch {
+    case len(v) == 0:
+        *e = EndpointModeStateUnset
+    case strings.EqualFold(v, "IPv6"):
+        *e = EndpointModeStateIPv6
+    case strings.EqualFold(v, "IPv4"):
+        *e = EndpointModeStateIPv4
+    default:
+        return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+    }
+    return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+    GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+    GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+    GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+    for _, source := range sources {
+        if resolver, ok := source.(ClientEnableStateResolver); ok {
+            value, found, err = resolver.GetEC2IMDSClientEnableState()
+            if err != nil || found {
+                return value, found, err
+            }
+        }
+    }
+    return value, found, err
+}
+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.
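The three Resolve* helpers in this file share one pattern: scan opaque config sources, type-assert for the relevant getter, and stop at the first found value or error. Since this package is internal and cannot be imported, the following standalone sketch mirrors that pattern with toy types, not the package's actual API:

```go
package main

import "fmt"

// endpointResolver mirrors the shape of the internal EndpointResolver interface.
type endpointResolver interface {
	GetEC2IMDSEndpoint() (string, bool, error)
}

// envSource is a toy config source that implements the getter.
type envSource struct{ endpoint string }

func (s envSource) GetEC2IMDSEndpoint() (string, bool, error) {
	return s.endpoint, s.endpoint != "", nil
}

// resolveEndpoint scans sources and returns the first found value, matching
// the scan-and-assert loop used by ResolveEndpointConfig above.
func resolveEndpoint(sources []interface{}) (string, bool, error) {
	for _, src := range sources {
		if r, ok := src.(endpointResolver); ok {
			if v, found, err := r.GetEC2IMDSEndpoint(); err != nil || found {
				return v, found, err
			}
		}
	}
	return "", false, nil
}

func main() {
	// Sources that do not implement the interface are simply skipped.
	sources := []interface{}{struct{}{}, envSource{endpoint: "http://[fd00:ec2::254]"}}
	v, found, _ := resolveEndpoint(sources)
	fmt.Println(v, found) // http://[fd00:ec2::254] true
}
```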
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointModeResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpointMode() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} + +// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. +func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpoint() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go new file mode 100644 index 000000000000..605cbd13140f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -0,0 +1,266 @@ +package imds + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/url" + "path" + "time" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func addAPIRequestMiddleware(stack *middleware.Stack, + options Options, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = addRequestMiddleware(stack, options, "GET", getPath, getOutput) + if err != nil { + return err + } + + // Token Serializer build and state management. + if !options.disableAPIToken { + err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) + if err != nil { + return err + } + } + + return nil +} + +func addRequestMiddleware(stack *middleware.Stack, + options Options, + method string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) + if err != nil { + return err + } + + // Operation timeout + err = stack.Initialize.Add(&operationTimeout{ + DefaultTimeout: defaultOperationTimeout, + }, middleware.Before) + if err != nil { + return err + } + + // Operation Serializer + err = stack.Serialize.Add(&serializeRequest{ + GetPath: getPath, + Method: method, + }, middleware.After) + if err != nil { + return err + } + + // Operation endpoint resolver + err = stack.Serialize.Insert(&resolveEndpoint{ + Endpoint: options.Endpoint, + EndpointMode: options.EndpointMode, + }, "OperationSerializer", middleware.Before) + if err != nil { + return err + } + + // Operation Deserializer + err = stack.Deserialize.Add(&deserializeResponse{ + GetOutput: getOutput, + }, middleware.After) + if err != nil { + return err + } + + // Retry support + return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ + Retryer: options.Retryer, + LogRetryAttempts: options.ClientLogMode.IsRetries(), + }) +} + +type serializeRequest struct { + GetPath func(interface{}) (string, error) + Method string +} + +func (*serializeRequest) ID() string { + return "OperationSerializer" +} + +func (m *serializeRequest) 
HandleSerialize(
+    ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+    request, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+    }
+
+    reqPath, err := m.GetPath(in.Parameters)
+    if err != nil {
+        return out, metadata, fmt.Errorf("unable to get request URL path, %w", err)
+    }
+
+    request.Request.URL.Path = reqPath
+    request.Request.Method = m.Method
+
+    return next.HandleSerialize(ctx, in)
+}
+
+type deserializeResponse struct {
+    GetOutput func(*smithyhttp.Response) (interface{}, error)
+}
+
+func (*deserializeResponse) ID() string {
+    return "OperationDeserializer"
+}
+
+func (m *deserializeResponse) HandleDeserialize(
+    ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+    out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+    out, metadata, err = next.HandleDeserialize(ctx, in)
+    if err != nil {
+        return out, metadata, err
+    }
+
+    resp, ok := out.RawResponse.(*smithyhttp.Response)
+    if !ok {
+        return out, metadata, fmt.Errorf(
+            "unexpected transport response type, %T, want %T", out.RawResponse, resp)
+    }
+    defer resp.Body.Close()
+
+    // read the full body so that any operation timeouts cleanup will not race
+    // the body being read.
+    body, err := ioutil.ReadAll(resp.Body)
+    if err != nil {
+        return out, metadata, fmt.Errorf("read response body failed, %w", err)
+    }
+    resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+    // Any status code outside the 2xx range (200 <= code < 300) is an error.
+    if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+        return out, metadata, &smithyhttp.ResponseError{
+            Response: resp,
+            Err:      fmt.Errorf("request to EC2 IMDS failed"),
+        }
+    }
+
+    result, err := m.GetOutput(resp)
+    if err != nil {
+        return out, metadata, fmt.Errorf(
+            "unable to get deserialized result for response, %w", err,
+        )
+    }
+    out.Result = result
+
+    return out, metadata, err
+}
+
+type resolveEndpoint struct {
+    Endpoint     string
+    EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+    return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+    ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+    req, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+    }
+
+    var endpoint string
+    if len(m.Endpoint) > 0 {
+        endpoint = m.Endpoint
+    } else {
+        switch m.EndpointMode {
+        case EndpointModeStateIPv6:
+            endpoint = defaultIPv6Endpoint
+        case EndpointModeStateIPv4:
+            fallthrough
+        case EndpointModeStateUnset:
+            endpoint = defaultIPv4Endpoint
+        default:
+            return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+        }
+    }
+
+    req.URL, err = url.Parse(endpoint)
+    if err != nil {
+        return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+    }
+
+    return next.HandleSerialize(ctx, in)
+}
+
+const (
+    defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources that are also
+// canceled by the stack's context are completely consumed before returning.
+// Otherwise the timeout cleanup will race the resource being consumed
+// upstream.
+type operationTimeout struct {
+    DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+    ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+    output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+    if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+        var cancelFn func()
+        ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+        defer cancelFn()
+    }
+
+    return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.
+func appendURIPath(base, add string) string {
+    reqPath := path.Join(base, add)
+    if len(add) != 0 && add[len(add)-1] == '/' {
+        reqPath += "/"
+    }
+    return reqPath
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 000000000000..275fade488a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,237 @@
+package imds
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "net/http"
+    "sync"
+    "sync/atomic"
+    "time"
+
+    smithy "github.com/aws/smithy-go"
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+    // Headers for Token and TTL
+    tokenHeader     = "x-aws-ec2-metadata-token"
+    defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+    client   *Client
+    tokenTTL time.Duration
+
+    token    *apiToken
+    tokenMux sync.RWMutex
+
+    disabled uint32 // Atomic updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+    return &tokenProvider{
+        client:   client,
+        tokenTTL: ttl,
+    }
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance metadata service.
+type apiToken struct {
+    token   string
+    expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired reports whether the token is expired.
+func (t *apiToken) Expired() bool {
+    // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+    // time is always based on reported wall-clock time.
+    return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware that, if the token provider
+// is enabled, attempts to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If the request for getToken fails, the token provider may be disabled from
+// future requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+    ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+    out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+    if !t.enabled() {
+        // short-circuits to insecure data flow if token provider is disabled.
+        return next.HandleFinalize(ctx, input)
+    }
+
+    req, ok := input.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+    }
+
+    tok, err := t.getToken(ctx)
+    if err != nil {
+        // If the error allows the token to downgrade to insecure flow allow that.
+        var bypassErr *bypassTokenRetrievalError
+        if errors.As(err, &bypassErr) {
+            return next.HandleFinalize(ctx, input)
+        }
+
+        return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+    }
+
+    req.Header.Set(tokenHeader, tok.token)
+
+    return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+    ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+    out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+    out, metadata, err = next.HandleDeserialize(ctx, input)
+    if err == nil {
+        return out, metadata, err
+    }
+
+    resp, ok := out.RawResponse.(*smithyhttp.Response)
+    if !ok {
+        return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+    }
+
+    if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+        err = &retryableError{Err: err}
+        t.enable()
+    }
+
+    return out, metadata, err
+}
+
+type retryableError struct {
+    Err error
+}
+
+func (*retryableError) RetryableError() bool { return true }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+    if !t.enabled() {
+        return nil, &bypassTokenRetrievalError{
+            Err: fmt.Errorf("cannot get API token, provider disabled"),
+        }
+    }
+
+    t.tokenMux.RLock()
+    tok = t.token
+    t.tokenMux.RUnlock()
+
+    if tok != nil && !tok.Expired() {
+        return tok, nil
+    }
+
+    tok, err = t.updateToken(ctx)
+    if err != nil {
+        return nil, fmt.Errorf("cannot get API token, %w", err)
+    }
+
+    return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+    t.tokenMux.Lock()
+    defer t.tokenMux.Unlock()
+
+    // Prevent concurrent callers from re-retrieving a token that was already
+    // refreshed while they were waiting on the lock.
+    if t.token != nil && !t.token.Expired() {
+        tok := t.token
+        return tok, nil
+    }
+
+    result, err := t.client.getToken(ctx, &getTokenInput{
+        TokenTTL: t.tokenTTL,
+    })
+    if err != nil {
+        // Inspect the failure to decide whether the token provider should be
+        // disabled for future requests.
+        var statusErr interface{ HTTPStatusCode() int }
+        if errors.As(err, &statusErr) {
+            switch statusErr.HTTPStatusCode() {
+
+            // Disable get token if failed because of 403, 404, or 405
+            case http.StatusForbidden,
+                http.StatusNotFound,
+                http.StatusMethodNotAllowed:
+
+                t.disable()
+
+            // 400 errors are terminal, and need to be upstreamed
+            case http.StatusBadRequest:
+                return nil, err
+            }
+        }
+
+        // Disable if request send failed or timed out getting response
+        var re *smithyhttp.RequestSendError
+        var ce *smithy.CanceledError
+        if errors.As(err, &re) || errors.As(err, &ce) {
+            atomic.StoreUint32(&t.disabled, 1)
+        }
+
+        // Token couldn't be retrieved, but bypass this, and allow the
+        // request to continue.
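For context on what getToken and the serializers above exchange on the wire, here is a standalone sketch of the IMDSv2 handshake using only net/http; the endpoint and header names mirror the constants defined in this vendored code, and the flow is a simplification of the provider's retry and disable logic (the vendored function continues below):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Step 1: PUT /latest/api/token with a requested TTL header (IMDSv2).
	req, _ := http.NewRequest(http.MethodPut,
		"http://169.254.169.254/latest/api/token", nil)
	req.Header.Set("X-Aws-Ec2-Metadata-Token-Ttl-Seconds", "300")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err) // the token provider downgrades to the insecure flow here
	}
	tok, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// Step 2: present the token on subsequent metadata requests.
	req2, _ := http.NewRequest(http.MethodGet,
		"http://169.254.169.254/latest/meta-data/instance-id", nil)
	req2.Header.Set("x-aws-ec2-metadata-token", strings.TrimSpace(string(tok)))
	resp2, err := http.DefaultClient.Do(req2)
	if err != nil {
		log.Fatal(err)
	}
	defer resp2.Body.Close()
	id, _ := io.ReadAll(resp2.Body)
	fmt.Println(string(id))
}
```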
+        return nil, &bypassTokenRetrievalError{Err: err}
+    }
+
+    tok := &apiToken{
+        token:   result.Token,
+        expires: timeNow().Add(result.TokenTTL),
+    }
+    t.token = tok
+
+    return tok, nil
+}
+
+type bypassTokenRetrievalError struct {
+    Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+    return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+// enabled returns whether the token provider is currently enabled.
+func (t *tokenProvider) enabled() bool {
+    return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// disable disables the token provider so it will no longer attempt to inject
+// the token, nor request updates.
+func (t *tokenProvider) disable() {
+    atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provider to start refreshing tokens, and adding
+// them to pending requests.
+func (t *tokenProvider) enable() {
+    t.tokenMux.Lock()
+    t.token = nil
+    t.tokenMux.Unlock()
+    atomic.StoreUint32(&t.disabled, 0)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
new file mode 100644
index 000000000000..11b4965e92fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
@@ -0,0 +1,157 @@
+# v1.11.10 (2022-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-05-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2022-05-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2022-04-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2022-01-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0
(2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-09-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-06-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
new file mode 100644
index 000000000000..4059f9851d73
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
@@ -0,0 +1,37 @@
+package manager
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation.
+type DeleteObjectsAPIClient interface {
+	DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
+}
+
+// DownloadAPIClient is an S3 API client that can invoke the GetObject operation.
+type DownloadAPIClient interface {
+	GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error)
+}
+
+// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation.
+type HeadBucketAPIClient interface {
+	HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error)
+}
+
+// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+	ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
+}
+
+// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload,
+// CompleteMultipartUpload, and AbortMultipartUpload operations.
+type UploadAPIClient interface {
+	PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error)
+	UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
+	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
+	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
+	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
new file mode 100644
index 000000000000..d3b828979fcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
@@ -0,0 +1,23 @@
+package manager
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+func validateSupportedARNType(bucket string) error {
+	if !arn.IsARN(bucket) {
+		return nil
+	}
+
+	parsedARN, err := arn.Parse(bucket)
+	if err != nil {
+		return err
+	}
+
+	if parsedARN.Service == "s3-object-lambda" {
+		return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
new file mode 100644
index 000000000000..2d8bd7e00538
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
@@ -0,0 +1,139 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegion will attempt to get the region for a bucket using
the
+// client's configured region to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A BucketNotFound error will be returned if the bucket does not exist in the
+// AWS partition the client region belongs to.
+//
+// For example, to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Println("error:", err)
+//		return
+//	}
+//
+//	bucket := "my-bucket"
+//	region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket)
+//	if err != nil {
+//		var bnf manager.BucketNotFound
+//		if errors.As(err, &bnf) {
+//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//		}
+//		return
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// By default the request will be made to the Amazon S3 endpoint using virtual-hosted-style addressing.
+//
+//	bucketname.s3.us-west-2.amazonaws.com/
+//
+// To configure GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the EndpointResolver on the config or client the
+// utility is called with.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEndpointResolver(
+//			aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+//				return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil
+//			}),
+//		),
+//	)
+//	if err != nil {
+//		panic(err)
+//	}
+func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) {
+	var captureBucketRegion deserializeBucketRegion
+
+	clientOptionFns := make([]func(*s3.Options), len(optFns)+1)
+	clientOptionFns[0] = func(options *s3.Options) {
+		options.Credentials = aws.AnonymousCredentials{}
+		options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware)
+	}
+	copy(clientOptionFns[1:], optFns)
+
+	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
+		Bucket: aws.String(bucket),
+	}, clientOptionFns...)
+	if len(captureBucketRegion.BucketRegion) == 0 && err != nil {
+		var httpStatusErr interface {
+			HTTPStatusCode() int
+		}
+		if !errors.As(err, &httpStatusErr) {
+			return "", err
+		}
+
+		if httpStatusErr.HTTPStatusCode() == http.StatusNotFound {
+			return "", &bucketNotFound{}
+		}
+
+		return "", err
+	}
+
+	return captureBucketRegion.BucketRegion, nil
+}
+
+type deserializeBucketRegion struct {
+	BucketRegion string
+}
+
+func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(d, middleware.After)
+}
+
+func (d *deserializeBucketRegion) ID() string {
+	return "DeserializeBucketRegion"
+}
+
+func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+	}
+
+	d.BucketRegion = resp.Header.Get(bucketRegionHeader)
+
+	return out, metadata, err
+}
+
+// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion.
+type BucketNotFound interface {
+	error
+
+	isBucketNotFound()
+}
+
+type bucketNotFound struct{}
+
+func (b *bucketNotFound) Error() string {
+	return "bucket not found"
+}
+
+func (b *bucketNotFound) isBucketNotFound() {}
+
+var _ BucketNotFound = (*bucketNotFound)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
new file mode 100644
index 000000000000..e781aef610d4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
@@ -0,0 +1,79 @@
+package manager
+
+import (
+	"io"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker
+type BufferedReadSeeker struct {
+	r                 io.ReadSeeker
+	buffer            []byte
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) is greater than the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { + n, err := b.r.Seek(offset, whence) + + b.reset(b.r) + + return n, err +} + +// ReadAt will read up to len(p) bytes at the given file offset. +// This will result in the buffer being cleared. +func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { + _, err := b.Seek(off, io.SeekStart) + if err != nil { + return 0, err + } + + return b.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go new file mode 100644 index 000000000000..e2ab143b6c0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go @@ -0,0 +1,8 @@ +//go:build !windows +// +build !windows + +package manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go new file mode 100644 index 000000000000..1ae881c104aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go @@ -0,0 +1,5 @@ +package manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return NewBufferedReadSeekerWriteToPool(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go new file mode 100644 index 000000000000..179fe10f4035 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go @@ -0,0 +1,8 @@ +//go:build !windows +// +build !windows + +package manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go new file mode 100644 index 000000000000..88887ff586e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go @@ -0,0 +1,5 @@ +package manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return NewPooledBufferedWriterReadFromProvider(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go new file mode 100644 index 000000000000..31171a69875a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go @@ -0,0 +1,3 @@ +// Package manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. 
+package manager
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
new file mode 100644
index 000000000000..c3fbe0219825
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
@@ -0,0 +1,519 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/smithy-go/logging"
+)
+
+const userAgentKey = "s3-transfer"
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
+const DefaultPartBodyMaxRetries = 3
+
+type errReadingBody struct {
+	err error
+}
+
+func (e *errReadingBody) Error() string {
+	return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+func (e *errReadingBody) Unwrap() error {
+	return e.err
+}
+
+// Downloader is the structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Downloader's properties is not safe to do concurrently.
+type Downloader struct {
+	// The size (in bytes) to request from S3 for each part.
+	// The minimum allowed part size is 5MB, and if this value is set to zero,
+	// the DefaultDownloadPartSize value will be used.
+	//
+	// PartSize is ignored if the Range input parameter is provided.
+	PartSize int64
+
+	// PartBodyMaxRetries is the number of retry attempts to make for failed part downloads.
+	PartBodyMaxRetries int
+
+	// Logger to send logging messages to
+	Logger logging.Logger
+
+	// Enable logging of part download retry attempts
+	LogInterruptedDownloads bool
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultDownloadConcurrency value will be used.
+	//
+	// Concurrency of 1 will download the parts sequentially.
+	//
+	// Concurrency is ignored if the Range input parameter is provided.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 DownloadAPIClient
+
+	// List of client options that will be passed down to individual API
+	// operation requests made by the downloader.
+	ClientOptions []func(*s3.Options)
+
+	// Defines the buffer strategy used when downloading a part.
+	//
+	// If a WriterReadFromProvider is given the Download manager
+	// will pass the io.WriterAt of the Download request to the provider
+	// and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from http response body.
+	BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderClientOptions appends to the Downloader's API request options.
+func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
+	return func(d *Downloader) {
+		d.ClientOptions = append(d.ClientOptions, opts...)
+	}
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior.
Requires an S3 API client, such as the client
+// created by s3.NewFromConfig, that satisfies the DownloadAPIClient
+// interface.
+//
+// Example:
+//	// Load AWS Config
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create an S3 client using the loaded configuration
+//	s3.NewFromConfig(cfg)
+//
+//	// Create a downloader passing it the S3 client
+//	downloader := manager.NewDownloader(s3.NewFromConfig(cfg))
+//
+//	// Create a downloader with the client and custom downloader options
+//	downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:                 c,
+		PartSize:           DefaultDownloadPartSize,
+		PartBodyMaxRetries: DefaultPartBodyMaxRetries,
+		Concurrency:        DefaultDownloadConcurrency,
+		BufferProvider:     defaultDownloadBufferProvider(),
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// Download downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// The Context passed to Download must not be nil. A nil Context will cause a
+// panic. Use the Context to add deadlines, timeouts, etc. Download may create
+// sub-contexts for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderClientOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or an in-memory []byte wrapper using manager.WriteAtBuffer. If you
+// download files into memory, do not forget to pre-allocate memory to avoid
+// additional allocations and GC runs.
+//
+// Example:
+//	// pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput
+//	buf := make([]byte, int(headObject.ContentLength))
+//	// wrap with manager.NewWriteAtBuffer
+//	w := manager.NewWriteAtBuffer(buf)
+//	// download file into the memory
+//	numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{
+//		Bucket: aws.String(bucket),
+//		Key:    aws.String(item),
+//	})
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided, the downloader will perform
+// a single GetObject request for that object's range. This will cause the part
+// size and concurrency configurations to be ignored.
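+//
+// A minimal illustrative sketch of a ranged download (added commentary, not
+// upstream documentation; "my-bucket" and "my-key" are hypothetical names):
+//
+//	n, err := downloader.Download(ctx, w, &s3.GetObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("my-key"),
+//		Range:  aws.String("bytes=0-1048575"), // first 1 MiB, single request
+//	})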
+func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil {
+		return 0, err
+	}
+
+	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+	// Copy ClientOptions
+	clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1)
+	clientOptions = append(clientOptions, func(o *s3.Options) {
+		o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
+	})
+	clientOptions = append(clientOptions, impl.cfg.ClientOptions...)
+	impl.cfg.ClientOptions = clientOptions
+
+	for _, option := range options {
+		option(&impl.cfg)
+	}
+
+	// Ensures we don't need nil checks later on
+	impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger)
+
+	impl.partBodyMaxRetries = d.PartBodyMaxRetries
+
+	impl.totalBytes = -1
+	if impl.cfg.Concurrency == 0 {
+		impl.cfg.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if impl.cfg.PartSize == 0 {
+		impl.cfg.PartSize = DefaultDownloadPartSize
+	}
+
+	return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+	ctx context.Context
+	cfg Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+
+	partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range.
+	// This enables the functionality of ranged gets with the downloader, but
+	// at the cost of no multipart downloads.
+	if rng := aws.ToString(d.in.Range); len(rng) > 0 {
+		d.downloadRange(rng)
+		return d.written, d.err
+	}
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.cfg.Concurrency)
+
+		for i := 0; i < d.cfg.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queuing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+			d.pos += d.cfg.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Checking if we read anything new
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
+		var responseError interface {
+			HTTPStatusCode() int
+		}
+		if errors.As(d.err, &responseError) {
+			if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable {
+				d.err = nil
+			}
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
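+//
+// Illustrative flow (added commentary, not upstream documentation): download()
+// acts as the producer, queuing one dlchunk per PartSize range, and each of the
+// Concurrency workers loops roughly as:
+//
+//	for chunk := range ch {        // until close(ch)
+//		d.downloadChunk(chunk) // ranged GetObject; failures recorded via setErr
+//	}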
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		if d.getErr() != nil {
+			// Drain the channel if there is an error, to prevent deadlocking
+			// of download producer.
+			continue
+		}
+
+		if err := d.downloadChunk(chunk); err != nil {
+			d.setErr(err)
+		}
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+	d.pos += d.cfg.PartSize
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos}
+	// Ranges specified will short circuit the multipart download
+	chunk.withRange = rng
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+
+	// Update the position based on the amount of data received.
+	d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from S3.
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+	var params s3.GetObjectInput
+	awsutil.Copy(&params, d.in)
+
+	// Get the next byte range of data
+	params.Range = aws.String(chunk.ByteRange())
+
+	var n int64
+	var err error
+	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+		n, err = d.tryDownloadChunk(&params, &chunk)
+		if err == nil {
+			break
+		}
+		// Check if the returned error is an errReadingBody.
+		// If err is errReadingBody this indicates that an error
+		// occurred while copying the http response body.
+		// If this occurs we unwrap the err to set the underlying error
+		// and attempt any remaining retries.
+		if bodyErr, ok := err.(*errReadingBody); ok {
+			err = bodyErr.Unwrap()
+		} else {
+			return err
+		}
+
+		chunk.cur = 0
+
+		d.cfg.Logger.Logf(logging.Debug,
+			"object part body download interrupted %s, err, %v, retrying attempt %d",
+			aws.ToString(params.Key), err, retry)
+	}
+
+	d.incrWritten(n)
+
+	return err
+}
+
+func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) {
+	cleanup := func() {}
+	if d.cfg.BufferProvider != nil {
+		w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
+	}
+	defer cleanup()
+
+	resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...)
+	if err != nil {
+		return 0, err
+	}
+	d.setTotalBytes(resp) // Set total if not yet set.
+
+	n, err := io.Copy(w, resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return n, &errReadingBody{err: err}
+	}
+
+	return n, nil
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// It will extract the object's total bytes from the Content-Range header if
+// the file will be chunked, or from Content-Length. Content-Length is used
+// when the response does not include a Content-Range, meaning the object was
+// not chunked; this occurs when the full file fits within the PartSize directive.
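+//
+// For illustration (added commentary): the two Content-Range forms handled
+// below are, for example:
+//
+//	Content-Range: bytes 0-5242879/20971520   -> total size is 20971520
+//	Content-Range: bytes 0-5242879/*          -> total unknown, kept as -1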
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	if d.totalBytes >= 0 {
+		return
+	}
+
+	if resp.ContentRange == nil {
+		// ContentRange is nil when the full file content is provided, and
+		// is not chunked. Use ContentLength instead.
+		if resp.ContentLength > 0 {
+			d.totalBytes = resp.ContentLength
+			return
+		}
+	} else {
+		parts := strings.Split(*resp.ContentRange, "/")
+
+		total := int64(-1)
+		var err error
+		// Check whether a numbered total exists.
+		// If one does not exist, we will assume the total to be -1, undefined,
+		// and sequentially download each chunk until hitting a 416 error.
+		totalStr := parts[len(parts)-1]
+		if totalStr != "*" {
+			total, err = strconv.ParseInt(totalStr, 10, 64)
+			if err != nil {
+				d.err = err
+				return
+			}
+		}
+
+		d.totalBytes = total
+	}
+}
+
+func (d *downloader) incrWritten(n int64) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+	w     io.WriterAt
+	start int64
+	size  int64
+	cur   int64
+
+	// specifies the byte range the chunk should be downloaded with.
+	withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk the size will be ignored when writing,
+// as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+	if c.cur >= c.size && len(c.withRange) == 0 {
+		return 0, io.EOF
+	}
+
+	n, err = c.w.WriteAt(p, c.start+c.cur)
+	c.cur += int64(n)
+
+	return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by the
+// client to request the chunk's range.
+func (c *dlchunk) ByteRange() string {
+	if len(c.withRange) != 0 {
+		return c.withRange
+	}
+
+	return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
new file mode 100644
index 000000000000..1e16ec7ab73a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package manager
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
new file mode 100644
index 000000000000..6b93a3bc443a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
@@ -0,0 +1,251 @@
+package manager
+
+import (
+	"context"
+	"fmt"
+	"sync"
+)
+
+type byteSlicePool interface {
+	Get(context.Context) (*[]byte, error)
+	Put(*[]byte)
+	ModifyCapacity(int)
+	SliceSize() int64
+	Close()
+}
+
+type maxSlicePool struct {
+	// allocator is defined as a function pointer to allow
+	// for test cases to instrument custom tracers when allocations
+	// occur.
+	allocator sliceAllocator
+
+	slices         chan *[]byte
+	allocations    chan struct{}
+	capacityChange chan struct{}
+
+	max       int
+	sliceSize int64
+
+	mtx sync.RWMutex
+}
+
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+	p := &maxSlicePool{sliceSize: sliceSize}
+	p.allocator = p.newSlice
+
+	return p
+}
+
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) {
+	// Check if the context is canceled before attempting to get a slice.
+	// This ensures priority is given to the cancel case first.
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	p.mtx.RLock()
+
+	for {
+		select {
+		case bs, ok := <-p.slices:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return bs, nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// pass
+		}
+
+		select {
+		case _, ok := <-p.allocations:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return p.allocator(), nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// In the event that there are no slices or allocations available,
+			// wait for a capacity change notification. This prevents some
+			// deadlock situations that can occur around sync.RWMutex:
+			// When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock.
+			// By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where
+			// Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock,
+			// and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity.
+
+			// Short-circuit if the pool capacity is zero.
+			if p.max == 0 {
+				p.mtx.RUnlock()
+				return nil, errZeroCapacity
+			}
+
+			// Since we will be releasing the read-lock we need to take the reference to the channel.
+			// Since channels are references we will still get notified if slices are added, or if
+			// the channel is closed due to a capacity modification. This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the slices channel is full when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// Allows us to reap allocations that are returned and are no longer needed.
+	}
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+	}
+}
+
+func (p *maxSlicePool) SliceSize() int64 {
+	return p.sliceSize
+}
+
+func (p *maxSlicePool) Close() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.empty()
+}
+
+func (p *maxSlicePool) empty() {
+	p.max = 0
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+		p.capacityChange = nil
+	}
+
+	if p.allocations != nil {
+		close(p.allocations)
+		for range p.allocations {
+			// drain channel
+		}
+		p.allocations = nil
+	}
+
+	if p.slices != nil {
+		close(p.slices)
+		for range p.slices {
+			// drain channel
+		}
+		p.slices = nil
+	}
+}
+
+func (p *maxSlicePool) newSlice() *[]byte {
+	bs := make([]byte, p.sliceSize)
+	return &bs
+}
+
+type returnCapacityPoolCloser struct {
+	byteSlicePool
+	returnCapacity int
+}
+
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+	if delta > 0 {
+		n.returnCapacity = -1 * delta
+	}
+	n.byteSlicePool.ModifyCapacity(delta)
+}
+
+func (n *returnCapacityPoolCloser) Close() {
+	if n.returnCapacity < 0 {
+		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+	}
+}
+
+type sliceAllocator func() *[]byte
+
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+	return newMaxSlicePool(sliceSize)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
new file mode 100644
index 000000000000..ce117c32a130
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+	io.ReadSeeker
+	io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+	*BufferedReadSeeker
+}
+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+	return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+	pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers. If size is less than 64 KiB then the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
+// respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+	if size < 65536 {
+		size = 65536
+	}
+
+	return &BufferedReadSeekerWriteToPool{
+		pool: sync.Pool{New: func() interface{} {
+			return make([]byte, size)
+		}},
+	}
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+	buffer := p.pool.Get().([]byte)
+
+	r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+	cleanup = func() {
+		p.pool.Put(buffer)
+	}
+
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
new file mode 100644
index 000000000000..968f907327fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
@@ -0,0 +1,187 @@
+package manager
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReaderSeekerCloser wrapping a nonseekable io.Reader used in an API operation's
+// input will prevent that operation from being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using with S3 PutObject to stream an object upload, the SDK's S3
+// Upload Manager (manager.Uploader) provides support for streaming
+// with the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser {
+	return &ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// seekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func seekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return computeSeekerLength(s)
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r *ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return computeSeekerLength(s)
+	}
+
+	return -1, nil
+}
+
+func computeSeekerLength(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r *ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read and
+// any error that occurred will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil error
+// will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r *ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r *ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r *ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position provided.
+// The number of bytes written, or an error, will be returned. A write can
+// overwrite previously written bytes if the offsets overlap.
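+//
+// A minimal illustrative sketch (added commentary; values are hypothetical):
+//
+//	b := manager.NewWriteAtBuffer(make([]byte, 0, 16))
+//	b.GrowthCoeff = 2                    // over-allocate when the buffer must grow
+//	_, _ = b.WriteAt([]byte("hello"), 4) // buffer length becomes 9
+//	_ = b.Bytes()                        // first four bytes remain zero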
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
new file mode 100644
index 000000000000..3e80df4111f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
@@ -0,0 +1,808 @@
+package manager
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"sort"
+	"sync"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts int32 = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multipart upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+//	u := manager.NewUploader(client)
+//	output, err := u.Upload(context.Background(), input)
+//	if err != nil {
+//		var multierr manager.MultiUploadFailure
+//		if errors.As(err, &multierr) {
+//			fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error())
+//		} else {
+//			fmt.Printf("upload failure, %s\n", err.Error())
+//		}
+//	}
+//
+type MultiUploadFailure interface {
+	error
+
+	// UploadID returns the upload id for the S3 multipart upload that failed.
+	UploadID() string
+}
+
+// A multiUploadError wraps the upload ID of a failed S3 multipart upload.
+//
+// Should be used for an error that occurred failing an S3 multipart upload,
+// when an upload ID is available. If an upload ID is not available, a more
+// relevant error should be returned instead.
+type multiUploadError struct {
+	err error
+
+	// ID for multipart upload which failed.
+	uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
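+//
+// For illustration (hypothetical values), a wrapped failure formats as:
+//
+//	upload multipart failed, upload id: VXBsb2FkSUQ, cause: connection reset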
+func (m *multiUploadError) Error() string {
+	var extra string
+	if m.err != nil {
+		extra = fmt.Sprintf(", cause: %s", m.err.Error())
+	}
+	return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra)
+}
+
+// Unwrap returns the underlying error that caused the upload failure.
+func (m *multiUploadError) Unwrap() error {
+	return m.err
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m *multiUploadError) UploadID() string {
+	return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+	// The URL where the object was uploaded to.
+	Location string
+
+	// The ID for a multipart upload to S3. In the case of an error the error
+	// can be cast to the MultiUploadFailure interface to extract the upload ID.
+	// Will be empty string if multipart upload was not used, and the object
+	// was uploaded as a single PutObject call.
+	UploadID string
+
+	// The list of parts that were uploaded and their checksums. Will be empty
+	// if multipart upload was not used, and the object was uploaded as a
+	// single PutObject call.
+	CompletedParts []types.CompletedPart
+
+	// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
+	// encryption with Amazon Web Services KMS (SSE-KMS).
+	BucketKeyEnabled bool
+
+	// The base64-encoded, 32-bit CRC32 checksum of the object.
+	ChecksumCRC32 *string
+
+	// The base64-encoded, 32-bit CRC32C checksum of the object.
+	ChecksumCRC32C *string
+
+	// The base64-encoded, 160-bit SHA-1 digest of the object.
+	ChecksumSHA1 *string
+
+	// The base64-encoded, 256-bit SHA-256 digest of the object.
+	ChecksumSHA256 *string
+
+	// Entity tag for the uploaded object.
+	ETag *string
+
+	// If the object expiration is configured, this will contain the expiration date
+	// (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+	Expiration *string
+
+	// The object key of the newly created object.
+	Key *string
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged types.RequestCharged
+
+	// If present, specifies the ID of the Amazon Web Services Key Management Service
+	// (Amazon Web Services KMS) symmetric customer managed customer master key (CMK)
+	// that was used for the object.
+	SSEKMSKeyId *string
+
+	// If you specified server-side encryption either with an Amazon S3-managed
+	// encryption key or an Amazon Web Services KMS customer master key (CMK) in your
+	// initiate multipart upload request, the response includes this header. It
+	// confirms the encryption algorithm that Amazon S3 used to encrypt the object.
+	ServerSideEncryption types.ServerSideEncryption
+
+	// The version of the object that was uploaded. Will only be populated if
+	// the S3 Bucket is versioned. If the bucket is not versioned this field
+	// will not be set.
+	VersionID *string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API client options.
+func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) {
+	return func(u *Uploader) {
+		u.ClientOptions = append(u.ClientOptions, opts...)
+	}
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// It is not safe to mutate the Uploader's properties concurrently.
+//
+// Pre-computed Checksums
+//
+// Care must be taken when using pre-computed checksums with the transfer
+// upload manager.
+// The format and value of the checksum differ based on whether the upload
+// will be performed as a single or multipart upload.
+//
+// Uploads that are smaller than the Uploader's PartSize will be uploaded using
+// the PutObject API operation. A pre-computed checksum of the uploaded object's
+// content is valid for these single part uploads. If the checksum provided
+// does not match the uploaded content the upload will fail.
+//
+// Uploads that are larger than the Uploader's PartSize will be uploaded using
+// multi-part upload. The pre-computed checksums for these uploads are a
+// checksum of checksums of each part, not a checksum of the full uploaded
+// bytes, with the format of "<checksum of checksums>-<number of parts>" (e.g.
+// "DUoRhQ==-3"). If a pre-computed checksum is provided that matches neither
+// this format nor the content uploaded, the upload will fail.
+//
+// ContentMD5 is explicitly ignored for multipart uploads, and its value is
+// suppressed.
+//
+// Automatically Computed Checksums
+//
+// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput
+// is set to a valid value, the SDK will automatically compute the checksum of
+// the individual uploaded parts. The UploadOutput result from Upload will
+// include the checksum of part checksums provided by the S3
+// CompleteMultipartUpload API call.
+type Uploader struct {
+	// The buffer size (in bytes) to use when buffering data into chunks and
+	// sending them as parts to S3. The minimum allowed part size is 5MB, and
+	// if this value is set to zero, the DefaultUploadPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel per call to Upload when
+	// sending parts. If this is set to zero, the DefaultUploadConcurrency value
+	// will be used.
+	//
+	// The concurrency pool is not shared between calls to Upload.
+	Concurrency int
+
+	// Setting this value to true will cause the SDK to avoid calling
+	// AbortMultipartUpload on a failure, leaving all successfully uploaded
+	// parts on S3 for manual recovery.
+	//
+	// Note that storing parts of an incomplete multipart upload counts towards
+	// space usage on S3 and will add additional costs if not cleaned up.
+	LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3,
+	// and is used to calculate the part size of the object to be uploaded.
+	// E.g., a 5GB file with MaxUploadParts set to 100 will be uploaded as
+	// 100 parts of 50MB each, capped at MaxUploadParts (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like io.LimitedReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitedReader is helpful when uploading an unbounded reader
+	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
+	// error must be used to signal end of stream.
+	//
+	// Defaults to the package const MaxUploadParts value.
+	MaxUploadParts int32
+
+	// The client to use when uploading to S3.
+	S3 UploadAPIClient
+
+	// List of request options that will be passed down to individual API
+	// operation requests made by the uploader.
+	ClientOptions []func(*s3.Options)
+
+	// Defines the buffer strategy used when uploading a part.
+	BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the reuse of streaming payload part buffers between upload calls.
+	partPool byteSlicePool
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3.
+// Pass in additional functional options to customize the uploader's behavior.
+// Requires an UploadAPIClient, such as an s3.Client created from a loaded AWS
+// config, to make the API calls.
+//
+// Example:
+//
+//	// Load AWS Config
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create an S3 Client with the config
+//	client := s3.NewFromConfig(cfg)
+//
+//	// Create an uploader passing it the client
+//	uploader := manager.NewUploader(client)
+//
+//	// Create an uploader with the client and custom options
+//	uploader := manager.NewUploader(client, func(u *manager.Uploader) {
+//		u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                client,
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+		BufferProvider:    defaultUploadBufferProvider(),
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	u.partPool = newByteSlicePool(u.PartSize)
+
+	return u
+}
+
+// Upload uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (
+	*UploadOutput, error,
+) {
+	i := uploader{in: input, cfg: u, ctx: ctx}
+
+	// Copy ClientOptions
+	clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1)
+	clientOptions = append(clientOptions, func(o *s3.Options) {
+		o.APIOptions = append(o.APIOptions,
+			middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
+		)
+	})
+	clientOptions = append(clientOptions, i.cfg.ClientOptions...)
+	i.cfg.ClientOptions = clientOptions
+
+	for _, opt := range opts {
+		opt(&i.cfg)
+	}
+
+	return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+	ctx context.Context
+	cfg Uploader
+
+	in *s3.PutObjectInput
+
+	readerPos int64 // current reader position
+	totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
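+//
+// The first nextReader call reads up to one PartSize; an io.EOF on that first
+// read means the whole payload fits in a single part, so PutObject is used
+// instead of a multipart upload.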
+func (u *uploader) upload() (*UploadOutput, error) {
+	if err := u.init(); err != nil {
+		return nil, fmt.Errorf("unable to initialize upload: %w", err)
+	}
+	defer u.cfg.partPool.Close()
+
+	if u.cfg.PartSize < MinUploadPartSize {
+		return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize)
+	}
+
+	// Do one read to determine if we have more than one part
+	reader, _, cleanup, err := u.nextReader()
+	if err == io.EOF { // single part
+		return u.singlePart(reader, cleanup)
+	} else if err != nil {
+		cleanup()
+		return nil, fmt.Errorf("read upload data failed: %w", err)
+	}
+
+	mu := multiuploader{uploader: u}
+	return mu.upload(reader, cleanup)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() error {
+	if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil {
+		return err
+	}
+
+	if u.cfg.Concurrency == 0 {
+		u.cfg.Concurrency = DefaultUploadConcurrency
+	}
+	if u.cfg.PartSize == 0 {
+		u.cfg.PartSize = DefaultUploadPartSize
+	}
+	if u.cfg.MaxUploadParts == 0 {
+		u.cfg.MaxUploadParts = MaxUploadParts
+	}
+
+	// Try to get the total size for some optimizations
+	if err := u.initSize(); err != nil {
+		return err
+	}
+
+	// If PartSize was changed or partPool was never set up, we need to allocate
+	// a new pool so that we return []byte slices of the correct size.
+	poolCap := u.cfg.Concurrency + 1
+	if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
+		u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	} else {
+		u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	}
+
+	return nil
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() error {
+	u.totalSize = -1
+
+	switch r := u.in.Body.(type) {
+	case io.Seeker:
+		n, err := seekerLen(r)
+		if err != nil {
+			return err
+		}
+		u.totalSize = n
+
+		// Try to adjust partSize if it is too small and account for
+		// integer division truncation.
+		if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+			// Add one to the part size to account for remainders
+			// during the size calculation. e.g. odd number of bytes.
+			u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+		}
+	}
+
+	return nil
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
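+//
+// It returns the reader for the next part, the number of bytes read, a
+// cleanup function that must be called once the part is no longer needed,
+// and io.EOF once the final part has been read.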
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+	switch r := u.in.Body.(type) {
+	case readerAtSeeker:
+		var err error
+
+		n := u.cfg.PartSize
+		if u.totalSize >= 0 {
+			bytesLeft := u.totalSize - u.readerPos
+
+			if bytesLeft <= u.cfg.PartSize {
+				err = io.EOF
+				n = bytesLeft
+			}
+		}
+
+		var (
+			reader  io.ReadSeeker
+			cleanup func()
+		)
+
+		reader = io.NewSectionReader(r, u.readerPos, n)
+		if u.cfg.BufferProvider != nil {
+			reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+		} else {
+			cleanup = func() {}
+		}
+
+		u.readerPos += n
+
+		return reader, int(n), cleanup, err
+
+	default:
+		part, err := u.cfg.partPool.Get(u.ctx)
+		if err != nil {
+			return nil, 0, func() {}, err
+		}
+
+		n, err := readFillBuf(r, *part)
+		u.readerPos += int64(n)
+
+		cleanup := func() {
+			u.cfg.partPool.Put(part)
+		}
+
+		return bytes.NewReader((*part)[0:n]), n, cleanup, err
+	}
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+	for offset < len(b) && err == nil {
+		var n int
+		n, err = r.Read(b[offset:])
+		offset += n
+	}
+
+	return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	defer cleanup()
+
+	var params s3.PutObjectInput
+	awsutil.Copy(&params, u.in)
+	params.Body = r
+
+	// Wrap the HTTP client to record the request URL, since that URL is
+	// used as the Location in the returned UploadOutput.
+	var locationRecorder recordLocationClient
+	out, err := u.cfg.S3.PutObject(u.ctx, &params,
+		append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &UploadOutput{
+		Location: locationRecorder.location,
+
+		BucketKeyEnabled:     out.BucketKeyEnabled,
+		ChecksumCRC32:        out.ChecksumCRC32,
+		ChecksumCRC32C:       out.ChecksumCRC32C,
+		ChecksumSHA1:         out.ChecksumSHA1,
+		ChecksumSHA256:       out.ChecksumSHA256,
+		ETag:                 out.ETag,
+		Expiration:           out.Expiration,
+		Key:                  params.Key,
+		RequestCharged:       out.RequestCharged,
+		SSEKMSKeyId:          out.SSEKMSKeyId,
+		ServerSideEncryption: out.ServerSideEncryption,
+		VersionID:            out.VersionId,
+	}, nil
+}
+
+type httpClient interface {
+	Do(r *http.Request) (*http.Response, error)
+}
+
+type recordLocationClient struct {
+	httpClient
+	location string
+}
+
+func (c *recordLocationClient) WrapClient() func(o *s3.Options) {
+	return func(o *s3.Options) {
+		c.httpClient = o.HTTPClient
+		o.HTTPClient = c
+	}
+}
+
+func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) {
+	resp, err = c.httpClient.Do(r)
+	if err != nil {
+		return resp, err
+	}
+
+	if resp.Request != nil && resp.Request.URL != nil {
+		url := *resp.Request.URL
+		url.RawQuery = ""
+		c.location = url.String()
+	}
+
+	return resp, err
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+	*uploader
+	wg       sync.WaitGroup
+	m        sync.Mutex
+	err      error
+	uploadID string
+	parts    completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+	buf     io.ReadSeeker
+	num     int32
+	cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
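+//
+// For example (illustrative): sort.Sort(completedParts{{PartNumber: 2}, {PartNumber: 1}})
+// reorders the slice ascending by PartNumber, as done in complete() below.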
+type completedParts []types.CompletedPart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	var params s3.CreateMultipartUploadInput
+	awsutil.Copy(&params, u.in)
+
+	// Create the multipart
+	var locationRecorder recordLocationClient
+	resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, &params,
+		append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+	if err != nil {
+		cleanup()
+		return nil, err
+	}
+	u.uploadID = *resp.UploadId
+
+	// Create the workers
+	ch := make(chan chunk, u.cfg.Concurrency)
+	for i := 0; i < u.cfg.Concurrency; i++ {
+		u.wg.Add(1)
+		go u.readChunk(ch)
+	}
+
+	// Send part 1 to the workers
+	var num int32 = 1
+	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+	// Read and queue the rest of the parts
+	for u.geterr() == nil && err == nil {
+		var (
+			reader       io.ReadSeeker
+			nextChunkLen int
+			ok           bool
+		)
+
+		reader, nextChunkLen, cleanup, err = u.nextReader()
+		ok, err = u.shouldContinue(num, nextChunkLen, err)
+		if !ok {
+			cleanup()
+			if err != nil {
+				u.seterr(err)
+			}
+			break
+		}
+
+		num++
+
+		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+	}
+
+	// Close the channel, wait for workers, and complete upload
+	close(ch)
+	u.wg.Wait()
+	completeOut := u.complete()
+
+	if err := u.geterr(); err != nil {
+		return nil, &multiUploadError{
+			err:      err,
+			uploadID: u.uploadID,
+		}
+	}
+
+	return &UploadOutput{
+		Location:       locationRecorder.location,
+		UploadID:       u.uploadID,
+		CompletedParts: u.parts,
+
+		BucketKeyEnabled:     completeOut.BucketKeyEnabled,
+		ChecksumCRC32:        completeOut.ChecksumCRC32,
+		ChecksumCRC32C:       completeOut.ChecksumCRC32C,
+		ChecksumSHA1:         completeOut.ChecksumSHA1,
+		ChecksumSHA256:       completeOut.ChecksumSHA256,
+		ETag:                 completeOut.ETag,
+		Expiration:           completeOut.Expiration,
+		Key:                  completeOut.Key,
+		RequestCharged:       completeOut.RequestCharged,
+		SSEKMSKeyId:          completeOut.SSEKMSKeyId,
+		ServerSideEncryption: completeOut.ServerSideEncryption,
+		VersionID:            completeOut.VersionId,
+	}, nil
+}
+
+func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) {
+	if err != nil && err != io.EOF {
+		return false, fmt.Errorf("read multipart upload data failed, %w", err)
+	}
+
+	if nextChunkLen == 0 {
+		// No need to upload an empty part. If the file was empty to start
+		// with, an empty single part would have been created and the
+		// multipart upload never started.
+		return false, nil
+	}
+
+	part++
+	// This upload exceeded maximum number of supported parts, error now.
+	if part > u.cfg.MaxUploadParts || part > MaxUploadParts {
+		var msg string
+		if part > u.cfg.MaxUploadParts {
+			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				u.cfg.MaxUploadParts)
+		} else {
+			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				MaxUploadParts)
+		}
+		return false, fmt.Errorf(msg)
+	}
+
+	return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + + data.cleanup() + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information. +func (u *multiuploader) send(c chunk) error { + params := &s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, + SSECustomerKey: u.in.SSECustomerKey, + SSECustomerKeyMD5: u.in.SSECustomerKeyMD5, + ExpectedBucketOwner: u.in.ExpectedBucketOwner, + RequestPayer: u.in.RequestPayer, + + ChecksumAlgorithm: u.in.ChecksumAlgorithm, + // Invalid to set any of the individual ChecksumXXX members from + // PutObject as they are never valid for individual parts of a + // multipart upload. + + PartNumber: c.num, + UploadId: &u.uploadID, + } + // TODO should do copy then clear? + + resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...) + if err != nil { + return err + } + + var completed types.CompletedPart + awsutil.Copy(&completed, resp) + completed.PartNumber = c.num + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.cfg.LeavePartsOnError { + return + } + + params := &s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + } + _, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...) + if err != nil { + // TODO: Add logging + //logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) + _ = err + } +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. + sort.Sort(u.parts) + + var params s3.CompleteMultipartUploadInput + awsutil.Copy(¶ms, u.in) + params.UploadId = &u.uploadID + params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts} + + resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, ¶ms, u.cfg.ClientOptions...) 
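+	// An error completing the upload is recorded here; fail() then aborts
+	// the multipart upload unless LeavePartsOnError is set.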
+	if err != nil {
+		u.seterr(err)
+		u.fail()
+	}
+
+	return resp
+}
+
+type readerAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
new file mode 100644
index 000000000000..3df983a652a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
@@ -0,0 +1,75 @@
+package manager
+
+import (
+	"bufio"
+	"io"
+	"sync"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
+type WriterReadFrom interface {
+	io.Writer
+	io.ReaderFrom
+}
+
+// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer
+type WriterReadFromProvider interface {
+	GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
+}
+
+type bufferedWriter interface {
+	WriterReadFrom
+	Flush() error
+	Reset(io.Writer)
+}
+
+type bufferedReadFrom struct {
+	bufferedWriter
+}
+
+func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
+	n, err := b.bufferedWriter.ReadFrom(r)
+	if flushErr := b.Flush(); flushErr != nil && err == nil {
+		err = flushErr
+	}
+	return n, err
+}
+
+// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
+// to manage allocation and reuse of *bufio.Writer structures.
+type PooledBufferedReadFromProvider struct {
+	pool sync.Pool
+}
+
+// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider.
+// Size is used to control the size of the underlying *bufio.Writer created for
+// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+	if size < int(32*sdkio.KibiByte) {
+		size = int(64 * sdkio.KibiByte)
+	}
+
+	return &PooledBufferedReadFromProvider{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+			},
+		},
+	}
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
+// interface. Additionally, a cleanup function is provided which must be called after usage of
+// the WriterReadFrom has been completed in order to allow the reuse of the *bufio.Writer.
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+	buffer := p.pool.Get().(*bufferedReadFrom)
+	buffer.Reset(writer)
+	r = buffer
+	cleanup = func() {
+		buffer.Reset(nil) // Reset to nil writer to release reference
+		p.pool.Put(buffer)
+	}
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
new file mode 100644
index 000000000000..938cd14c1e4c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
@@ -0,0 +1,112 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+	"time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
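+//
+// Illustrative example (hypothetical types, sketching the behavior):
+//
+//	type src struct{ Name *string; Count int }
+//	type dst struct{ Name *string; Extra bool }
+//	name := "x"
+//	out := dst{}
+//	Copy(&out, &src{Name: &name, Count: 3})
+//	// out.Name points at a fresh copy of "x"; Count and Extra are ignored
+//	// because they do not exist in both structs.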
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				if _, ok := src.Interface().(*time.Time); !ok {
+					if dst.Kind() == reflect.String {
+						dst.SetString(e.String())
+					} else {
+						dst.Set(reflect.New(e))
+					}
+				} else {
+					tempValue := reflect.New(e)
+					tempValue.Elem().Set(src.Elem())
+					// Sets time.Time's unexported values
+					dst.Set(tempValue)
+				}
+			}
+			if dst.Kind() != reflect.String && src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If it's not assignable, the value would
+		// need to be converted and the impact of that may be unexpected, or is
+		// not compatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
new file mode 100644
index 000000000000..bcfe51a2b7ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
@@ -0,0 +1,33 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this method will also dereference the input
+// values if possible so the DeepEqual performed will not fail if one parameter
+// is a pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
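+//
+// For example (illustrative): a *string pointing at "a" compares equal to the
+// plain string "a":
+//
+//	s := "a"
+//	DeepEqual(&s, "a") // true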
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil and of the same type, they are equal.
+		// If they are of different types, they are not equal.
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	// Special casing for strings as typed enumerations are string aliases
+	// but are not deep equal.
+	if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
+		return ra.String() == rb.String()
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
new file mode 100644
index 000000000000..7e69bd5eb756
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go
@@ -0,0 +1,225 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+	pathparts := strings.Split(path, "||")
+	if len(pathparts) > 1 {
+		for _, pathpart := range pathparts {
+			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+			if len(vals) > 0 {
+				return vals
+			}
+		}
+		return nil
+	}
+
+	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+	components := strings.Split(path, ".")
+	for len(values) > 0 && len(components) > 0 {
+		var index *int64
+		var indexStar bool
+		c := strings.TrimSpace(components[0])
+		if c == "" { // no actual component, illegal syntax
+			return nil
+		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+			// TODO normalize case for user
+			return nil // don't support unexported fields
+		}
+
+		// parse this component
+		if m := indexRe.FindStringSubmatch(c); m != nil {
+			c = m[1]
+			if m[2] == "" {
+				index = nil
+				indexStar = true
+			} else {
+				i, _ := strconv.ParseInt(m[2], 10, 32)
+				index = &i
+				indexStar = false
+			}
+		}
+
+		nextvals := []reflect.Value{}
+		for _, value := range values {
+			// pull component name out of struct member
+			if value.Kind() != reflect.Struct {
+				continue
+			}
+
+			if c == "*" { // pull all members
+				for i := 0; i < value.NumField(); i++ {
+					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+						nextvals = append(nextvals, f)
+					}
+				}
+				continue
+			}
+
+			value = value.FieldByNameFunc(func(name string) bool {
+				if c == name {
+					return true
+				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+					return true
+				}
+				return false
+			})
+
+			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+				if !value.IsNil() {
+					value.Set(reflect.Zero(value.Type()))
+				}
+				return []reflect.Value{value}
+			}
+
+			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+				// TODO if the value is the terminus it should not be created
+				// if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + if dstVal.Kind() == reflect.String { + dstVal.SetString(srcVal.String()) + } else { + dstVal.Set(srcVal) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go new file mode 100644 index 000000000000..1adecae6b941 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go @@ -0,0 +1,131 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
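+//
+// Illustrative output (hypothetical type; formatting sketched from the logic
+// below):
+//
+//	Prettify(struct{ Name string }{Name: "x"})
+//	// {
+//	//   Name: "x"
+//	// }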
+func Prettify(i interface{}) string {
+	var buf bytes.Buffer
+	prettify(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	isPtr := false
+	for v.Kind() == reflect.Ptr {
+		isPtr = true
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		if isPtr {
+			buf.WriteRune('&')
+		}
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		if isPtr {
+			buf.WriteRune('&')
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		if isPtr {
+			buf.WriteRune('&')
+		}
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+
+		for v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+
+		if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice {
+			prettify(v, indent, buf)
+			return
+		}
+
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
new file mode 100644
index 000000000000..645df2450fc5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
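+//
+// Fields tagged `sensitive:"true"` are rendered as "<sensitive>" instead of
+// their value (see stringValue below).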
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
new file mode 100644
index 000000000000..3ab362b42431
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -0,0 +1,78 @@
+# v1.1.10 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.9 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.8 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.7 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2022-03-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2022-02-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.4 (2022-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-01-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go new file mode 100644 index 000000000000..cd4d19b89821 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go @@ -0,0 +1,65 @@ +package configsources + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" +) + +// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value +// for Enable Endpoint Discovery +type EnableEndpointDiscoveryProvider interface { + GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) +} + +// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. +// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, +// and error if one is encountered. 
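+//
+// Illustrative use (cfg is any loaded config value that may implement the
+// provider interface):
+//
+//	value, found, err := ResolveEnableEndpointDiscovery(ctx, []interface{}{cfg})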
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
+			value, found, err = p.GetEnableEndpointDiscovery(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint
+type UseDualStackEndpointProvider interface {
+	GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
+}
+
+// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseDualStackEndpointProvider); ok {
+			value, found, err = p.GetUseDualStackEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint
+type UseFIPSEndpointProvider interface {
+	GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
+}
+
+// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseFIPSEndpointProvider); ok {
+			value, found, err = p.GetUseFIPSEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
new file mode 100644
index 000000000000..23ed79f51637
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+ +package configsources + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.1.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md new file mode 100644 index 000000000000..ff627acb0c3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -0,0 +1,51 @@ +# v2.4.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.0 (2021-11-06) + +* **Release**: Endpoint Variant Model Support +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go new file mode 100644 index 000000000000..32251a7e3cc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -0,0 +1,302 @@ +package endpoints + +import ( + "fmt" + "github.com/aws/smithy-go/logging" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// DefaultKey is a compound map key of a variant and other values. +type DefaultKey struct { + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointKey is a compound map key of a region and associated variant value. +type EndpointKey struct { + Region string + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointVariant is a bit field to describe the endpoints attributes. +type EndpointVariant uint64 + +const ( + // FIPSVariant indicates that the endpoint is FIPS capable. 
+	FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
+
+	// DualStackVariant indicates that the endpoint is DualStack capable.
+	DualStackVariant
+)
+
+// ServiceVariant is a bit field to describe the service endpoint attributes.
+type ServiceVariant uint64
+
+const (
+	defaultProtocol = "https"
+	defaultSigner   = "v4"
+)
+
+var (
+	protocolPriority = []string{"https", "http"}
+	signerPriority   = []string{"v4", "s3v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+	// over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Disable usage of HTTPS (TLS / SSL)
+	DisableHTTPS bool
+
+	// Instruct the resolver to use a service endpoint that supports dual-stack.
+	// If a service does not have a dual-stack endpoint an error will be returned by the resolver.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Instruct the resolver to use a service endpoint that supports FIPS.
+	// If a service does not have a FIPS endpoint an error will be returned by the resolver.
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// ServiceVariant is a bitfield of service specified endpoint variant data.
+	ServiceVariant ServiceVariant
+}
+
+// GetEndpointVariant returns the EndpointVariant for the variant associated options.
+func (o Options) GetEndpointVariant() (v EndpointVariant) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+		v |= DualStackVariant
+	}
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled {
+		v |= FIPSVariant
+	}
+	return v
+}
+
+// Partitions is a slice of Partition.
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+	if len(ps) == 0 {
+		return aws.Endpoint{}, fmt.Errorf("no partitions found")
+	}
+
+	if opts.Logger == nil {
+		opts.Logger = logging.Nop{}
+	}
+
+	if len(opts.ResolvedRegion) > 0 {
+		region = opts.ResolvedRegion
+	}
+
+	for i := 0; i < len(ps); i++ {
+		if !ps[i].canResolveEndpoint(region, opts) {
+			continue
+		}
+
+		return ps[i].ResolveEndpoint(region, opts)
+	}
+
+	// fallback to first partition format to use when resolving the endpoint.
+	return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+	ID                string
+	RegionRegex       *regexp.Regexp
+	PartitionEndpoint string
+	IsRegionalized    bool
+	Defaults          map[DefaultKey]Endpoint
+	Endpoints         Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string, opts Options) bool {
+	_, ok := p.Endpoints[EndpointKey{
+		Region:  region,
+		Variant: opts.GetEndpointVariant(),
+	}]
+	return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + endpoints := p.Endpoints + + variant := options.GetEndpointVariant() + serviceVariant := options.ServiceVariant + + defaults := p.Defaults[DefaultKey{ + Variant: variant, + ServiceVariant: serviceVariant, + }] + + return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) +} + +func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { + key := EndpointKey{ + Region: region, + Variant: variant, + } + + if e, ok := endpoints[key]; ok { + return e + } + + if !p.IsRegionalized { + return endpoints[EndpointKey{ + Region: p.PartitionEndpoint, + Variant: variant, + ServiceVariant: serviceVariant, + }] + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return Endpoint{} +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[EndpointKey]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string + + // Indicates that this endpoint is deprecated. + Deprecated aws.Ternary +} + +// IsZero returns whether the endpoint structure is an empty (zero) value. +func (e Endpoint) IsZero() bool { + switch { + case e.Unresolveable != aws.UnknownTernary: + return false + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (CredentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + } + return true +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + if e.IsZero() { + return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) + } + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. 
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + if e.Deprecated == aws.TrueTernary && options.LogDeprecated { + options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) + } + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if other.Deprecated != aws.UnknownTernary { + e.Deprecated = other.Deprecated + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go new file mode 100644 index 000000000000..79bdc8efdfae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package endpoints + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "2.4.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md new file mode 100644 index 000000000000..8a68f7567725 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -0,0 +1,91 @@ +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2022-01-28) + +* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. 
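A note on the `endpoints` package vendored above: endpoint lookup is keyed on a `(region, variant)` pair, where the variant is a bit field that `Options.GetEndpointVariant` assembles by OR-ing the FIPS and dual-stack bits. A self-contained sketch of that keying scheme follows; the hostnames and the toy table are illustrative only, and the type names merely mirror the vendored ones.

```go
package main

import "fmt"

// endpointVariant mirrors the EndpointVariant bit field above; the bit
// positions follow the same 1 << (64 - 1 - iota) scheme.
type endpointVariant uint64

const (
	fipsVariant endpointVariant = 1 << (64 - 1 - iota)
	dualStackVariant
)

// endpointKey mirrors EndpointKey: a region plus a variant bit field.
type endpointKey struct {
	region  string
	variant endpointVariant
}

func main() {
	// A toy endpoint table keyed the same way as the Endpoints map above.
	table := map[endpointKey]string{
		{region: "us-east-1"}:                                          "service.us-east-1.example.com",
		{region: "us-east-1", variant: fipsVariant}:                    "service-fips.us-east-1.example.com",
		{region: "us-east-1", variant: fipsVariant | dualStackVariant}: "service-fips.us-east-1.api.example",
	}

	// Enabling both options sets both bits, exactly as GetEndpointVariant does.
	v := fipsVariant | dualStackVariant
	fmt.Println(table[endpointKey{region: "us-east-1", variant: v}])
	// Output: service-fips.us-east-1.api.example
}
```

When no entry matches the requested `(region, variant)` key, `Partition.endpointForRegion` above falls back to the partition-level defaults for that variant, which is why a missing FIPS entry surfaces as a resolution error rather than silently downgrading to the non-FIPS host.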
+ +# v1.3.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-07-01) + +* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. + +# v1.0.1 (2021-06-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-05-20) + +* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
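The `internal/ini` sources that follow implement a small lexer and an LL(1) parser. As an orientation aid before reading them: for a line like `region = us-west-2`, the parser builds an equal-expression node whose root is the `=` operator token and whose children are the key and value expressions (see `newEqualExpr` and the `ValueState` handling below); the result is then wrapped in an expression statement. A rough standalone mirror of that shape, with all type names hypothetical:

```go
package main

import "fmt"

// ast is a hypothetical mirror of the AST type defined in ast.go below;
// root stands in for the raw runes of the node's root token.
type ast struct {
	kind     string
	root     string
	children []ast
}

func main() {
	// Approximate tree for the input `region = us-west-2`: the '=' op
	// token becomes the equal-expr root, the key expression is its first
	// child, and ValueState appends the value expression afterwards.
	tree := ast{
		kind: "equal_expr",
		root: "=",
		children: []ast{
			{kind: "expr", root: "region"},
			{kind: "expr", root: "us-west-2"},
		},
	}
	fmt.Printf("%+v\n", tree)
}
```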
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go new file mode 100644 index 000000000000..e83a99886bcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go new file mode 100644 index 000000000000..0895d53cbe65 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go new file mode 100644 index 000000000000..0b76999ba1f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go new file mode 100644 index 000000000000..f5ebe52e1a55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go @@ -0,0 +1,6 @@ +package ini + +import ( + // internal/ini module was carved out of this module + _ "github.com/aws/aws-sdk-go-v2" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go new file mode 100644 index 000000000000..1e55bbd07b91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go @@ -0,0 +1,42 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " +// +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] +// +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
+// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go new file mode 100644 index 000000000000..04345a54c20d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go new file mode 100644 index 000000000000..0f278d55e6c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go @@ -0,0 +1,22 @@ +package ini + +import "fmt" + +// UnableToReadFile is an error indicating that a ini file could not be read +type UnableToReadFile struct { + Err error +} + +// Error returns an error message and the underlying error message if present +func (e *UnableToReadFile) Error() string { + base := "unable to read file" + if e.Err == nil { + return base + } + return fmt.Sprintf("%s: %v", base, e.Err) +} + +// Unwrap returns the underlying error +func (e *UnableToReadFile) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go new file mode 100644 index 000000000000..91ba2a59dd5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go new file mode 100644 index 000000000000..6e545b63bc41 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go @@ -0,0 +1,18 @@ +//go:build gofuzz +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go new file mode 100644 index 000000000000..f54018f1dcf7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package ini + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.3.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go new file mode 100644 index 000000000000..f7406231318d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -0,0 +1,58 @@ +package ini + +import ( + "fmt" + "io" + "os" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (sections Sections, err error) { + f, oerr := os.Open(path) + if oerr != nil { + return Sections{}, &UnableToReadFile{Err: oerr} + } + + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) + } + }() + + return Parse(f, path) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader, path string) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor(path) + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor("") + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go new file mode 100644 index 000000000000..abf1fb036262 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go @@ -0,0 +1,157 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
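With `OpenFile`, `Parse`, and `ParseBytes` defined above, consumption inside the SDK looks roughly like the sketch below. The import is only legal for code living inside the `aws-sdk-go-v2` module (the package is `internal`), and the `GetSection`/`String` accessors come from parts of the package not shown in this diff, so treat this strictly as an illustrative sketch.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	// A tiny shared-config document; ParseBytes tokenizes it, builds the
	// AST, and walks it into Sections.
	data := []byte("[profile dev]\nregion = us-west-2\n")

	sections, err := ini.ParseBytes(data)
	if err != nil {
		panic(err)
	}

	// Section names keep their full label, including the "profile " prefix.
	section, ok := sections.GetSection("profile dev")
	if !ok {
		fmt.Println("profile not found")
		return
	}
	fmt.Println(section.String("region")) // us-west-2
}
```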
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, &UnableToReadFile{Err: err} + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go new file mode 100644 index 000000000000..12fc7d5aa495 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go @@ -0,0 +1,349 @@ +package ini + +import ( + "fmt" + "io" +) + +// ParseState represents the current state of the parser. +type ParseState uint + +// State enums for the parse table +const ( + InvalidState ParseState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
+var parseTable = map[ASTKind]map[TokenType]ParseState{ + ASTKindStart: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: { + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: { + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: { + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: { + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. 
+ // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + root.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k.Kind, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which exludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
+func trimSpaces(k AST) AST {
+	// trim left hand side of spaces
+	for i := 0; i < len(k.Root.raw); i++ {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[1:]
+		i--
+	}
+
+	// trim right hand side of spaces
+	for i := len(k.Root.raw) - 1; i >= 0; i-- {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+	}
+
+	return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
new file mode 100644
index 000000000000..eca42d1b2937
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
@@ -0,0 +1,336 @@
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var (
+	runesTrue  = []rune("true")
+	runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+	runesTrue,
+	runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+	for _, lv := range literalValues {
+		if isCaselessLitValue(lv, b) {
+			return true
+		}
+	}
+	return false
+}
+
+func isLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != have[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
+func isCaselessLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != unicode.ToLower(have[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a byte slice is a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
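+//
+// The following literals are illustrative examples (not from the original
+// source) of inputs that exercise the '-', '.', 'e'/'E', 'b', 'o' and 'x'
+// branches handled below:
+//
+//	42    -17    3.14    -1e-4    0b101    0o17    0xFF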
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = isCaselessLitValue(runesTrue, v.raw) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// NewStringValue returns a Value type generated using a string input. +func NewStringValue(str string) (Value, error) { + return newValue(StringType, 10, []rune(str)) +} + +// NewIntValue returns a Value type generated using an int64 input. 
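+//
+// A minimal usage sketch (illustrative, not from the original source):
+//
+//	v, err := NewIntValue(42)
+//	// on success, v.Type == IntegerType and v.IntValue() == 42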
+func NewIntValue(i int64) (Value, error) { + v := strconv.FormatInt(i, 10) + return newValue(IntegerType, 10, []rune(v)) +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go new file mode 100644 index 000000000000..e52ac399f17d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go new file mode 100644 index 000000000000..a45c0bc56622 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package 
ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go new file mode 100644 index 000000000000..8a84c7cbe080 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, 
%v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go new file mode 100644 index 000000000000..30ae0b8f228f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go @@ -0,0 +1,19 @@ +package ini + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +func (err *ParseError) Error() string { + return err.msg +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go new file mode 100644 index 000000000000..7f01cf7c7036 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. +func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go new file mode 100644 index 000000000000..f82095ba2594 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go new file mode 100644 index 000000000000..07e90876a4a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain 
blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+// [ foo ]
+// nested = ; this section will be skipped
+//	a=b
+//	c=d
+// bar=baz ; this will be included
+type skipper struct {
+	shouldSkip bool
+	TokenSet   bool
+	prevTok    Token
+}
+
+func newSkipper() skipper {
+	return skipper{
+		prevTok: emptyToken,
+	}
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+	// should skip state will be modified only if previous token was new line (NL);
+	// and the current token is not WhiteSpace (WS).
+	if s.shouldSkip &&
+		s.prevTok.Type() == TokenNL &&
+		tok.Type() != TokenWS {
+		s.Continue()
+		return false
+	}
+
+	s.prevTok = tok
+	return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+	s.shouldSkip = true
+}
+
+func (s *skipper) Continue() {
+	s.shouldSkip = false
+	s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
new file mode 100644
index 000000000000..ba0af01b53b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+	return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+	return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+	return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+	return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+	return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+	return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
new file mode 100644
index 000000000000..b5480fdeb359
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+	"fmt"
+)
+
+// getStringValue will return the number of runes that make up a quoted
+// string, including its enclosing quotes
+//
+// an error will be returned if the string is not properly formatted
+func getStringValue(b []rune) (int, error) {
+	if b[0] != '"' {
+		return 0, NewParseError("strings must start with '\"'")
+	}
+
+	endQuote := false
+	i := 1
+
+	for ; i < len(b) && !endQuote; i++ {
+		if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+			endQuote = true
+			break
+		} else if escaped {
+			/*c, err := getEscapedByte(b[i])
+			if err != nil {
+				return 0, err
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--*/
+
+			continue
+		}
+	}
+
+	if !endQuote {
+		return 0, NewParseError("missing '\"' in string value")
+	}
+
+	return i + 1, nil
+}
+
+// getBoolValue will return the number of runes that make up a boolean
+// literal
+//
+// an error will be returned if the boolean is not of a correct
+// value
+func getBoolValue(b []rune) (int, error) {
+	if len(b) < 4 {
+		return 0, NewParseError("invalid boolean value")
+	}
+
+	n := 0
+	for _, lv := range literalValues {
+		if len(lv) > len(b) {
+			continue
+		}
+
+		if isCaselessLitValue(lv, b) {
+			n = len(lv)
+		}
+	}
+
+	if n == 0 {
+		return 0, NewParseError("invalid boolean value")
+	}
+
+	return n, nil
+}
+
+// getNumericalValue will return the base of the number and the number
+// of runes read
+//
+// an error will be returned if the number is not of a correct
+// value
+func getNumericalValue(b []rune) (int, int, error) {
+	if !isDigit(b[0]) {
+		return 0, 0, NewParseError("invalid digit value")
+	}
+
+	i := 0
+	helper := numberHelper{}
+
+loop:
+	for negativeIndex := 0; i < len(b); i++ {
+		negativeIndex++
+
+		if !isDigit(b[i]) {
+			switch b[i] {
+			case '-':
+				if helper.IsNegative() || negativeIndex != 1 {
+					return 0, 0, NewParseError("parse error '-'")
+				}
+
+				n := getNegativeNumber(b[i:])
+				i += (n - 1)
+				helper.Determine(b[i])
+				continue
+			case '.':
+				if err := helper.Determine(b[i]); err != nil {
+					return 0, 0, err
+				}
+			case 'e', 'E':
+				if err := helper.Determine(b[i]); err != nil {
+					return 0, 0, err
+				}
+
+				negativeIndex = 0
+			case 'b':
+				if helper.numberFormat == hex {
+					break
+				}
+				fallthrough
+			case 'o', 'x':
+				if i == 0 && b[i] != '0' {
+					return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+				}
+
+				if i != 1 {
+					return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+				}
+
+				if err := helper.Determine(b[i]); err != nil {
+					return 0, 0, err
+				}
+			default:
+				if isWhitespace(b[i]) {
+					break loop
+				}
+
+				if isNewline(b[i:]) {
+					break loop
+				}
+
+				if !(helper.numberFormat == hex && isHexByte(b[i])) {
+					if i+2 < len(b) && !isNewline(b[i:i+2]) {
+						return 0, 0, NewParseError("invalid numerical character")
+					} else if !isNewline([]rune{b[i]}) {
+						return 0, 0, NewParseError("invalid numerical character")
+					}
+
+					break loop
+				}
+			}
+		}
+	}
+
+	return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not the rune is a decimal digit
+func isDigit(b rune) bool {
+	return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+	return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+	switch b {
+	case '0', '1':
+		return true
+	default:
+		return false
+	}
+}
+
+func isOctalByte(b rune) bool {
+	switch b {
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		return true
+	default:
+		return false
+	}
+}
+
+func isHexByte(b rune) bool {
+	if isDigit(b) {
+		return true
+	}
+	return (b >= 'A' && b <= 'F') ||
+		(b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+	i := 0
+
+	for i < len(b) {
+		if isNewline(b[i:]) {
+			break
+		}
+
+		if isOp(b[i:]) {
+			break
+		}
+
+		valid, n, err := isValid(b[i:])
+		if err != nil {
+			return 0, err
+		}
+
+		if !valid {
+			break
+		}
+
+		i += n
+	}
+
+	return i, nil
+}
+
+// getNegativeNumber will return the length of the leading negative
+// number prefix ('-' followed by digits) in a byte slice. This will
+// iterate through all characters until a non-digit has been found.
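+//
+// For example (illustrative, not from the original source),
+// getNegativeNumber([]rune("-123,")) returns 4, the length of the
+// leading "-123" run.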
+func getNegativeNumber(b []rune) int {
+	if b[0] != '-' {
+		return 0
+	}
+
+	i := 1
+	for ; i < len(b); i++ {
+		if !isDigit(b[i]) {
+			return i
+		}
+	}
+
+	return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+	if len(value) == 0 {
+		return false
+	}
+
+	switch b {
+	case '\'': // single quote
+	case '"': // quote
+	case 'n': // newline
+	case 't': // tab
+	case '\\': // backslash
+	default:
+		return false
+	}
+
+	return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+	switch b {
+	case '\'': // single quote
+		return '\'', nil
+	case '"': // quote
+		return '"', nil
+	case 'n': // newline
+		return '\n', nil
+	case 't': // tab
+		return '\t', nil
+	case '\\': // backslash
+		return '\\', nil
+	default:
+		return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+	}
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+	for i := 0; i < len(b); i++ {
+		if isEscaped(b[:i], b[i]) {
+			c, err := getEscapedByte(b[i])
+			if err != nil {
+				return b
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
new file mode 100644
index 000000000000..a07a6373897c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
@@ -0,0 +1,269 @@
+package ini
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+	VisitExpr(AST) error
+	VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+
+	// scope is the profile which is being visited
+	scope string
+
+	// path is the file path which the visitor is visiting
+	path string
+
+	// Sections defines the list of parsed profile sections
+	Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath
+// which points to the file it is visiting.
+func NewDefaultVisitor(filepath string) *DefaultVisitor {
+	return &DefaultVisitor{
+		Sections: Sections{
+			container: map[string]Section{},
+		},
+		path: filepath,
+	}
+}
+
+// VisitExpr visits expressions...
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+	if t.SourceFile == nil {
+		t.SourceFile = make(map[string]string, 0)
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			// The right-hand side of the equality expression is allowed to
+			// contain '[', ']', ':', and '=' in its values. If the token is
+			// neither a literal nor one of the token types that identifies
+			// those four additional tokens, then error.
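+			//
+			// For example (illustrative, not from the original source), a value
+			// such as
+			//
+			//	endpoint = http://localhost:8000/path[0]
+			//
+			// tokenizes its right-hand side into literal, op, and sep tokens
+			// that are all folded back into the stored value.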
+			if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			// lower case key to standardize
+			k := strings.ToLower(key)
+
+			// identify if the section already had this key, append log on section
+			if t.Has(k) {
+				t.Logs = append(t.Logs,
+					fmt.Sprintf("For profile: %v, overriding %v value, "+
+						"with a %v value found in a duplicate profile defined later in the same file %v. \n",
+						t.Name, k, k, v.path))
+			}
+
+			// assign the value
+			t.values[k] = val
+			// update the source file path for region
+			t.SourceFile[k] = v.path
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements...
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+
+		// trim start and end space
+		name = strings.TrimSpace(name)
+
+		// if has prefix "profile " + [ws+] + "profile-name",
+		// we standardize by removing the [ws+] between prefix and profile-name.
+		if strings.HasPrefix(name, "profile ") {
+			names := strings.SplitN(name, " ", 2)
+			name = names[0] + " " + strings.TrimLeft(names[1], " ")
+		}
+
+		// attach profile name on section
+		if !v.Sections.HasSection(name) {
+			v.Sections.container[name] = NewSection(name)
+		}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// NewSections returns empty ini Sections
+func NewSections() Sections {
+	return Sections{
+		container: make(map[string]Section, 0),
+	}
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// HasSection denotes if Sections consists of a section with
+// the provided name.
+func (t Sections) HasSection(p string) bool {
+	_, ok := t.container[p]
+	return ok
+}
+
+// SetSection sets a section value for the provided section name.
+func (t Sections) SetSection(p string, v Section) Sections {
+	t.container[p] = v
+	return t
+}
+
+// DeleteSection deletes a section entry/value for the provided section name.
+func (t Sections) DeleteSection(p string) {
+	delete(t.container, p)
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
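+//
+// For example (illustrative, not from the original source), the INI input
+//
+//	[profile dev]
+//	region = us-west-2
+//
+// yields a Section named "profile dev" whose values contain the
+// "region" key.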
+type Section struct {
+	// Name is the Section profile name
+	Name string
+
+	// values are the values within parsed profile
+	values values
+
+	// Errors is the list of errors
+	Errors []error
+
+	// Logs is the list of logs
+	Logs []string
+
+	// SourceFile is the INI Source file from where this section
+	// was retrieved. The key is the property, value is the
+	// source file the property was retrieved from.
+	SourceFile map[string]string
+}
+
+// NewSection returns an initialized section for the name
+func NewSection(name string) Section {
+	return Section{
+		Name:       name,
+		values:     values{},
+		SourceFile: map[string]string{},
+	}
+}
+
+// UpdateSourceFile updates source file for a property to provided filepath.
+func (t Section) UpdateSourceFile(property string, filepath string) {
+	t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates value for a provided key with provided value
+func (t Section) UpdateValue(k string, v Value) error {
+	t.values[k] = v
+	return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
new file mode 100644
index 000000000000..99915f7f777c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
new file mode 100644
index 000000000000..7ffb4ae06ff0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as any Unicode space character other than
+// '\n' or '\r' (for example, a space or a tab).
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
new file mode 100644
index 000000000000..c8484dcd7592
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
@@ -0,0 +1,33 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+var floatMaxBigInt = big.NewInt(1 << 53)
+
+// Float64 returns a float64 read from an io.Reader source. The returned float will be in the range [0.0, 1.0).
+func Float64(reader io.Reader) (float64, error) {
+	bi, err := rand.Int(reader, floatMaxBigInt)
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %v", err)
+	}
+
+	return float64(bi.Int64()) / (1 << 53), nil
+}
+
+// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
+// source.
+func CryptoRandFloat64() (float64, error) {
+	return Float64(Reader)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
new file mode 100644
index 000000000000..2b42cbe6421a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
@@ -0,0 +1,9 @@
+package sdk
+
+// Invalidator provides access to a type's invalidate method to make it
+// invalidate its cache.
+//
+// e.g. aws.SafeCredentialsProvider's Invalidate method.
+type Invalidator interface {
+	Invalidate()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
new file mode 100644
index 000000000000..8e8dabad5488
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
@@ -0,0 +1,74 @@
+package sdk
+
+import (
+	"context"
+	"time"
+)
+
+func init() {
+	NowTime = time.Now
+	Sleep = time.Sleep
+	SleepWithContext = sleepWithContext
+}
+
+// NowTime is a value for getting the current time. This value can be overridden
+// for testing by mocking out the current time.
+var NowTime func() time.Time
+
+// Sleep is a value for sleeping for a duration. This value can be overridden
+// for testing and mocking out sleep duration.
+var Sleep func(time.Duration)
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// This value can be overridden for testing and mocking out sleep duration.
+var SleepWithContext func(context.Context, time.Duration) error
+
+// sleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+func sleepWithContext(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
+
+// noOpSleepWithContext does nothing, returns immediately.
+func noOpSleepWithContext(context.Context, time.Duration) error {
+	return nil
+}
+
+func noOpSleep(time.Duration) {}
+
+// TestingUseNopSleep is a utility for disabling sleep across the SDK for
+// testing.
+func TestingUseNopSleep() func() {
+	SleepWithContext = noOpSleepWithContext
+	Sleep = noOpSleep
+
+	return func() {
+		SleepWithContext = sleepWithContext
+		Sleep = time.Sleep
+	}
+}
+
+// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time
+// for testing purposes.
+func TestingUseReferenceTime(referenceTime time.Time) func() {
+	NowTime = func() time.Time {
+		return referenceTime
+	}
+	return func() {
+		NowTime = time.Now
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
new file mode 100644
index 000000000000..6c443988bbc9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
new file mode 100644
index 000000000000..d008ae27cb31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+	"strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000000..fe6a62006a52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
new file mode 100644
index 000000000000..cb70616e8027
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
@@ -0,0 +1,7 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is part of the unstable and
+// unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+package singleflight
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000000..e8a1b17d5640
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+	forgotten bool
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
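+//
+// A minimal usage sketch (illustrative; fetchConfig is a hypothetical
+// function):
+//
+//	var g Group
+//	v, err, shared := g.Do("config", func() (interface{}, error) {
+//		return fetchConfig()
+//	})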
+type Group struct {
+	mu sync.Mutex       // protects m
+	m  map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+	Val    interface{}
+	Err    error
+	Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		g.mu.Unlock()
+		c.wg.Wait()
+
+		if e, ok := c.err.(*panicError); ok {
+			panic(e)
+		} else if c.err == errGoexit {
+			runtime.Goexit()
+		}
+		return c.val, c.err, true
+	}
+	c := new(call)
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	g.doCall(c, key, fn)
+	return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+	ch := make(chan Result, 1)
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+	if c, ok := g.m[key]; ok {
+		c.dups++
+		c.chans = append(c.chans, ch)
+		g.mu.Unlock()
+		return ch
+	}
+	c := &call{chans: []chan<- Result{ch}}
+	c.wg.Add(1)
+	g.m[key] = c
+	g.mu.Unlock()
+
+	go g.doCall(c, key, fn)
+
+	return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit;
+	// for more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		c.wg.Done()
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		if !c.forgotten {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
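+//
+// For example (illustrative), after g.Forget("config"), a subsequent
+// g.Do("config", fn) invokes fn again even if an earlier call is still
+// in flight.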
+func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go new file mode 100644 index 000000000000..5d69db5f2497 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go @@ -0,0 +1,13 @@ +package timeconv + +import "time" + +// FloatSecondsDur converts a fractional seconds to duration. +func FloatSecondsDur(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +// DurSecondsFloat converts a duration into fractional seconds. +func DurSecondsFloat(d time.Duration) float64 { + return float64(d) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md new file mode 100644 index 000000000000..50369c9ecedd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -0,0 +1,8 @@ +# v1.0.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2022-04-07) + +* **Release**: New internal v4a signing module location. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go new file mode 100644 index 000000000000..856dcd62d4ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go @@ -0,0 +1,141 @@ +package v4a + +import ( + "context" + "crypto/ecdsa" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +// Credentials is Context, ECDSA, and Optional Session Token that can be used +// to sign requests using SigV4a +type Credentials struct { + Context string + PrivateKey *ecdsa.PrivateKey + SessionToken string + + // Time the credentials will expire. + CanExpire bool + Expires time.Time +} + +// Expired returns if the credentials have expired. +func (v Credentials) Expired() bool { + if v.CanExpire { + return !v.Expires.After(sdk.NowTime()) + } + + return false +} + +// HasKeys returns if the credentials keys are set. 
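+//
+// A minimal sketch (illustrative; key is a hypothetical *ecdsa.PrivateKey):
+//
+//	creds := Credentials{Context: "AKIDEXAMPLE", PrivateKey: key}
+//	ok := creds.HasKeys() // true once Context and PrivateKey are both set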
+func (v Credentials) HasKeys() bool {
+	return len(v.Context) > 0 && v.PrivateKey != nil
+}
+
+// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
+// to an ECDSA PrivateKey for signing with SigV4a
+type SymmetricCredentialAdaptor struct {
+	SymmetricProvider aws.CredentialsProvider
+
+	asymmetric atomic.Value
+	m          sync.Mutex
+}
+
+// Retrieve retrieves symmetric credentials from the underlying provider.
+func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	symCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	if asymCreds := s.getCreds(); asymCreds == nil {
+		return symCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	asymCreds := s.getCreds()
+	if asymCreds == nil {
+		return symCreds, nil
+	}
+
+	// if the context does not match the access key id, clear it
+	if asymCreds.Context != symCreds.AccessKeyID {
+		s.asymmetric.Store((*Credentials)(nil))
+	}
+
+	return symCreds, nil
+}
+
+// RetrievePrivateKey returns credentials suitable for SigV4a signing
+func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
+	}
+
+	privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to derive asymmetric key from credentials: %v", err)
+	}
+
+	creds := Credentials{
+		Context:      symmetricCreds.AccessKeyID,
+		PrivateKey:   privateKey,
+		SessionToken: symmetricCreds.SessionToken,
+		CanExpire:    symmetricCreds.CanExpire,
+		Expires:      symmetricCreds.Expires,
+	}
+
+	s.asymmetric.Store(&creds)
+
+	return creds, nil
+}
+
+func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
+	v := s.asymmetric.Load()
+
+	if v == nil {
+		return nil
+	}
+
+	c := v.(*Credentials)
+	if c != nil && c.HasKeys() && !c.Expired() {
+		return c
+	}
+
+	return nil
+}
+
+func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
+	credentials, err := s.SymmetricProvider.Retrieve(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return credentials, nil
+}
+
+// CredentialsProvider is the interface for a provider to retrieve credentials
+// to sign requests with.
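+// A minimal static implementation might look like the following sketch
+// (illustrative only; staticProvider is not part of this package):
+//
+//	type staticProvider struct{ creds Credentials }
+//
+//	func (p staticProvider) RetrievePrivateKey(context.Context) (Credentials, error) {
+//		return p.creds, nil
+//	}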
+type CredentialsProvider interface {
+	RetrievePrivateKey(context.Context) (Credentials, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
new file mode 100644
index 000000000000..380d17427146
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
@@ -0,0 +1,17 @@
+package v4a
+
+import "fmt"
+
+// SigningError indicates an error condition occurred while performing SigV4a signing
+type SigningError struct {
+	Err error
+}
+
+func (e *SigningError) Error() string {
+	return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
new file mode 100644
index 000000000000..5ee4fc9435a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package v4a
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.0.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
new file mode 100644
index 000000000000..a93ec40c8a85
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
@@ -0,0 +1,30 @@
+package crypto
+
+import "fmt"
+
+// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
+// of the two byte slices, assuming they represent a big-endian number.
+//
+//	error if len(x) != len(y)
+//	-1 if x < y
+//	0 if x == y
+//	+1 if x > y
+func ConstantTimeByteCompare(x, y []byte) (int, error) {
+	if len(x) != len(y) {
+		return 0, fmt.Errorf("slice lengths do not match")
+	}
+
+	xLarger, yLarger := 0, 0
+
+	for i := 0; i < len(x); i++ {
+		xByte, yByte := int(x[i]), int(y[i])
+
+		x := ((yByte - xByte) >> 8) & 1
+		y := ((xByte - yByte) >> 8) & 1
+
+		xLarger |= x &^ yLarger
+		yLarger |= y &^ xLarger
+	}
+
+	return xLarger - yLarger, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
new file mode 100644
index 000000000000..758c73fcb3e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
@@ -0,0 +1,113 @@
+package crypto
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/hmac"
+	"encoding/asn1"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"math"
+	"math/big"
+)
+
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+// ECDSAKey takes the given elliptic curve and private key (d) byte slice
+// and returns the private ECDSA key.
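+// For example (an illustrative sketch; dBytes stands in for a 32-byte
+// big-endian scalar and is not defined here):
+//
+//	priv := ECDSAKey(elliptic.P256(), dBytes)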
+func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
+	return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
+}
+
+// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
+// private and public keypair
+func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
+	pX, pY := curve.ScalarBaseMult(d.Bytes())
+
+	privKey := &ecdsa.PrivateKey{
+		PublicKey: ecdsa.PublicKey{
+			Curve: curve,
+			X:     pX,
+			Y:     pY,
+		},
+		D: d,
+	}
+
+	return privKey
+}
+
+// ECDSAPublicKey takes the provided curve and (x, y) coordinates and returns
+// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
+func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
+	xPoint := (&big.Int{}).SetBytes(x)
+	yPoint := (&big.Int{}).SetBytes(y)
+
+	if !curve.IsOnCurve(xPoint, yPoint) {
+		return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
+	}
+
+	return &ecdsa.PublicKey{
+		Curve: curve,
+		X:     xPoint,
+		Y:     yPoint,
+	}, nil
+}
+
+// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
+// whether the given signature is valid.
+func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
+	var ecdsaSignature ecdsaSignature
+
+	_, err := asn1.Unmarshal(signature, &ecdsaSignature)
+	if err != nil {
+		return false, err
+	}
+
+	return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// HMACKeyDerivation provides an implementation of a NIST SP 800-108 KDF (Key Derivation Function) in Counter Mode.
+// For the purposes of this implementation, HMAC is used as the PRF (Pseudorandom function), where the value of
+// `r` is defined as a 4 byte counter.
+func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
+	// verify that we won't overflow the counter
+	n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
+	if n > 0x7FFFFFFF {
+		return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
+	}
+
+	// verify the requested bit length is not larger than the length encoding size
+	if int64(bitLen) > 0x7FFFFFFF {
+		return nil, fmt.Errorf("bitLen is greater than 32-bits")
+	}
+
+	fixedInput := bytes.NewBuffer(nil)
+	fixedInput.Write(label)
+	fixedInput.WriteByte(0x00)
+	fixedInput.Write(context)
+	if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
+		return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
+	}
+
+	var output []byte
+
+	h := hmac.New(hash, key)
+
+	for i := int64(1); i <= n; i++ {
+		h.Reset()
+		if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
+			return nil, err
+		}
+		_, err := h.Write(fixedInput.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		output = append(output, h.Sum(nil)...)
+	}
+
+	return output[:bitLen/8], nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
new file mode 100644
index 000000000000..89a76e2eaab4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
@@ -0,0 +1,36 @@
+package v4
+
+const (
+	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+	// UnsignedPayload indicates that the request payload body is unsigned
+	UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+	// AmzAlgorithmKey indicates the signing algorithm
+	AmzAlgorithmKey = "X-Amz-Algorithm"
+
+	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+	AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+	AmzDateKey = "X-Amz-Date"
+
+	// AmzCredentialKey is the access key ID and credential scope
+	AmzCredentialKey = "X-Amz-Credential"
+
+	// AmzSignedHeadersKey is the set of headers signed for the request
+	AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+	// AmzSignatureKey is the query parameter to store the SigV4 signature
+	AmzSignatureKey = "X-Amz-Signature"
+
+	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+	TimeFormat = "20060102T150405Z"
+
+	// ShortTimeFormat is the shortened time format used in the credential scope
+	ShortTimeFormat = "20060102"
+
+	// ContentSHAKey is the SHA-256 of the request body
+	ContentSHAKey = "X-Amz-Content-Sha256"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
new file mode 100644
index 000000000000..a15177e8f3f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and simply
+// checks whether a value adheres to that Rule
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule is a generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule reports whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for whitelisting
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// DenyList is a generic Rule for blacklisting
+type DenyList struct {
+	Rule
+}
+
+// IsValid for DenyList checks that the value is not within the DenyList
+func (b DenyList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns if a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules allows rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
new file mode 100644
index 000000000000..3487dc3352d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
@@ -0,0 +1,67 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	DenyList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+		},
+	},
+}
+
+// RequiredSignedHeaders is a whitelist for Build canonical headers.
+var RequiredSignedHeaders = Rules{
+	AllowList{
+		MapRule{
+			"Cache-Control":                         struct{}{},
+			"Content-Disposition":                   struct{}{},
+			"Content-Encoding":                      struct{}{},
+			"Content-Language":                      struct{}{},
+			"Content-Md5":                           struct{}{},
+			"Content-Type":                          struct{}{},
+			"Expires":                               struct{}{},
+			"If-Match":                              struct{}{},
+			"If-Modified-Since":                     struct{}{},
+			"If-None-Match":                         struct{}{},
+			"If-Unmodified-Since":                   struct{}{},
+			"Range":                                 struct{}{},
+			"X-Amz-Acl":                             struct{}{},
+			"X-Amz-Copy-Source":                     struct{}{},
+			"X-Amz-Copy-Source-If-Match":            struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range":               struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Grant-Full-control":                    struct{}{},
+			"X-Amz-Grant-Read":                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                        struct{}{},
+			"X-Amz-Grant-Write":                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                       struct{}{},
+			"X-Amz-Metadata-Directive":                    struct{}{},
+			"X-Amz-Mfa":                                   struct{}{},
+			"X-Amz-Request-Payer":                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Storage-Class":                             struct{}{},
+			"X-Amz-Website-Redirect-Location":                 struct{}{},
+			"X-Amz-Content-Sha256":                            struct{}{},
+			"X-Amz-Tagging":                                   struct{}{},
+		},
+	},
+	Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is a whitelist for Build query headers: headers not in
+// RequiredSignedHeaders that match the X-Amz- prefix may be hoisted into the
+// query string.
+var AllowedQueryHoisting = InclusiveRules{
+	DenyList{RequiredSignedHeaders},
+	Patterns{"X-Amz-"},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
new file mode 100644
index 000000000000..e7fa7a1b1e60
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+)
+
+// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
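+// For example, the first step of the SigV4 signing key derivation could be
+// sketched as follows (secret is a placeholder for a secret access key):
+//
+//	dateKey := HMACSHA256([]byte("AWS4"+secret), []byte("20220101"))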
+func HMACSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
new file mode 100644
index 000000000000..bf93659a43f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// SanitizeHostForHeader removes the default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+	host := getHost(r)
+	port := portOnly(host)
+	if port != "" && isDefaultPort(r.URL.Scheme, port) {
+		r.Host = stripPort(host)
+	}
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+	if r.Host != "" {
+		return r.Host
+	}
+
+	return r.URL.Host
+}
+
+// stripPort returns hostport without any port number.
+//
+// If hostport is an IPv6 literal with a port number, stripPort returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// portOnly returns the port part of hostport, without the leading colon.
+// If hostport doesn't contain a port, portOnly returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+	if port == "" {
+		return true
+	}
+
+	lowerCaseScheme := strings.ToLower(scheme)
+	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
new file mode 100644
index 000000000000..1de06a765d1b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
+type SigningTime struct {
+	time.Time
+	timeFormat      string
+	shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+	return SigningTime{
+		Time: t,
+	}
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+	return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides a time formatted in the short time format, 20060102.
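+// For example (an illustrative sketch, not upstream documentation):
+//
+//	st := NewSigningTime(time.Date(2022, 1, 1, 15, 4, 5, 0, time.UTC))
+//	_ = st.ShortTimeFormat() // "20220101"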
+func (m *SigningTime) ShortTimeFormat() string {
+	return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+	if len(*target) > 0 {
+		return *target
+	}
+	v := m.Time.Format(format)
+	*target = v
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
new file mode 100644
index 000000000000..741019b5f9da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
@@ -0,0 +1,64 @@
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+const doubleSpace = "  " // two side-by-side spaces
+
+// StripExcessSpaces returns the passed in string with leading and trailing
+// spaces trimmed and multiple side-by-side spaces collapsed to single spaces.
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
new file mode 100644
index 000000000000..55d5e8abd6d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
@@ -0,0 +1,105 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPSigner is a SigV4a HTTP signer implementation
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error
+}
+
+// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware.
+type SignHTTPRequestMiddlewareOptions struct {
+	Credentials CredentialsProvider
+	Signer      HTTPSigner
+	LogSigning  bool
+}
+
+// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a.
+type SignHTTPRequestMiddleware struct {
+	credentials CredentialsProvider
+	signer      HTTPSigner
+	logSigning  bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+	return &SignHTTPRequestMiddleware{
+		credentials: options.Credentials,
+		signer:      options.Signer,
+		logSigning:  options.LogSigning,
+	}
+}
+
+// ID returns the middleware identifier.
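+// The "Signing" identifier below matches the ID used by the SDK's standard
+// SigV4 signing middleware, which is what allows RegisterSigningMiddleware
+// further down to swap this SigV4a signer into an existing stack.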
+func (s *SignHTTPRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+// HandleFinalize signs an HTTP request using SigV4a.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !hasCredentialProvider(s.credentials) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request)
+	}
+
+	signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+	}
+
+	credentials, err := s.credentials.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+	}
+
+	err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), func(o *SignerOptions) {
+		o.Logger = middleware.GetLogger(ctx)
+		o.LogSigning = s.logSigning
+	})
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func hasCredentialProvider(p CredentialsProvider) bool {
+	return p != nil
+}
+
+// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise, the middleware will be added at the tail of the
+// finalize step.
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+	const signedID = "Signing"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
new file mode 100644
index 000000000000..951fc415d527
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
@@ -0,0 +1,117 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4a signer that can sign and create a
+// presigned URL for an HTTP request.
+type HTTPPresigner interface {
+	PresignHTTP(
+		ctx context.Context, credentials Credentials, r *http.Request,
+		payloadHash string, service string, regionSet []string, signingTime time.Time,
+		optFns ...func(*SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
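+// Typical wiring, as an illustrative sketch only (provider stands in for any
+// CredentialsProvider implementation and is not defined here):
+//
+//	mw := NewPresignHTTPRequestMiddleware(PresignHTTPRequestMiddlewareOptions{
+//		CredentialsProvider: provider,
+//		Presigner:           NewSigner(),
+//	})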
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// It will short-circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4a presign authentication scheme.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !hasCredentialProvider(s.credentialsProvider) {
+		out.Result = &v4.PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &v4.PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
new file mode 100644
index 000000000000..f1f6ecc37140
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
@@ -0,0 +1,520 @@
+package v4a
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"math/big"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto"
+	v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	// AmzRegionSetKey represents the region set header used for sigv4a
+	AmzRegionSetKey     = "X-Amz-Region-Set"
+	amzAlgorithmKey     = v4Internal.AmzAlgorithmKey
+	amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
+	amzDateKey          = v4Internal.AmzDateKey
+	amzCredentialKey    = v4Internal.AmzCredentialKey
+	amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
+	authorizationHeader = "Authorization"
+
+	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"
+
+	timeFormat      = "20060102T150405Z"
+	shortTimeFormat = "20060102"
+
+	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
+	EmptyStringSHA256 = v4Internal.EmptyStringSHA256
+
+	// Version of signing v4a
+	Version = "SigV4A"
+)
+
+var (
+	p256          elliptic.Curve
+	nMinusTwoP256 *big.Int
+
+	one = new(big.Int).SetInt64(1)
+)
+
+func init() {
+	// Ensure the elliptic curve parameters are initialized on package import rather than on first use
+	p256 = elliptic.P256()
+
+	nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
+	nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
+}
+
+// SignerOptions is the SigV4a signing options for constructing a Signer.
+type SignerOptions struct {
+	Logger     logging.Logger
+	LogSigning bool
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+}
+
+// Signer is a SigV4a HTTP signing implementation
+type Signer struct {
+	options SignerOptions
+}
+
+// NewSigner constructs a SigV4a Signer.
+func NewSigner(optFns ...func(*SignerOptions)) *Signer {
+	options := SignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Signer{options: options}
+}
+
+// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
+// IAM AccessKey and SecretKey pair.
+//
+// Based on FIPS.186-4 Appendix B.4.2
+func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
+	params := p256.Params()
+	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
+	counter := 0x01
+
+	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
+	kdfContext := bytes.NewBuffer(buffer)
+
+	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
+
+	d := new(big.Int)
+	for {
+		kdfContext.Reset()
+		kdfContext.WriteString(accessKey)
+		kdfContext.WriteByte(byte(counter))
+
+		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
+		if err != nil {
+			return nil, err
+		}
+
+		// Check the key first, before calling SetBytes, to determine if it is in fact a valid candidate.
+		// This ensures the byte slice is the correct length (32 bytes) to compare in constant-time.
+		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		if cmp == -1 {
+			d.SetBytes(key)
+			break
+		}
+
+		counter++
+		if counter > 0xFF {
+			return nil, fmt.Errorf("exhausted single byte external counter")
+		}
+	}
+	d = d.Add(d, one)
+
+	priv := new(ecdsa.PrivateKey)
+	priv.PublicKey.Curve = p256
+	priv.D = d
+	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())
+
+	return priv, nil
+}
+
+type httpSigner struct {
+	Request     *http.Request
+	ServiceName string
+	RegionSet   []string
+	Time        time.Time
+	Credentials Credentials
+	IsPreSign   bool
+
+	Logger logging.Logger
+	Debug  bool
+
+	// PayloadHash is the hex encoded SHA-256 hash of the request payload.
+	// If len(PayloadHash) == 0 the signer will attempt to send the request
+	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
+	PayloadHash string
+
+	DisableHeaderHoisting  bool
+	DisableURIPathEscaping bool
+}
+
+// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
+// The passed in request will be modified in place.
+func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	return nil
+}
+
+// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a.
+// Returns the presigned URL along with the headers that were signed with the request.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request, add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		IsPreSign:              true,
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return "", nil, err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	signedHeaders = make(http.Header)
+
+	// For the signed headers we canonicalize the header keys in the returned map.
+	// This avoids situations where the standard library can duplicate headers such
+	// as the Host header. For example, the standard library will set the Host
+	// header even if it is present in lower-case form.
+	for k, v := range signedRequest.SignedHeaders {
+		key := textproto.CanonicalMIMEHeaderKey(k)
+		signedHeaders[key] = append(signedHeaders[key], v...)
+	}
+
+	return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
+	amzDate := s.Time.Format(timeFormat)
+
+	if s.IsPreSign {
+		query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
+		query.Set(amzDateKey, amzDate)
+		query.Set(amzAlgorithmKey, signingAlgorithm)
+		if len(s.Credentials.SessionToken) > 0 {
+			query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
+		}
+		return
+	}
+
+	headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
+	headers.Set(amzDateKey, amzDate)
+	if len(s.Credentials.SessionToken) > 0 {
+		headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
+	}
+}
+
+func (s *httpSigner) Build() (signedRequest, error) {
+	req := s.Request
+
+	query := req.URL.Query()
+	headers := req.Header
+
+	s.setRequiredSigningFields(headers, query)
+
+	// Sort Each Query Key's Values
+	for key := range query {
+		sort.Strings(query[key])
+	}
+
+	v4Internal.SanitizeHostForHeader(req)
+
+	credentialScope := s.buildCredentialScope()
+	credentialStr := s.Credentials.Context + "/" + credentialScope
+	if s.IsPreSign {
+		query.Set(amzCredentialKey, credentialStr)
+	}
+
+	unsignedHeaders := headers
+	if s.IsPreSign && !s.DisableHeaderHoisting {
+		urlValues := url.Values{}
+		urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
+		for k := range urlValues {
+			query[k] = urlValues[k]
+		}
+	}
+
+	host := req.URL.Host
+	if len(req.Host) > 0 {
+		host = req.Host
+	}
+
+	signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
+
+	if s.IsPreSign {
+		query.Set(amzSignedHeadersKey, signedHeadersStr)
+	}
+
+	rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)
+
+	canonicalURI := v4Internal.GetURIPath(req.URL)
+	if !s.DisableURIPathEscaping {
+		canonicalURI = httpbinding.EscapePath(canonicalURI, false)
+	}
+
+	canonicalString := s.buildCanonicalString(
+		req.Method,
+		canonicalURI,
+		rawQuery,
+		signedHeadersStr,
+		canonicalHeaderStr,
+	)
+
+	strToSign := s.buildStringToSign(credentialScope, canonicalString)
+	signingSignature, err := s.buildSignature(strToSign)
+	if err != nil {
+		return signedRequest{}, err
+	}
+
+	if s.IsPreSign {
+		rawQuery += "&X-Amz-Signature=" + signingSignature
+	} else {
+		headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
+	}
+
+	req.URL.RawQuery = rawQuery
+
+	return signedRequest{
+		Request:         req,
+		SignedHeaders:   signedHeaders,
+		CanonicalString: canonicalString,
+		StringToSign:    strToSign,
+		PreSigned:       s.IsPreSign,
+	}, nil
+}
+
+func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
+	const credential = "Credential="
+	const signedHeaders = "SignedHeaders="
+	const signature = "Signature="
+	const commaSpace = ", "
+
+	var parts strings.Builder
+	parts.Grow(len(signingAlgorithm) + 1 +
+		len(credential) + len(credentialStr) + len(commaSpace) +
+		len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
+		len(signature) + len(signingSignature),
+	)
+	parts.WriteString(signingAlgorithm)
+	parts.WriteRune(' ')
+	parts.WriteString(credential)
+	parts.WriteString(credentialStr)
+	parts.WriteString(commaSpace)
+	parts.WriteString(signedHeaders)
+	parts.WriteString(signedHeadersStr)
+	parts.WriteString(commaSpace)
+	parts.WriteString(signature)
+	parts.WriteString(signingSignature)
+	return parts.String()
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+	return strings.Join([]string{
+		s.Time.Format(shortTimeFormat),
+		s.ServiceName,
+		"aws4_request",
+	}, "/")
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
+		}
+	}
+
+	return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+	signed = make(http.Header)
+
+	var headers []string
+	const hostHeader = "host"
+	headers = append(headers, hostHeader)
+	signed[hostHeader] = append(signed[hostHeader], host)
+
+	if length > 0 {
+		const contentLengthHeader = "content-length"
+		headers = append(headers, contentLengthHeader)
+		signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+	}
+
+	for k, v := range header {
+		if !rule.IsValid(k) {
+			continue // ignored header
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := signed[lowerCaseKey]; ok {
+			// include additional values
+			signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		signed[lowerCaseKey] = v
+	}
+	sort.Strings(headers)
+
+	signedHeaders = strings.Join(headers, ";")
+
+	var canonicalHeaders strings.Builder
+	n := len(headers)
+	const colon = ':'
+	for i := 0; i < n; i++ {
+		if headers[i] == hostHeader {
+			canonicalHeaders.WriteString(hostHeader)
+			canonicalHeaders.WriteRune(colon)
+			canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
+		} else {
+			canonicalHeaders.WriteString(headers[i])
+			canonicalHeaders.WriteRune(colon)
+			// Trim leading and trailing spaces, and deduplicate inner spaces, in signed header values.
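+			// For example, values of {"  a   b ", "c"} canonicalize to "a b,c"
+			// (an illustrative example, not taken from the upstream source).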
+			values := signed[headers[i]]
+			for j, v := range values {
+				cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
+				canonicalHeaders.WriteString(cleanedValue)
+				if j < len(values)-1 {
+					canonicalHeaders.WriteRune(',')
+				}
+			}
+		}
+		canonicalHeaders.WriteRune('\n')
+	}
+	canonicalHeadersStr = canonicalHeaders.String()
+
+	return signed, signedHeaders, canonicalHeadersStr
+}
+
+func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
+	return strings.Join([]string{
+		method,
+		uri,
+		query,
+		canonicalHeaders,
+		signedHeaders,
+		s.PayloadHash,
+	}, "\n")
+}
+
+func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
+	return strings.Join([]string{
+		signingAlgorithm,
+		s.Time.Format(timeFormat),
+		credentialScope,
+		hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
+	}, "\n")
+}
+
+func makeHash(hash hash.Hash, b []byte) []byte {
+	hash.Reset()
+	hash.Write(b)
+	return hash.Sum(nil)
+}
+
+func (s *httpSigner) buildSignature(strToSign string) (string, error) {
+	sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(sig), nil
+}
+
+const logSignInfoMsg = `Request Signature:
+---[ CANONICAL STRING  ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) {
+	if !options.LogSigning {
+		return
+	}
+	signedURLMsg := ""
+	if r.PreSigned {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
+	}
+	logger := logging.WithContext(ctx, options.Logger)
+	logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg)
+}
+
+type signedRequest struct {
+	Request         *http.Request
+	SignedHeaders   http.Header
+	CanonicalString string
+	StringToSign    string
+	PreSigned       bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh
new file mode 100644
index 000000000000..81a836127566
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SDK_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+    echo "Usage: $0 [-s SDK_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+    exit 1
+}
+
+while getopts "hs:d:" options; do
+    case "${options}" in
+        s)
+            SDK_SOURCE_DIR=${OPTARG}
+            if [ "$SDK_SOURCE_DIR" == "" ]; then
+                echo "path to SDK source directory is required" 1>&2
+                usage
+            fi
+            ;;
+        d)
+            PROJECT_DIR=${OPTARG}
+            ;;
+        h)
+            usage
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+    cd "$PROJECT_DIR" || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do
+    repPath=${x/github.com\/aws\/aws-sdk-go-v2/${SDK_SOURCE_DIR}}
+    echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
new file mode 100644
index 000000000000..bde8e1391bb8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
@@ -0,0 +1,74 @@
+
+[dependencies]
+  "github.com/aws/smithy-go" = "v1.11.2"
+  "github.com/google/go-cmp" = "v0.5.7"
+ 
"github.com/jmespath/go-jmespath" = "v0.4.0" + "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd" + +[modules] + + [modules."."] + metadata_package = "aws" + + [modules."example/service/dynamodb/createTable"] + no_tag = true + + [modules."example/service/dynamodb/scanItems"] + no_tag = true + + [modules."example/service/s3/listObjects"] + no_tag = true + + [modules."example/service/s3/usingPrivateLink"] + no_tag = true + + [modules."feature/ec2/imds/internal/configtesting"] + no_tag = true + + [modules."internal/codegen"] + no_tag = true + + [modules."internal/configsources/configtesting"] + no_tag = true + + [modules."internal/protocoltest/awsrestjson"] + no_tag = true + + [modules."internal/protocoltest/ec2query"] + no_tag = true + + [modules."internal/protocoltest/jsonrpc"] + no_tag = true + + [modules."internal/protocoltest/jsonrpc10"] + no_tag = true + + [modules."internal/protocoltest/query"] + no_tag = true + + [modules."internal/protocoltest/restxml"] + no_tag = true + + [modules."internal/protocoltest/restxmlwithnamespace"] + no_tag = true + + [modules."internal/repotools"] + no_tag = true + + [modules."internal/repotools/changes"] + no_tag = true + + [modules."service/internal/benchmark"] + no_tag = true + + [modules."service/internal/integrationtest"] + no_tag = true + + [modules."service/kinesis/internal/testing"] + no_tag = true + + [modules."service/s3/internal/configtesting"] + no_tag = true + + [modules."service/transcribestreaming/internal/testing"] + no_tag = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md new file mode 100644 index 000000000000..7cc42a1936ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -0,0 +1,48 @@ +# v1.9.1 (2022-03-24) + +* No change notes available for this release. + +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
new file mode 100644
index 000000000000..3f451fc9b453
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
@@ -0,0 +1,176 @@
+package acceptencoding
+
+import (
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const acceptEncodingHeaderKey = "Accept-Encoding"
+const contentEncodingHeaderKey = "Content-Encoding"
+
+// AddAcceptEncodingGzipOptions provides the options for the
+// AddAcceptEncodingGzip middleware setup.
+type AddAcceptEncodingGzipOptions struct {
+	Enable bool
+}
+
+// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP
+// middleware to the operation stack. This allows checksums to be correctly
+// computed without disabling GZIP support.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+	if options.Enable {
+		if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+			return err
+		}
+		if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that disables the underlying http
+// client's automatic gzip content-encoding decompression.
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+	return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly set Accept-Encoding to identity; this prevents the http
+	// client from negotiating gzip and auto extracting compressed content.
+	req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+	return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly enable gzip support; this prevents the http client from
+	// auto extracting the compressed content.
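+	// Go's http.Transport only applies transparent gzip decompression when it
+	// adds the Accept-Encoding header itself; setting the header explicitly
+	// here leaves the raw gzip body intact for the DecompressGzip deserialize
+	// middleware to decompress manually.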
+	req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+	return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface.
+func (*DecompressGzip) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	output, metadata, err = next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return output, metadata, err
+	}
+
+	resp, ok := output.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return output, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+		}
+	}
+	if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" {
+		return output, metadata, err
+	}
+
+	// Clear content length since it will no longer be valid once the response
+	// body is decompressed.
+	resp.Header.Del("Content-Length")
+	resp.ContentLength = -1
+
+	resp.Body = wrapGzipReader(resp.Body)
+
+	return output, metadata, err
+}
+
+type gzipReader struct {
+	reader io.ReadCloser
+	gzip   *gzip.Reader
+}
+
+func wrapGzipReader(reader io.ReadCloser) *gzipReader {
+	return &gzipReader{
+		reader: reader,
+	}
+}
+
+// Read wraps the gzip reader around the underlying io.Reader to extract the
+// response bytes on the fly.
+func (g *gzipReader) Read(b []byte) (n int, err error) {
+	if g.gzip == nil {
+		g.gzip, err = gzip.NewReader(g.reader)
+		if err != nil {
+			g.gzip = nil // ensure uninitialized gzip value isn't used in close.
+			return 0, fmt.Errorf("failed to decompress gzip response, %w", err)
+		}
+	}
+
+	return g.gzip.Read(b)
+}
+
+func (g *gzipReader) Close() error {
+	if g.gzip == nil {
+		return nil
+	}
+
+	if err := g.gzip.Close(); err != nil {
+		g.reader.Close()
+		return fmt.Errorf("failed to decompress gzip response, %w", err)
+	}
+
+	return g.reader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
new file mode 100644
index 000000000000..3ffac01315d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
@@ -0,0 +1,23 @@
+/*
+Package acceptencoding provides customizations associated with the Accept-Encoding header.
+
+Accept encoding gzip
+
+The Go HTTP client automatically supports accept-encoding and content-encoding
+gzip by default. This default behavior is not desired by the SDK because it
+prevents validating the response body's checksum. To prevent this, the SDK must
+manually control the usage of content-encoding gzip.
+
+To control content-encoding, the SDK must always set the `Accept-Encoding`
+header to a value. This prevents the HTTP client from using gzip automatically.
+When gzip is enabled on the API client, the SDK's customization will control
+decompressing the gzip data in order to not break the checksum validation. When
+gzip is disabled, the API client will disable gzip, preventing the HTTP
+client's default behavior.
+
+An `EnableAcceptEncodingGzip` option may or may not be present depending on the
+client using the below middleware. The option, if present, can be used to enable
+the SDK's automatic decompression of gzip content.
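+
+When the option is enabled, AddAcceptEncodingGzip registers the EnableGzip
+finalize middleware together with the DecompressGzip deserialize middleware;
+when it is disabled, the DisableGzip middleware is registered instead.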
+
+*/
+package acceptencoding
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
new file mode 100644
index 000000000000..af9bc67b3e3c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package acceptencoding
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.9.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
new file mode 100644
index 000000000000..b18d187019a7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
@@ -0,0 +1,32 @@
+# v1.1.5 (2022-04-27)
+
+* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
+
+# v1.1.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2022-03-08)
+
+* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2022-02-24)
+
+* **Release**: New module for computing checksums
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go new file mode 100644 index 000000000000..a17041c35d07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -0,0 +1,323 @@ +package checksum + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "hash/crc32" + "io" + "strings" + "sync" +) + +// Algorithm represents the checksum algorithms supported +type Algorithm string + +// Enumeration values for supported checksum Algorithms. +const ( + // AlgorithmCRC32C represents CRC32C hash algorithm + AlgorithmCRC32C Algorithm = "CRC32C" + + // AlgorithmCRC32 represents CRC32 hash algorithm + AlgorithmCRC32 Algorithm = "CRC32" + + // AlgorithmSHA1 represents SHA1 hash algorithm + AlgorithmSHA1 Algorithm = "SHA1" + + // AlgorithmSHA256 represents SHA256 hash algorithm + AlgorithmSHA256 Algorithm = "SHA256" +) + +var supportedAlgorithms = []Algorithm{ + AlgorithmCRC32C, + AlgorithmCRC32, + AlgorithmSHA1, + AlgorithmSHA256, +} + +func (a Algorithm) String() string { return string(a) } + +// ParseAlgorithm attempts to parse the provided value into a checksum +// algorithm, matching without case. Returns the algorithm matched, or an error +// if the algorithm wasn't matched. +func ParseAlgorithm(v string) (Algorithm, error) { + for _, a := range supportedAlgorithms { + if strings.EqualFold(string(a), v) { + return a, nil + } + } + return "", fmt.Errorf("unknown checksum algorithm, %v", v) +} + +// FilterSupportedAlgorithms filters the set of algorithms, returning a slice +// of algorithms that are supported. +func FilterSupportedAlgorithms(vs []string) []Algorithm { + found := map[Algorithm]struct{}{} + + supported := make([]Algorithm, 0, len(supportedAlgorithms)) + for _, v := range vs { + for _, a := range supportedAlgorithms { + // Only consider algorithms that are supported + if !strings.EqualFold(v, string(a)) { + continue + } + // Ignore duplicate algorithms in list. + if _, ok := found[a]; ok { + continue + } + + supported = append(supported, a) + found[a] = struct{}{} + } + } + return supported +} + +// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is +// returned if the algorithm is unknown. +func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { + switch v { + case AlgorithmSHA1: + return sha1.New(), nil + case AlgorithmSHA256: + return sha256.New(), nil + case AlgorithmCRC32: + return crc32.NewIEEE(), nil + case AlgorithmCRC32C: + return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil + default: + return nil, fmt.Errorf("unknown checksum algorithm, %v", v) + } +} + +// AlgorithmChecksumLength returns the length of the algorithm's checksum in +// bytes. If the algorithm is not known, an error is returned. +func AlgorithmChecksumLength(v Algorithm) (int, error) { + switch v { + case AlgorithmSHA1: + return sha1.Size, nil + case AlgorithmSHA256: + return sha256.Size, nil + case AlgorithmCRC32: + return crc32.Size, nil + case AlgorithmCRC32C: + return crc32.Size, nil + default: + return 0, fmt.Errorf("unknown checksum algorithm, %v", v) + } +} + +const awsChecksumHeaderPrefix = "x-amz-checksum-" + +// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash. 
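+// For example, AlgorithmSHA256 maps to the "x-amz-checksum-sha256" header:
+//
+//	AlgorithmHTTPHeader(AlgorithmSHA256) // "x-amz-checksum-sha256"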
+func AlgorithmHTTPHeader(v Algorithm) string {
+	return awsChecksumHeaderPrefix + strings.ToLower(string(v))
+}
+
+// base64EncodeHashSum computes the base64 encoded checksum of a given running
+// hash. The running hash must already have content written to it. Returns the
+// byte slice of the checksum.
+func base64EncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	return sum64
+}
+
+// hexEncodeHashSum computes the hex encoded checksum of a given running hash.
+// The running hash must already have content written to it. Returns the byte
+// slice of the checksum.
+func hexEncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sumHex := make([]byte, hex.EncodedLen(len(sum)))
+	hex.Encode(sumHex, sum)
+	return sumHex
+}
+
+// computeMD5Checksum computes the base64 MD5 checksum of an io.Reader's
+// contents. Returns the byte slice of the MD5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+
+	// Copy errors may be assumed to be from the body.
+	if _, err := io.Copy(h, r); err != nil {
+		return nil, fmt.Errorf("failed to compute MD5 hash of reader, %w", err)
+	}
+
+	// Encode the MD5 checksum in base64.
+	return base64EncodeHashSum(h), nil
+}
+
+// computeChecksumReader provides a reader wrapping an underlying io.Reader to
+// compute the checksum of the stream's bytes.
+type computeChecksumReader struct {
+	stream            io.Reader
+	algorithm         Algorithm
+	hasher            hash.Hash
+	base64ChecksumLen int
+
+	mux            sync.RWMutex
+	lockedChecksum string
+	lockedErr      error
+}
+
+// newComputeChecksumReader returns a computeChecksumReader for the stream and
+// algorithm specified. Returns an error if unable to create the reader, or if
+// the algorithm is unknown.
+func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	checksumLength, err := AlgorithmChecksumLength(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	return &computeChecksumReader{
+		stream:            io.TeeReader(stream, hasher),
+		algorithm:         algorithm,
+		hasher:            hasher,
+		base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
+	}, nil
+}
+
+// Read wraps the underlying reader. When the underlying reader returns EOF,
+// the checksum of the reader will be computed, and can be retrieved with
+// Base64Checksum.
+func (r *computeChecksumReader) Read(p []byte) (int, error) {
+	n, err := r.stream.Read(p)
+	if err == nil {
+		return n, nil
+	} else if err != io.EOF {
+		r.mux.Lock()
+		defer r.mux.Unlock()
+
+		r.lockedErr = err
+		return n, err
+	}
+
+	b := base64EncodeHashSum(r.hasher)
+
+	r.mux.Lock()
+	defer r.mux.Unlock()
+
+	r.lockedChecksum = string(b)
+
+	return n, err
+}
+
+func (r *computeChecksumReader) Algorithm() Algorithm {
+	return r.algorithm
+}
+
+// Base64ChecksumLength returns the base64 encoded length of the checksum for
+// the algorithm.
+func (r *computeChecksumReader) Base64ChecksumLength() int {
+	return r.base64ChecksumLen
+}
+
+// Base64Checksum returns the base64 checksum for the algorithm, or an error if
+// the underlying reader returned a non-EOF error.
+//
+// Safe to be called concurrently, but will return an error until after the
+// underlying reader returns EOF.
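+//
+// A typical usage pattern is to drain the wrapping reader first:
+//
+//	io.Copy(io.Discard, r)
+//	sum, err := r.Base64Checksum()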
+func (r *computeChecksumReader) Base64Checksum() (string, error) { + r.mux.RLock() + defer r.mux.RUnlock() + + if r.lockedErr != nil { + return "", r.lockedErr + } + + if r.lockedChecksum == "" { + return "", fmt.Errorf( + "checksum not available yet, called before reader returns EOF", + ) + } + + return r.lockedChecksum, nil +} + +// validateChecksumReader implements io.ReadCloser interface. The wrapper +// performs checksum validation when the underlying reader has been fully read. +type validateChecksumReader struct { + originalBody io.ReadCloser + body io.Reader + hasher hash.Hash + algorithm Algorithm + expectChecksum string +} + +// newValidateChecksumReader returns a configured io.ReadCloser that performs +// checksum validation when the underlying reader has been fully read. +func newValidateChecksumReader( + body io.ReadCloser, + algorithm Algorithm, + expectChecksum string, +) (*validateChecksumReader, error) { + hasher, err := NewAlgorithmHash(algorithm) + if err != nil { + return nil, err + } + + return &validateChecksumReader{ + originalBody: body, + body: io.TeeReader(body, hasher), + hasher: hasher, + algorithm: algorithm, + expectChecksum: expectChecksum, + }, nil +} + +// Read attempts to read from the underlying stream while also updating the +// running hash. If the underlying stream returns with an EOF error, the +// checksum of the stream will be collected, and compared against the expected +// checksum. If the checksums do not match, an error will be returned. +// +// If a non-EOF error occurs when reading the underlying stream, that error +// will be returned and the checksum for the stream will be discarded. +func (c *validateChecksumReader) Read(p []byte) (n int, err error) { + n, err = c.body.Read(p) + if err == io.EOF { + if checksumErr := c.validateChecksum(); checksumErr != nil { + return n, checksumErr + } + } + + return n, err +} + +// Close closes the underlying reader, returning any error that occurred in the +// underlying reader. +func (c *validateChecksumReader) Close() (err error) { + return c.originalBody.Close() +} + +func (c *validateChecksumReader) validateChecksum() error { + // Compute base64 encoded checksum hash of the payload's read bytes. 
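+	// The comparison below uses strings.EqualFold, so a difference in casing
+	// of the expected checksum value does not cause a spurious mismatch.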
+	v := base64EncodeHashSum(c.hasher)
+	if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) {
+		return validationError{
+			Algorithm: c.algorithm, Expect: e, Actual: a,
+		}
+	}
+
+	return nil
+}
+
+type validationError struct {
+	Algorithm Algorithm
+	Expect    string
+	Actual    string
+}
+
+func (v validationError) Error() string {
+	return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
+		v.Algorithm, v.Expect, v.Actual)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
new file mode 100644
index 000000000000..e8b5c3f5a2c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
@@ -0,0 +1,389 @@
+package checksum
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+const (
+	crlf = "\r\n"
+
+	// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+	defaultChunkLength = 1024 * 64
+
+	awsTrailerHeaderName           = "x-amz-trailer"
+	decodedContentLengthHeaderName = "x-amz-decoded-content-length"
+
+	contentEncodingHeaderName            = "content-encoding"
+	awsChunkedContentEncodingHeaderValue = "aws-chunked"
+
+	trailerKeyValueSeparator = ":"
+)
+
+var (
+	crlfBytes       = []byte(crlf)
+	finalChunkBytes = []byte("0" + crlf)
+)
+
+type awsChunkedEncodingOptions struct {
+	// The total size of the stream. For unsigned encoding this implies that
+	// there will only be a single chunk containing the underlying payload,
+	// unless ChunkLength is also specified.
+	StreamLength int64
+
+	// Set of trailer key:value pairs that will be appended to the end of the
+	// payload after the end chunk has been written.
+	Trailers map[string]awsChunkedTrailerValue
+
+	// The maximum size of each chunk to be sent. The default value of -1
+	// signals that an optimal chunk length will be used automatically.
+	// ChunkLength must be at least 8 KB.
+	//
+	// If ChunkLength and StreamLength are both specified, the stream will be
+	// broken up into ChunkLength chunks. The encoded length of the aws-chunked
+	// encoding can still be determined as long as all trailers, if any, have a
+	// fixed length.
+	ChunkLength int
+}
+
+type awsChunkedTrailerValue struct {
+	// Function to retrieve the value of the trailer. Will only be called after
+	// the underlying stream returns EOF error.
+	Get func() (string, error)
+
+	// If the length of the value can be pre-determined, and is constant,
+	// specify the length. A value of -1 means the length is unknown, or
+	// cannot be pre-determined.
+	Length int
+}
+
+// awsChunkedEncoding provides a reader that wraps the payload such that the
+// payload is read as a single aws-chunked payload. This reader can only be
+// used if the content length of the payload is known. Content-Length is used
+// as the size of the single payload chunk. The final chunk and trailing
+// checksum are appended at the end.
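+//
+// The matching Content-Encoding and x-amz-trailer request headers for this
+// body format are produced by the HTTPHeaders method below.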
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
+//
+// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
+// if the original request stream is "Hello world", and the checksum hash used
+// is SHA256:
+//
+//	b\r\n
+//	Hello world\r\n
+//	0\r\n
+//	x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
+//	\r\n
+type awsChunkedEncoding struct {
+	options awsChunkedEncodingOptions
+
+	encodedStream        io.Reader
+	trailerEncodedLength int
+}
+
+// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
+// for unsigned aws-chunked content encoding. Any additional trailers that need
+// to be appended after the end chunk must be included via Trailer callbacks.
+func newUnsignedAWSChunkedEncoding(
+	stream io.Reader,
+	optFns ...func(*awsChunkedEncodingOptions),
+) *awsChunkedEncoding {
+	options := awsChunkedEncodingOptions{
+		Trailers:     map[string]awsChunkedTrailerValue{},
+		StreamLength: -1,
+		ChunkLength:  -1,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	var chunkReader io.Reader
+	if options.ChunkLength != -1 || options.StreamLength == -1 {
+		if options.ChunkLength == -1 {
+			options.ChunkLength = defaultChunkLength
+		}
+		chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
+	} else {
+		chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
+	}
+
+	trailerReader := newAWSChunkedTrailerReader(options.Trailers)
+
+	return &awsChunkedEncoding{
+		options: options,
+		encodedStream: io.MultiReader(chunkReader,
+			trailerReader,
+			bytes.NewBuffer(crlfBytes),
+		),
+		trailerEncodedLength: trailerReader.EncodedLength(),
+	}
+}
+
+// EncodedLength returns the final length of the aws-chunked content encoded
+// stream if it can be determined without reading the underlying stream or lazy
+// header values, otherwise -1 is returned.
+func (e *awsChunkedEncoding) EncodedLength() int64 {
+	var length int64
+	if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
+		return -1
+	}
+
+	if e.options.StreamLength != 0 {
+		// If the stream length is known, and there is no chunk length specified,
+		// only a single chunk will be used. Otherwise the stream length needs to
+		// include the multiple chunk padding content.
+		if e.options.ChunkLength == -1 {
+			length += getUnsignedChunkBytesLength(e.options.StreamLength)
+
+		} else {
+			// Compute chunk header and payload length
+			numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
+			length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
+			if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
+				length += getUnsignedChunkBytesLength(remainder)
+			}
+		}
+	}
+
+	// End chunk
+	length += int64(len(finalChunkBytes))
+
+	// Trailers
+	length += int64(e.trailerEncodedLength)
+
+	// Encoding terminator
+	length += int64(len(crlf))
+
+	return length
+}
+
+func getUnsignedChunkBytesLength(payloadLength int64) int64 {
+	payloadLengthStr := strconv.FormatInt(payloadLength, 16)
+	return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf))
+}
+
+// HTTPHeaders returns the set of headers that must be included in the request
+// for aws-chunked to work. This includes the content-encoding: aws-chunked
+// header.
+//
+// If there are multiple layers of content encoding, the aws-chunked encoding
+// must be appended after the stream's previous encodings. The best way
+// to do this is to append all header values returned to the HTTP request's set
+// of headers.
+func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
+	headers := map[string][]string{
+		contentEncodingHeaderName: {
+			awsChunkedContentEncodingHeaderValue,
+		},
+	}
+
+	if len(e.options.Trailers) != 0 {
+		trailers := make([]string, 0, len(e.options.Trailers))
+		for name := range e.options.Trailers {
+			trailers = append(trailers, strings.ToLower(name))
+		}
+		headers[awsTrailerHeaderName] = trailers
+	}
+
+	return headers
+}
+
+func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
+	return e.encodedStream.Read(b)
+}
+
+// awsChunkedTrailerReader provides a lazy reader for reading aws-chunked
+// content encoded trailers. The trailer values will not be retrieved until the
+// reader is read from.
+type awsChunkedTrailerReader struct {
+	reader               *bytes.Buffer
+	trailers             map[string]awsChunkedTrailerValue
+	trailerEncodedLength int
+}
+
+// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader
+// for lazily reading aws-chunked content encoded trailers.
+func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
+	return &awsChunkedTrailerReader{
+		trailers:             trailers,
+		trailerEncodedLength: trailerEncodedLength(trailers),
+	}
+}
+
+func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
+	for name, trailer := range trailers {
+		length += len(name) + len(trailerKeyValueSeparator)
+		l := trailer.Length
+		if l == -1 {
+			return -1
+		}
+		length += l + len(crlf)
+	}
+
+	return length
+}
+
+// EncodedLength returns the length of the encoded trailers if the length could
+// be determined without retrieving the header values. Returns -1 if the length
+// is unknown.
+func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
+	return r.trailerEncodedLength
+}
+
+// Read populates the passed in byte slice with bytes from the encoded
+// trailers. Header values are read lazily the first time Read is called.
+func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
+	if r.trailerEncodedLength == 0 {
+		return 0, io.EOF
+	}
+
+	if r.reader == nil {
+		trailerLen := r.trailerEncodedLength
+		if r.trailerEncodedLength == -1 {
+			trailerLen = 0
+		}
+		r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
+		for name, trailer := range r.trailers {
+			r.reader.WriteString(name)
+			r.reader.WriteString(trailerKeyValueSeparator)
+			v, err := trailer.Get()
+			if err != nil {
+				return 0, fmt.Errorf("failed to get trailer value, %w", err)
+			}
+			r.reader.WriteString(v)
+			r.reader.WriteString(crlf)
+		}
+	}
+
+	return r.reader.Read(p)
+}
+
+// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
+// as unsigned aws-chunked chunks. The returned reader will also include the
+// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
+// added.
+//
+// If the payload size is -1 (unknown length), the content will be buffered in
+// defaultChunkLength chunks before being wrapped in aws-chunked chunk encoding.
+func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
+	if payloadSize == -1 {
+		return newBufferedAWSChunkReader(reader, defaultChunkLength)
+	}
+
+	var endChunk bytes.Buffer
+	if payloadSize == 0 {
+		endChunk.Write(finalChunkBytes)
+		return &endChunk
+	}
+
+	endChunk.WriteString(crlf)
+	endChunk.Write(finalChunkBytes)
+
+	var header bytes.Buffer
+	header.WriteString(strconv.FormatInt(payloadSize, 16))
+	header.WriteString(crlf)
+	return io.MultiReader(
+		&header,
+		reader,
+		&endChunk,
+	)
+}
+
+// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
+// Will include the end chunk, but not the aws-chunked final `crlf` segment so
+// trailers can be added.
+//
+// Note: does not implement support for chunk extensions, e.g. chunk signing.
+type bufferedAWSChunkReader struct {
+	reader       io.Reader
+	chunkSize    int
+	chunkSizeStr string
+
+	headerBuffer *bytes.Buffer
+	chunkBuffer  *bytes.Buffer
+
+	multiReader    io.Reader
+	multiReaderLen int
+	endChunkDone   bool
+}
+
+// newBufferedAWSChunkReader returns a bufferedAWSChunkReader for reading
+// aws-chunked encoded chunks.
+func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
+	return &bufferedAWSChunkReader{
+		reader:       reader,
+		chunkSize:    chunkSize,
+		chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
+
+		headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
+		chunkBuffer:  bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
+	}
+}
+
+// Read attempts to read from the underlying io.Reader, writing aws-chunked
+// chunk encoded bytes to p. When the underlying io.Reader has been completely
+// read, the end chunk will be available. Once the end chunk is read, the
+// reader will return EOF.
+func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
+	if r.multiReaderLen == 0 && r.endChunkDone {
+		return 0, io.EOF
+	}
+	if r.multiReader == nil || r.multiReaderLen == 0 {
+		r.multiReader, r.multiReaderLen, err = r.newMultiReader()
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = r.multiReader.Read(p)
+	r.multiReaderLen -= n
+
+	if err == io.EOF && !r.endChunkDone {
+		// Edge case handling when the multi-reader has been completely read,
+		// and returned an EOF, make sure that EOF only gets returned if the
+		// end chunk was included in the multi-reader. Otherwise, the next call
+		// to read will initialize the next chunk's multi-reader.
+		err = nil
+	}
+	return n, err
+}
+
+// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
+// return an error if the underlying reader cannot be read from. Will never
+// return io.EOF.
+func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
+	// io.Copy eats the io.EOF returned by io.LimitReader. Any error that
+	// occurs here is due to an actual read error.
+	n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
+	if err != nil {
+		return nil, 0, err
+	}
+	if n == 0 {
+		// Early exit writing out only the end chunk. This does not include
+		// aws-chunk's final `crlf` so that trailers can still be added by
+		// the upstream reader.
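+		// The end chunk is the literal "0" length marker followed by CRLF,
+		// per the aws-chunked body definition linked above.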
+		r.headerBuffer.Reset()
+		r.headerBuffer.WriteString("0")
+		r.headerBuffer.WriteString(crlf)
+		r.endChunkDone = true
+
+		return r.headerBuffer, r.headerBuffer.Len(), nil
+	}
+	r.chunkBuffer.WriteString(crlf)
+
+	chunkSizeStr := r.chunkSizeStr
+	if int(n) != r.chunkSize {
+		chunkSizeStr = strconv.FormatInt(n, 16)
+	}
+
+	r.headerBuffer.Reset()
+	r.headerBuffer.WriteString(chunkSizeStr)
+	r.headerBuffer.WriteString(crlf)
+
+	return io.MultiReader(
+		r.headerBuffer,
+		r.chunkBuffer,
+	), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
new file mode 100644
index 000000000000..81ebcfcc2e3f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package checksum
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.1.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
new file mode 100644
index 000000000000..3e17d2216b72
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
@@ -0,0 +1,185 @@
+package checksum
+
+import (
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// InputMiddlewareOptions provides the options for the request
+// checksum middleware setup.
+type InputMiddlewareOptions struct {
+	// GetAlgorithm is a function to get the checksum algorithm of the
+	// input payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the algorithm
+	// and true, or false if no algorithm is specified.
+	GetAlgorithm func(interface{}) (string, bool)
+
+	// Forces the middleware to compute the input payload's checksum. The
+	// request will fail if the algorithm is not specified or unable to compute
+	// the checksum.
+	RequireChecksum bool
+
+	// Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+	// algorithm's checksum value.
+	//
+	// The checksum will not be computed, nor added as trailing checksum, if
+	// the Algorithm's header is already set on the request.
+	EnableTrailingChecksum bool
+
+	// Enables support for computing the SHA256 checksum of input payloads
+	// along with the algorithm specified checksum. Prevents downstream
+	// middleware handlers (computePayloadSHA256) from re-reading the payload.
+	//
+	// The SHA256 payload checksum will only be computed for requests that do
+	// not use TLS, or do not enable trailing checksums.
+	//
+	// The SHA256 payload hash will not be computed, if the Algorithm's header
+	// is already set on the request.
+	EnableComputeSHA256PayloadHash bool
+
+	// Enables support for setting the aws-chunked decoded content length
+	// header for the decoded length of the underlying stream. Will only be set
+	// when used with trailing checksums, and aws-chunked content-encoding.
+	EnableDecodedContentLengthHeader bool
+}
+
+// AddInputMiddleware adds the middleware for computing checksums of request
+// payloads, and validating checksums of response payloads.
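+//
+// It registers setupInputContext in the Initialize step and
+// computeInputPayloadChecksum in the Build step (and also in the Finalize
+// step when trailing checksums are enabled).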
+func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
+	// TODO ensure this works correctly with presigned URLs
+
+	// Middleware stack:
+	// * (OK)(Initialize) --none--
+	// * (OK)(Serialize) EndpointResolver
+	// * (OK)(Build) ComputeContentLength
+	// * (AD)(Build) Header ComputeInputPayloadChecksum
+	//    * SIGNED Payload - If HTTP && not support trailing checksum
+	//    * UNSIGNED Payload - If HTTPS && not support trailing checksum
+	// * (RM)(Build) ContentChecksum - OK to remove
+	// * (OK)(Build) ComputePayloadHash
+	//    * v4.dynamicPayloadSigningMiddleware
+	//    * v4.computePayloadSHA256
+	//    * v4.unsignedPayload
+	//   (OK)(Build) Set computedPayloadHash header
+	// * (OK)(Finalize) Retry
+	// * (AD)(Finalize) Trailer ComputeInputPayloadChecksum,
+	//    * Requires HTTPS && support trailing checksum
+	//    * UNSIGNED Payload
+	//    * Finalize run if HTTPS && support trailing checksum
+	// * (OK)(Finalize) Signing
+	// * (OK)(Deserialize) --none--
+
+	// Initial checksum configuration lookup middleware
+	err = stack.Initialize.Add(&setupInputContext{
+		GetAlgorithm: options.GetAlgorithm,
+	}, middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	stack.Build.Remove("ContentChecksum")
+
+	// Create the compute checksum middleware that will be added as both a
+	// build and finalize handler.
+	inputChecksum := &computeInputPayloadChecksum{
+		RequireChecksum:                  options.RequireChecksum,
+		EnableTrailingChecksum:           options.EnableTrailingChecksum,
+		EnableComputePayloadHash:         options.EnableComputeSHA256PayloadHash,
+		EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
+	}
+
+	// Insert header checksum after ComputeContentLength middleware, must also
+	// be before the computePayloadHash middleware handlers.
+	err = stack.Build.Insert(inputChecksum,
+		(*smithyhttp.ComputeContentLength)(nil).ID(),
+		middleware.After)
+	if err != nil {
+		return err
+	}
+
+	// If trailing checksums are not supported, there is no need to add the
+	// finalize handler.
+	if options.EnableTrailingChecksum {
+		err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RemoveInputMiddleware removes the compute input payload checksum middleware
+// handlers from the stack.
+func RemoveInputMiddleware(stack *middleware.Stack) {
+	id := (*setupInputContext)(nil).ID()
+	stack.Initialize.Remove(id)
+
+	id = (*computeInputPayloadChecksum)(nil).ID()
+	stack.Build.Remove(id)
+	stack.Finalize.Remove(id)
+}
+
+// OutputMiddlewareOptions provides options for configuring output checksum
+// validation middleware.
+type OutputMiddlewareOptions struct {
+	// GetValidationMode is a function to get the checksum validation
+	// mode of the output payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the validation
+	// mode and true, or false if no mode is specified.
+	GetValidationMode func(interface{}) (string, bool)
+
+	// The set of checksum algorithms that should be used for response payload
+	// checksum validation. The algorithm(s) used will be a union of the
+	// output's returned algorithms and this set.
+	//
+	// Only the first algorithm in the union is currently used.
+	ValidationAlgorithms []string
+
+	// If set the middleware will ignore output multipart checksums. Otherwise
+	// a checksum format error will be returned by the middleware.
+	IgnoreMultipartValidation bool
+
+	// When set the middleware will log when the output does not have a
+	// checksum or algorithm to validate.
+	LogValidationSkipped bool
+
+	// When set the middleware will log when the output contains a multipart
+	// checksum that was skipped and not validated.
+	LogMultipartValidationSkipped bool
+}
+
+// AddOutputMiddleware adds the middleware for validating the response
+// payload's checksum.
+func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
+	err := stack.Initialize.Add(&setupOutputContext{
+		GetValidationMode: options.GetValidationMode,
+	}, middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	// Resolve a supported priority order list of algorithms to validate.
+	algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms)
+
+	m := &validateOutputPayloadChecksum{
+		Algorithms:                    algorithms,
+		IgnoreMultipartValidation:     options.IgnoreMultipartValidation,
+		LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
+		LogValidationSkipped:          options.LogValidationSkipped,
+	}
+
+	return stack.Deserialize.Add(m, middleware.After)
+}
+
+// RemoveOutputMiddleware removes the output payload checksum validation
+// middleware handlers from the stack.
+func RemoveOutputMiddleware(stack *middleware.Stack) {
+	id := (*setupOutputContext)(nil).ID()
+	stack.Initialize.Remove(id)
+
+	id = (*validateOutputPayloadChecksum)(nil).ID()
+	stack.Deserialize.Remove(id)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
new file mode 100644
index 000000000000..b10d2150564c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
@@ -0,0 +1,480 @@
+package checksum
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	contentMD5Header                           = "Content-Md5"
+	streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
+)
+
+// computedInputChecksumsKey is the metadata key for recording the algorithm the
+// checksum was computed for and the checksum value.
+type computedInputChecksumsKey struct{}
+
+// GetComputedInputChecksums returns the map of checksum algorithm to their
+// computed value stored in the middleware Metadata. Returns false if no values
+// were stored in the Metadata.
+func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
+	vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
+	return vs, ok
+}
+
+// SetComputedInputChecksums stores the map of checksum algorithm to their
+// computed value in the middleware Metadata. Overwrites any values that
+// currently exist in the metadata.
+func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) {
+	m.Set(computedInputChecksumsKey{}, vs)
+}
+
+// computeInputPayloadChecksum middleware computes the payload checksum.
+type computeInputPayloadChecksum struct {
+	// Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+	// algorithm's checksum value.
+	//
+	// The checksum will not be computed, nor added as trailing checksum, if
+	// the Algorithm's header is already set on the request.
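+	//
+	// Trailing checksums additionally require HTTPS; the finalize handler
+	// below rejects plain HTTP requests.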
+	EnableTrailingChecksum bool
+
+	// States that a checksum is required to be included for the operation. If
+	// the Input does not specify a checksum, a fallback to the built-in MD5
+	// checksum is used.
+	//
+	// Replaces smithy-go's ContentChecksum middleware.
+	RequireChecksum bool
+
+	// Enables support for computing the SHA256 checksum of input payloads
+	// along with the algorithm specified checksum. Prevents downstream
+	// middleware handlers (computePayloadSHA256) from re-reading the payload.
+	//
+	// The SHA256 payload hash will only be computed for requests that do not
+	// use TLS, or do not enable trailing checksums.
+	//
+	// The SHA256 payload hash will not be computed, if the Algorithm's header
+	// is already set on the request.
+	EnableComputePayloadHash bool
+
+	// Enables support for setting the aws-chunked decoded content length
+	// header for the decoded length of the underlying stream. Will only be set
+	// when used with trailing checksums, and aws-chunked content-encoding.
+	EnableDecodedContentLengthHeader bool
+
+	buildHandlerRun        bool
+	deferToFinalizeHandler bool
+}
+
+// ID provides the middleware's identifier.
+func (m *computeInputPayloadChecksum) ID() string {
+	return "AWSChecksum:ComputeInputPayloadChecksum"
+}
+
+type computeInputHeaderChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputHeaderChecksumError) Error() string {
+	const intro = "compute input header checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
+
+// HandleBuild handles computing the payload's checksum, in the following cases:
+// * Is HTTP, not HTTPS
+// * RequireChecksum is true, and no checksums were specified via the Input
+// * Trailing checksums are not supported
+//
+// The build handler must be inserted in the stack before ContentPayloadHash
+// and after ComputeContentLength.
+func (m *computeInputPayloadChecksum) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	m.buildHandlerRun = true
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: fmt.Sprintf("unknown request type %T", req),
+		}
+	}
+
+	var algorithm Algorithm
+	var checksum string
+	defer func() {
+		if algorithm == "" || checksum == "" || err != nil {
+			return
+		}
+
+		// Record the checksum and algorithm that was computed
+		SetComputedInputChecksums(&metadata, map[string]string{
+			string(algorithm): checksum,
+		})
+	}()
+
+	// If no algorithm was specified, and the operation requires a checksum,
+	// fall back to the legacy content MD5 checksum.
+	algorithm, ok, err = getInputAlgorithm(ctx)
+	if err != nil {
+		return out, metadata, err
+	} else if !ok {
+		if m.RequireChecksum {
+			checksum, err = setMD5Checksum(ctx, req)
+			if err != nil {
+				return out, metadata, computeInputHeaderChecksumError{
+					Msg: "failed to compute stream's MD5 checksum",
+					Err: err,
+				}
+			}
+			algorithm = Algorithm("MD5")
+		}
+		return next.HandleBuild(ctx, in)
+	}
+
+	// If the checksum header is already set there is nothing to do.
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if checksum = req.Header.Get(checksumHeader); checksum != "" {
+		return next.HandleBuild(ctx, in)
+	}
+
+	computePayloadHash := m.EnableComputePayloadHash
+	if v := v4.GetPayloadHash(ctx); v != "" {
+		computePayloadHash = false
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	// If trailing checksums are supported, the request is HTTPS, and the
+	// stream is not nil or empty, there is nothing to do in the build stage.
+	// The checksum will be added to the request as a trailing checksum in the
+	// finalize handler.
+	//
+	// Nil and empty streams will always be handled as a request header,
+	// regardless if the operation supports trailing checksums or not.
+	if strings.EqualFold(req.URL.Scheme, "https") {
+		if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
+			if m.EnableComputePayloadHash {
+				// The payload hash is set as a header in the Build middleware
+				// handler, ContentSHA256Header.
+				ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
+			}
+
+			m.deferToFinalizeHandler = true
+			return next.HandleBuild(ctx, in)
+		}
+
+		// If trailing checksums are not enabled but the protocol is still
+		// HTTPS, disable computing the payload hash. The downstream middleware
+		// handler (ComputePayloadHash) will set the payload hash to unsigned
+		// payload, if signing was used.
+		computePayloadHash = false
+	}
+
+	// Only seekable streams are supported for non-trailing checksums, because
+	// the stream needs to be rewound before the handler can continue.
+	if stream != nil && !req.IsStreamSeekable() {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "unseekable stream is not supported without TLS and trailing checksum",
+		}
+	}
+
+	var sha256Checksum string
+	checksum, sha256Checksum, err = computeStreamChecksum(
+		algorithm, stream, computePayloadHash)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to compute stream checksum",
+			Err: err,
+		}
+	}
+
+	if err := req.RewindStream(); err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to rewind stream",
+			Err: err,
+		}
+	}
+
+	req.Header.Set(checksumHeader, checksum)
+
+	if computePayloadHash {
+		ctx = v4.SetPayloadHash(ctx, sha256Checksum)
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+type computeInputTrailingChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputTrailingChecksumError) Error() string {
+	const intro = "compute input trailing checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
+
+// HandleFinalize handles computing the payload's checksum, in the following cases:
+// * Is HTTPS, not HTTP
+// * A checksum was specified via the Input
+// * Trailing checksums are supported.
+//
+// The finalize handler must be inserted in the stack before Signing, and after Retry.
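+// When this handler runs, the request body is wrapped in an aws-chunked
+// encoding whose trailer carries the computed checksum.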
+
+type computeInputTrailingChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputTrailingChecksumError) Error() string {
+	const intro = "compute input trailing checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
+
+// HandleFinalize handles computing the payload's checksum, in the following cases:
+// * Is HTTPS, not HTTP
+// * A checksum was specified via the Input
+// * Trailing checksums are supported.
+//
+// The finalize handler must be inserted in the stack before Signing, and after Retry.
+func (m *computeInputPayloadChecksum) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !m.deferToFinalizeHandler {
+		if !m.buildHandlerRun {
+			return out, metadata, computeInputTrailingChecksumError{
+				Msg: "build handler was removed without also removing finalize handler",
+			}
+		}
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: fmt.Sprintf("unknown request type %T", req),
+		}
+	}
+
+	// Trailing checksums are only supported when TLS is enabled.
+	if !strings.EqualFold(req.URL.Scheme, "https") {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "HTTPS required",
+		}
+	}
+
+	// If no algorithm was specified, there is nothing to do.
+	algorithm, ok, err := getInputAlgorithm(ctx)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to get algorithm",
+			Err: err,
+		}
+	} else if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "no algorithm specified",
+		}
+	}
+
+	// If the checksum header is already set before finalize could run, there
+	// is nothing to do.
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if req.Header.Get(checksumHeader) != "" {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	if stream == nil || streamLength == 0 {
+		// Nil and empty streams are handled by the Build handler. They are not
+		// supported by the trailing checksums finalize handler. There is no
+		// benefit to sending them as trailers compared to headers.
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "nil or empty streams are not supported",
+		}
+	}
+
+	checksumReader, err := newComputeChecksumReader(stream, algorithm)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to create checksum reader",
+			Err: err,
+		}
+	}
+
+	awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
+		func(o *awsChunkedEncodingOptions) {
+			o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
+				Get:    checksumReader.Base64Checksum,
+				Length: checksumReader.Base64ChecksumLength(),
+			}
+			o.StreamLength = streamLength
+		})
+
+	for key, values := range awsChunkedReader.HTTPHeaders() {
+		for _, value := range values {
+			req.Header.Add(key, value)
+		}
+	}
+
+	// Setting the stream on the request will create a copy. The content length
+	// is not updated until after the request is copied to prevent impacting
+	// upstream middleware.
+	req, err = req.SetStream(awsChunkedReader)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed updating request to trailing checksum wrapped stream",
+			Err: err,
+		}
+	}
+	req.ContentLength = awsChunkedReader.EncodedLength()
+	in.Request = req
+
+	// Add the decoded content length header if the original stream's content
+	// length is known.
+	if streamLength != -1 && m.EnableDecodedContentLengthHeader {
+		req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
+	}
+
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	if err == nil {
+		checksum, err := checksumReader.Base64Checksum()
+		if err != nil {
+			return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
+		}
+
+		// Record the checksum and algorithm that was computed
+		SetComputedInputChecksums(&metadata, map[string]string{
+			string(algorithm): checksum,
+		})
+	}
+
+	return out, metadata, err
+}
+
+func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
+	ctxAlgorithm := getContextInputAlgorithm(ctx)
+	if ctxAlgorithm == "" {
+		return "", false, nil
+	}
+
+	algorithm, err := ParseAlgorithm(ctxAlgorithm)
+	if err != nil {
+		return "", false, fmt.Errorf(
+			"failed to parse algorithm, %w", err)
+	}
+
+	return algorithm, true, nil
+}
+
+func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
+	checksum string, sha256Checksum string, err error,
+) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return "", "", fmt.Errorf(
+			"failed to get hasher for checksum algorithm, %w", err)
+	}
+
+	var sha256Hasher hash.Hash
+	var batchHasher io.Writer = hasher
+
+	// Compute payload hash for the protocol. To prevent another handler
+	// (computePayloadSHA256) from re-reading the body, also compute the
+	// SHA256 for request signing. If the configured checksum algorithm is
+	// SHA256, don't double wrap the stream with another SHA256 hasher.
+	if computePayloadHash && algorithm != AlgorithmSHA256 {
+		sha256Hasher = sha256.New()
+		batchHasher = io.MultiWriter(hasher, sha256Hasher)
+	}
+
+	if stream != nil {
+		if _, err = io.Copy(batchHasher, stream); err != nil {
+			return "", "", fmt.Errorf(
+				"failed to read stream to compute hash, %w", err)
+		}
+	}
+
+	checksum = string(base64EncodeHashSum(hasher))
+	if computePayloadHash {
+		if algorithm != AlgorithmSHA256 {
+			sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
+		} else {
+			sha256Checksum = string(hexEncodeHashSum(hasher))
+		}
+	}
+
+	return checksum, sha256Checksum, nil
+}
+
+func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
+	if v := req.ContentLength; v > 0 {
+		return v, nil
+	}
+
+	if length, ok, err := req.StreamLength(); err != nil {
+		return 0, fmt.Errorf("failed getting request stream's length, %w", err)
+	} else if ok {
+		return length, nil
+	}
+
+	return -1, nil
+}
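The legacy Content-MD5 fallback implemented by `setMD5Checksum` below amounts to hashing the payload and base64-encoding the digest; a minimal standalone sketch (illustrative only, not part of the vendored code):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// MD5 of the payload, sent base64-encoded in the Content-MD5 header.
	sum := md5.Sum([]byte("example payload"))
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}
```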
+
+// setMD5Checksum computes the MD5 of the request payload and sets it to the
+// Content-MD5 header. It returns the MD5 base64 encoded string, or an error.
+//
+// If the MD5 is already set as the Content-MD5 header, that value will be
+// returned, and nothing else will be done.
+//
+// If the payload is empty, no MD5 will be computed. No error will be returned.
+// Empty payloads do not have an MD5 value.
+//
+// Replaces the smithy-go middleware for httpChecksum trait.
+func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return v, nil
+	}
+	stream := req.GetStream()
+	if stream == nil {
+		return "", nil
+	}
+
+	if !req.IsStreamSeekable() {
+		return "", fmt.Errorf(
+			"unseekable stream is not supported for computing md5 checksum")
+	}
+
+	v, err := computeMD5Checksum(stream)
+	if err != nil {
+		return "", err
+	}
+	if err := req.RewindStream(); err != nil {
+		return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
+	}
+	// set the 'Content-MD5' header
+	req.Header.Set(contentMD5Header, string(v))
+	return string(v), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
new file mode 100644
index 000000000000..f72952549764
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
@@ -0,0 +1,117 @@
+package checksum
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// setupChecksumContext is the initial middleware that looks up the input
+// used to configure checksum behavior. This middleware must be executed before
+// the input validation step or any other checksum middleware.
+type setupInputContext struct {
+	// GetAlgorithm is a function to get the checksum algorithm of the
+	// input payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the algorithm
+	// and true, or false if no algorithm is specified.
+	GetAlgorithm func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupInputContext) ID() string {
+	return "AWSChecksum:SetupInputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupInputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if validation algorithm is specified.
+	if m.GetAlgorithm != nil {
+		// check if the input resource has a checksum algorithm
+		algorithm, ok := m.GetAlgorithm(in.Parameters)
+		if ok && len(algorithm) != 0 {
+			ctx = setContextInputAlgorithm(ctx, algorithm)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// inputAlgorithmKey is the key set on context used to identify and retrieve
+// the request checksum algorithm if present on the context.
+type inputAlgorithmKey struct{}
+
+// setContextInputAlgorithm sets the request checksum algorithm on the context.
+//
+// Scoped to stack values.
+func setContextInputAlgorithm(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value)
+}
+
+// getContextInputAlgorithm returns the checksum algorithm from the context if
+// one was specified. Empty string is returned if one is not specified.
+//
+// Scoped to stack values.
+func getContextInputAlgorithm(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
+	return v
+}
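The context plumbing above follows Go's unexported-key pattern; a minimal standalone sketch of the same shape (using plain `context.WithValue` for illustration, whereas the vendored code uses `middleware.WithStackValue` so the value is scoped to a single middleware stack invocation):

```go
package main

import (
	"context"
	"fmt"
)

// algorithmKey is unexported, so no other package can collide with it.
type algorithmKey struct{}

func setAlgorithm(ctx context.Context, v string) context.Context {
	return context.WithValue(ctx, algorithmKey{}, v)
}

func getAlgorithm(ctx context.Context) string {
	// Safe type assertion: empty string when the value is absent.
	v, _ := ctx.Value(algorithmKey{}).(string)
	return v
}

func main() {
	ctx := setAlgorithm(context.Background(), "CRC32")
	fmt.Println(getAlgorithm(ctx)) // CRC32
}
```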
+
+type setupOutputContext struct {
+	// GetValidationMode is a function to get the checksum validation
+	// mode of the output payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the validation
+	// mode and true, or false if no mode is specified.
+	GetValidationMode func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupOutputContext) ID() string {
+	return "AWSChecksum:SetupOutputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupOutputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if validation mode is specified.
+	if m.GetValidationMode != nil {
+		// check if the input resource has a checksum validation mode
+		mode, ok := m.GetValidationMode(in.Parameters)
+		if ok && len(mode) != 0 {
+			ctx = setContextOutputValidationMode(ctx, mode)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// outputValidationModeKey is the key set on context used to identify if
+// output checksum validation is enabled.
+type outputValidationModeKey struct{}
+
+// setContextOutputValidationMode sets the response checksum validation mode
+// on the context.
+//
+// Scoped to stack values.
+func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
+}
+
+// getContextOutputValidationMode returns the response checksum validation
+// state, if one was specified. Empty string is returned if one is not
+// specified.
+//
+// Scoped to stack values.
+func getContextOutputValidationMode(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
new file mode 100644
index 000000000000..9fde12d86d7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
@@ -0,0 +1,131 @@
+package checksum
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// outputValidationAlgorithmsUsedKey is the metadata key for indexing the
+// algorithms that were used by the middleware's validation.
+type outputValidationAlgorithmsUsedKey struct{}
+
+// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used
+// stored in the middleware Metadata. Returns false if no algorithms were
+// stored in the Metadata.
+func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
+	vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
+	return vs, ok
+}
+
+// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
+// middleware Metadata.
+func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
+	m.Set(outputValidationAlgorithmsUsedKey{}, vs)
+}
+
+// validateOutputPayloadChecksum middleware computes the payload checksum of
+// the received response and validates it against the checksum returned by the
+// service.
+type validateOutputPayloadChecksum struct {
+	// Algorithms represents a priority-ordered list of valid checksum
+	// algorithms that should be validated when present in HTTP response
+	// headers.
+	Algorithms []Algorithm
+
+	// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
+	// will be ignored.
+	IgnoreMultipartValidation bool
+
+	// When set the middleware will log when output does not have checksum or
+	// algorithm to validate.
+	LogValidationSkipped bool
+
+	// When set the middleware will log when the output contains a multipart
+	// checksum that was skipped and not validated.
+	LogMultipartValidationSkipped bool
+}
+
+func (m *validateOutputPayloadChecksum) ID() string {
+	return "AWSChecksum:ValidateOutputPayloadChecksum"
+}
+
+// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
+// body with an io.ReadCloser that will validate its checksum.
+func (m *validateOutputPayloadChecksum) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	// If there is no validation mode specified nothing is supported.
+	if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
+		}
+	}
+
+	var expectedChecksum string
+	var algorithmToUse Algorithm
+	for _, algorithm := range m.Algorithms {
+		value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
+		if len(value) == 0 {
+			continue
+		}
+
+		expectedChecksum = value
+		algorithmToUse = algorithm
+	}
+
+	// TODO this must validate the validation mode is set to enabled.
+
+	logger := middleware.GetLogger(ctx)
+
+	// Skip validation if no checksum algorithm or checksum is available.
+	if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
+		if m.LogValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn,
+				"Response has no supported checksum. Not validating response payload.")
+		}
+		return out, metadata, nil
+	}
+
+	// Ignore multipart validation
+	if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
+		if m.LogMultipartValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
+		}
+		return out, metadata, nil
+	}
+
+	body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
+	}
+	response.Body = body
+
+	// Update the metadata to include the set of the checksum algorithms that
+	// will be validated.
+ SetOutputValidationAlgorithmsUsed(&metadata, []string{ + string(algorithmToUse), + }) + + return out, metadata, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md new file mode 100644 index 000000000000..ebcc6a461b9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -0,0 +1,95 @@ +# v1.9.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go new file mode 100644 index 000000000000..cc919701a06f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -0,0 +1,48 @@ +package presignedurl + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +// WithIsPresigning adds the isPresigning sentinel value to a context to signal +// that the middleware stack is using the presign flow. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func WithIsPresigning(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, isPresigningKey{}, true) +} + +// GetIsPresigning returns if the context contains the isPresigning sentinel +// value for presigning flows. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetIsPresigning(ctx context.Context) bool { + v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) + return v +} + +type isPresigningKey struct{} + +// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// will update the stack's context to be flagged as being invoked for the +// purpose of presigning. +func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) +} + +type asIsPresigningMiddleware struct{} + +func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } + +func (asIsPresigningMiddleware) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + ctx = WithIsPresigning(ctx) + return next.HandleInitialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go new file mode 100644 index 000000000000..1b85375cf806 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go @@ -0,0 +1,3 @@ +// Package presignedurl provides the customizations for API clients to fill in +// presigned URLs into input parameters. 
+package presignedurl
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
new file mode 100644
index 000000000000..a312a9efa2ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.9.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 000000000000..1e2f5c8122a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+	"context"
+	"fmt"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters into a
+// presigned URL.
+type URLPresigner interface {
+	// PresignURL presigns a URL.
+	PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides a collection of accessors for retrieving and
+// setting the values needed for presigned URL generation.
+type ParameterAccessor struct {
+	// GetPresignedURL accessor points to a function that retrieves a presigned url if present
+	GetPresignedURL func(interface{}) (string, bool, error)
+
+	// GetSourceRegion accessor points to a function that retrieves source region for presigned url
+	GetSourceRegion func(interface{}) (string, bool, error)
+
+	// CopyInput accessor points to a function that takes in an input, and returns a copy.
+	CopyInput func(interface{}) (interface{}, error)
+
+	// SetDestinationRegion accessor points to a function that sets destination region on api input struct
+	SetDestinationRegion func(interface{}, string) error
+
+	// SetPresignedURL accessor points to a function that sets presigned url on api input struct
+	SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+	// Accessor is the set of parameter accessors used by this middleware
+	Accessor ParameterAccessor
+
+	// Presigner is the URLPresigner used by the middleware
+	Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+	return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Initialize.Remove((*presign)(nil).ID())
+	return err
+}
+
+type presign struct {
+	options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
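The handler below copies the input before mutating it, so the destination region never leaks into the caller's parameters. A toy sketch of that copy-then-mutate pattern (the type and field names here are hypothetical, not from the SDK):

```go
package main

import "fmt"

// presignParams is a hypothetical stand-in for an API input struct.
type presignParams struct {
	SourceRegion      string
	DestinationRegion string
}

// copyOf returns a copy so the caller's struct is never mutated,
// mirroring what ParameterAccessor.CopyInput exists for.
func copyOf(in presignParams) *presignParams {
	cp := in // value copy
	return &cp
}

func main() {
	orig := presignParams{SourceRegion: "us-west-2"}
	cp := copyOf(orig)
	cp.DestinationRegion = "us-east-1"

	fmt.Println(orig.DestinationRegion == "") // true: original untouched
}
```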
+
+func (m *presign) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// If PresignedURL is already set, ignore this middleware.
+	if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if ok {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// If the source region is not set, ignore this middleware.
+	srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if !ok || len(srcRegion) == 0 {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// Create a copy of the original input so the destination region value can
+	// be added. This ensures that value does not leak into the original
+	// request parameters.
+	paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Destination region is the API client's configured region.
+	dstRegion := awsmiddleware.GetRegion(ctx)
+	if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	}
+
+	presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Update the original input with the presigned URL value.
+	if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
new file mode 100644
index 000000000000..c101ab1e64c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -0,0 +1,104 @@
+# v1.13.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-09-02) + +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. + +# v1.6.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-04) + +* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go new file mode 100644 index 000000000000..4f7017e84e75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go @@ -0,0 +1,54 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go-v2/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. 
+// +// Supported Access point resource format: +// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} +// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint +// +func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if isFIPS(a.Region) { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} + +func isFIPS(region string) bool { + return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go new file mode 100644 index 000000000000..06e1a3addd43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go @@ -0,0 +1,85 @@ +package arn + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws/arn" +) + +var supportedServiceARN = []string{ + "s3", + "s3-outposts", + "s3-object-lambda", +} + +func isSupportedServiceARN(service string) bool { + for _, name := range supportedServiceARN { + if name == service { + return true + } + } + return false +} + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. +func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) { + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. 
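`SplitResource` above tokenizes an ARN's resource component on both `/` and `:` delimiters; a standalone sketch mirroring its logic (illustrative only, not part of the vendored code):

```go
package main

import (
	"fmt"
	"strings"
)

// splitResource walks the string, cutting at each "/" or ":" delimiter,
// exactly as the vendored SplitResource does.
func splitResource(v string) []string {
	var parts []string
	var offset int
	for offset <= len(v) {
		idx := strings.IndexAny(v[offset:], "/:")
		if idx < 0 {
			parts = append(parts, v[offset:])
			break
		}
		parts = append(parts, v[offset:idx+offset])
		offset += idx + 1
	}
	return parts
}

func main() {
	fmt.Println(splitResource("outpost/op-1234567890123456/accesspoint/myaccesspoint"))
	// [outpost op-1234567890123456 accesspoint myaccesspoint]
}
```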
+type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 000000000000..2b9bd6c21639 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,130 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go-v2/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if isFIPS(a.Region) { + return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. 
+type OutpostBucketARN struct {
+	arn.ARN
+	BucketName string
+	OutpostID  string
+}
+
+// GetOutpostID returns the outpost id of outpost bucket arn
+func (o OutpostBucketARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+	return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+//
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+	if len(resParts) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	bucketName = strings.TrimSpace(resParts[0])
+	if len(bucketName) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	return bucketName, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 000000000000..513154cc0e31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+	Resource
+
+	isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+	AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
new file mode 100644
index 000000000000..b51532085f6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
@@ -0,0 +1,73 @@
+package s3shared
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// ARNLookup is the initial middleware that looks up if an ARN is provided.
+// This middleware is responsible for fetching an ARN from an ARN-able field, and registering the ARN on
+// middleware context. This middleware must be executed before the input validation step or any other
+// ARN processing middleware.
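+//
+// A sketch of how a client might register this middleware; the operation input
+// type and its Bucket field here are assumptions for illustration:
+//
+//	err := stack.Initialize.Add(&ARNLookup{
+//		GetARNValue: func(in interface{}) (*string, bool) {
+//			input, ok := in.(*GetObjectInput) // hypothetical input type
+//			if !ok || input.Bucket == nil {
+//				return nil, false
+//			}
+//			return input.Bucket, true
+//		},
+//	}, middleware.Before)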
+type ARNLookup struct {
+
+	// GetARNValue takes an input interface and returns a pointer to string and a bool
+	GetARNValue func(interface{}) (*string, bool)
+}
+
+// ID for the middleware
+func (m *ARNLookup) ID() string {
+	return "S3Shared:ARNLookup"
+}
+
+// HandleInitialize handles the behavior of this initialize step
+func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// check if GetARNValue is supported
+	if m.GetARNValue == nil {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// check if the input resource is an ARN; if not, go to next
+	v, ok := m.GetARNValue(in.Parameters)
+	if !ok || v == nil || !arn.IsARN(*v) {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// if the value is an ARN, parse it and put it on the context
+	av, err := arn.Parse(*v)
+	if err != nil {
+		return out, metadata, fmt.Errorf("error parsing arn: %w", err)
+	}
+	// set parsed arn on context
+	ctx = setARNResourceOnContext(ctx, av)
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// arnResourceKey is the key set on context used to identify and retrieve an ARN resource
+// if present on the context.
+type arnResourceKey struct{}
+
+// setARNResourceOnContext sets the S3 ARN on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context {
+	return middleware.WithStackValue(ctx, arnResourceKey{}, value)
+}
+
+// GetARNResourceFromContext returns an ARN from context and a bool indicating
+// presence of ARN on ctx.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) {
+	v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN)
+	return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
new file mode 100644
index 000000000000..8926e5970e42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
@@ -0,0 +1,22 @@
+package config
+
+import "context"
+
+// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion
+type UseARNRegionProvider interface {
+	GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error)
+}
+
+// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and an error if one is encountered.
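+//
+// For example, a minimal config source satisfying UseARNRegionProvider
+// (illustrative only, not part of this package):
+//
+//	type staticUseARNRegion bool
+//
+//	func (s staticUseARNRegion) GetS3UseARNRegion(ctx context.Context) (bool, bool, error) {
+//		return bool(s), true, nil
+//	}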
+func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseARNRegionProvider); ok {
+			value, found, err = p.GetS3UseARNRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
new file mode 100644
index 000000000000..aa0c3714e2b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
@@ -0,0 +1,183 @@
+package s3shared
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// TODO: fix these error statements to be relevant to v2 sdk
+
+const (
+	invalidARNErrorErrCode    = "InvalidARNError"
+	configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for Invalid ARN
+type InvalidARNError struct {
+	message  string
+	resource arn.Resource
+	origErr  error
+}
+
+// Error returns the InvalidARN error string
+func (e InvalidARNError) Error() string {
+	var extra string
+	if e.resource != nil {
+		extra = "ARN: " + e.resource.String()
+	}
+	msg := invalidARNErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+
+	return msg
+}
+
+// Unwrap returns the original error wrapped by InvalidARNError
+func (e InvalidARNError) Unwrap() error {
+	return e.origErr
+}
+
+// NewInvalidARNError denotes an invalid ARN error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "invalid ARN",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithUnsupportedPartitionError denotes that the resource ARN is not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for the target ARN partition",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithFIPSError denotes that the resource ARN is not supported for a FIPS region
+//
+// Deprecated: FIPS will not appear in the ARN region component.
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for FIPS region",
+		resource: resource,
+		origErr:  err,
+	}
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+	message           string
+	resource          arn.Resource
+	clientPartitionID string
+	clientRegion      string
+	origErr           error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+	extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+		e.resource, e.clientPartitionID, e.clientRegion)
+
+	msg := configurationErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+	return msg
+}
+
+// Unwrap returns the original error wrapped by ConfigurationError
+func (e ConfigurationError) Unwrap() error {
+	return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a client partition mismatch error
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client partition does not match provided ARN partition",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientRegionMismatchError denotes cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client region does not match provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for fips but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "use of ARN is not supported when client or request is configured for FIPS",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Dual-stack but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
new file mode 100644
index 000000000000..6156c285103b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package s3shared
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
new file mode 100644
index 000000000000..85b60d2a1b92
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
@@ -0,0 +1,29 @@
+package s3shared
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// hostID is used to retrieve host id from response metadata
+type hostID struct {
+}
+
+// SetHostIDMetadata sets the provided host id over middleware metadata
+func SetHostIDMetadata(metadata *middleware.Metadata, id string) {
+	metadata.Set(hostID{}, id)
+}
+
+// GetHostIDMetadata retrieves the host id from middleware metadata. It
+// returns the host id as a string along with a boolean indicating the
+// presence of the host id on the middleware metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(hostID{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(hostID{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
new file mode 100644
index 000000000000..f02604cb62aa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
@@ -0,0 +1,28 @@
+package s3shared
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// clonedInputKey used to denote if request input was cloned.
+type clonedInputKey struct{}
+
+// SetClonedInputKey sets a key on context to denote input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetClonedInputKey(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, clonedInputKey{}, value)
+}
+
+// IsClonedInput reports whether the context key for cloned input was set.
+// If set, we can infer that the request input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func IsClonedInput(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
new file mode 100644
index 000000000000..f52f2f11e91b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
@@ -0,0 +1,52 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const metadataRetrieverID = "S3MetadataRetriever"
+
+// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware
+func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+	// add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as
+	// host id, request id from response header returned by operation deserializers
+	return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type metadataRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *metadataRetriever) ID() string {
+	return metadataRetrieverID
+}
+
+func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// check for header for Request id
+	if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 {
+		// set request ID on metadata for successful responses.
+		awsmiddleware.SetRequestIDMetadata(&metadata, v)
+	}
+
+	// look up host-id
+	if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 {
+		// set host ID on metadata for successful responses.
+		SetHostIDMetadata(&metadata, v)
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
new file mode 100644
index 000000000000..bee8da3fe346
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
@@ -0,0 +1,77 @@
+package s3shared
+
+import (
+	"fmt"
+	"strings"
+
+	awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ResourceRequest represents an ARN resource and api request metadata
+type ResourceRequest struct {
+	Resource arn.Resource
+	// RequestRegion is the region configured on the request config
+	RequestRegion string
+
+	// SigningRegion is the signing region resolved for the request
+	SigningRegion string
+
+	// PartitionID is the resolved partition id for the provided request region
+	PartitionID string
+
+	// UseARNRegion indicates if client should use the region provided in an ARN resource
+	UseARNRegion bool
+
+	// UseFIPS indicates if the client is configured for FIPS
+	UseFIPS bool
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// ResourceConfiguredForFIPS returns true if the resource ARN's region is FIPS
+//
+// Deprecated: FIPS will not be present in the ARN region
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+	return IsFIPS(r.ARN().Region)
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return r.UseARNRegion
+}
+
+// IsCrossPartition returns true if the request is configured for the region of another
+// partition than the partition that the resource ARN region resolves to. IsCrossPartition
+// will not return an error if the request is not configured with a specific partition id.
+// This might happen if the customer provides a custom endpoint url, but does not associate
+// a partition id with it.
+func (r ResourceRequest) IsCrossPartition() (bool, error) {
+	rv := r.PartitionID
+	if len(rv) == 0 {
+		return false, nil
+	}
+
+	av := r.Resource.GetARN().Partition
+	if len(av) == 0 {
+		return false, fmt.Errorf("no partition id for provided ARN")
+	}
+
+	return !strings.EqualFold(rv, av), nil
+}
+
+// IsCrossRegion returns true if the request signing region is not the same as the ARN region
+func (r ResourceRequest) IsCrossRegion() bool {
+	v := r.SigningRegion
+	return !strings.EqualFold(v, r.Resource.GetARN().Region)
+}
+
+// IsFIPS returns true if the region is a FIPS pseudo-region
+//
+// Deprecated: FIPS should be specified via EndpointOptions.
+func IsFIPS(region string) bool {
+	return strings.HasPrefix(region, "fips-") ||
+		strings.HasSuffix(region, "-fips")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
new file mode 100644
index 000000000000..85733624306d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
@@ -0,0 +1,33 @@
+package s3shared
+
+import (
+	"errors"
+	"fmt"
+
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+)
+
+// ResponseError provides the HTTP-centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
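+//
+// Callers typically access it through errors.As (illustrative sketch):
+//
+//	var re *s3shared.ResponseError
+//	if errors.As(err, &re) {
+//		fmt.Println(re.ServiceRequestID(), re.ServiceHostID())
+//	}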
+type ResponseError struct {
+	*awshttp.ResponseError
+
+	// HostID associated with response error
+	HostID string
+}
+
+// ServiceHostID returns the host id associated with Response Error
+func (e *ResponseError) ServiceHostID() string { return e.HostID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"https response error StatusCode: %d, RequestID: %s, HostID: %s, %v",
+		e.Response.StatusCode, e.RequestID, e.HostID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type that
+// the ResponseError embeds (e.g. the S3 HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+	return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
new file mode 100644
index 000000000000..54357624506d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
@@ -0,0 +1,60 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before)
+}
+
+type errorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *errorWrapper) ID() string {
+	return "ResponseErrorWrapper"
+}
+
+func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err == nil {
+		// Nothing to do when there is no error.
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+ return out, metadata, err + } + + // look for request id in metadata + reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) + // look for host id in metadata + hostID, _ := GetHostIDMetadata(metadata) + + // Wrap the returned smithy error with the request id retrieved from the metadata + err = &ResponseError{ + ResponseError: &awshttp.ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: resp, + Err: err, + }, + RequestID: reqID, + }, + HostID: hostID, + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go new file mode 100644 index 000000000000..22773199f62a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go @@ -0,0 +1,78 @@ +package s3shared + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" +) + +// EnableDualstack represents middleware struct for enabling dualstack support +// +// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support +type EnableDualstack struct { + // UseDualstack indicates if dualstack endpoint resolving is to be enabled + UseDualstack bool + + // DefaultServiceID is the service id prefix used in endpoint resolving + // by default service-id is 's3' and 's3-control' for service s3, s3control. + DefaultServiceID string +} + +// ID returns the middleware ID. +func (*EnableDualstack) ID() string { + return "EnableDualstack" +} + +// HandleSerialize handles serializer middleware behavior when middleware is executed +func (u *EnableDualstack) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + // check for host name immutable property + if smithyhttp.GetHostnameImmutable(ctx) { + return next.HandleSerialize(ctx, in) + } + + serviceID := awsmiddle.GetServiceID(ctx) + + // s3-control may be represented as `S3 Control` as in model + if serviceID == "S3 Control" { + serviceID = "s3-control" + } + + if len(serviceID) == 0 { + // default service id + serviceID = u.DefaultServiceID + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + if u.UseDualstack { + parts := strings.Split(req.URL.Host, ".") + if len(parts) < 3 { + return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host) + } + + for i := 0; i+1 < len(parts); i++ { + if strings.EqualFold(parts[i], serviceID) { + parts[i] = parts[i] + ".dualstack" + break + } + } + + // construct the url host + req.URL.Host = strings.Join(parts, ".") + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go new file mode 100644 index 000000000000..65fd07e0006d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go @@ -0,0 +1,89 @@ +package s3shared + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" +) + +// ErrorComponents represents the error response fields +// that will be deserialized from an xml error response body +type ErrorComponents struct { + Code 
string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` +} + +// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body +func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { + var errComponents ErrorComponents + if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) + } + return errComponents, nil +} + +// GetWrappedErrorResponseComponents returns the error fields from an xml error response body +// in which error code, and message are wrapped by a tag +func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { + var errComponents struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + } + + if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) + } + + return ErrorComponents{ + Code: errComponents.Code, + Message: errComponents.Message, + RequestID: errComponents.RequestID, + HostID: errComponents.HostID, + }, nil +} + +// GetErrorResponseComponents retrieves error components according to passed in options +func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) { + var errComponents ErrorComponents + var err error + + if options.IsWrappedWithErrorTag { + errComponents, err = GetWrappedErrorResponseComponents(r) + } else { + errComponents, err = GetUnwrappedErrorResponseComponents(r) + } + + if err != nil { + return ErrorComponents{}, err + } + + // If an error code or message is not retrieved, it is derived from the http status code + // eg, for S3 service, we derive err code and message, if none is found + if options.UseStatusCode && len(errComponents.Code) == 0 && + len(errComponents.Message) == 0 { + // derive code and message from status code + statusText := http.StatusText(options.StatusCode) + errComponents.Code = strings.Replace(statusText, " ", "", -1) + errComponents.Message = statusText + } + return errComponents, nil +} + +// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service +type ErrorResponseDeserializerOptions struct { + // UseStatusCode denotes if status code should be used to retrieve error code, msg + UseStatusCode bool + + // StatusCode is status code of error response + StatusCode int + + //IsWrappedWithErrorTag represents if error response's code, msg is wrapped within an + // additional tag + IsWrappedWithErrorTag bool +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md new file mode 100644 index 000000000000..940ca7932f4b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -0,0 +1,168 @@ +# v1.26.9 (2022-05-06) + +* No change notes available for this release. + +# v1.26.8 (2022-05-03) + +* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs. 
+
+# v1.26.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2022-04-12)
+
+* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+
+# v1.26.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2022-01-28)
+
+* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
+
+# v1.24.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+* **Feature**: Updated to latest service endpoints
+
+# v1.21.0 (2021-12-02)
+
+* **Feature**: API client updated
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.19.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2021-11-12)
+
+* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.
+
+# v1.18.0 (2021-11-06)
+
+* **Feature**: Support has been added for the SelectObjectContent API.
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2021-09-10) + +* No change notes available for this release. + +# v1.15.0 (2021-09-02) + +* **Feature**: API client updated +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2021-08-19) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-08-04) + +* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-06-04) + +* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. +* **Feature**: Updated service client to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-05-25) + +* **Feature**: API client updated + +# v1.8.0 (2021-05-20) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Feature**: Updated to latest service API model. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go new file mode 100644 index 000000000000..b17db2ae82f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -0,0 +1,769 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "S3" +const ServiceAPIVersion = "2006-03-01" + +// Client provides the API client to make operations call for Amazon Simple Storage +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + resolveHTTPSignerV4a(&options) + + for _, fn := range optFns { + fn(&options) + } + + resolveCredentialProvider(&options) + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // Allows you to disable S3 Multi-Region access points feature. + DisableMultiRegionAccessPoints bool + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. 
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will call
+	// an operation that fails with a retryable error. A value of 0 is ignored, and
+	// will not be used to configure the API client created default retryer, or modify
+	// per operation call's retry max attempts. When creating new API clients this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. If specified in an operation call's functional
+	// options with a value that is different than the constructed client's Options,
+	// the Client's Retryer will be wrapped to use the operation's specific
+	// RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// Retryer option is not also specified. When creating new API clients this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. Currently does not support per operation call
+	// overrides; this may change in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// Allows you to enable ARN region support for the service.
+	UseARNRegion bool
+
+	// Allows you to enable the S3 Accelerate feature. All operations compatible with S3
+	// Accelerate will use the accelerate endpoint for requests. Requests not
+	// compatible will fall back to normal S3 requests. The bucket must be enabled for
+	// accelerate to be used with an S3 client with accelerate enabled. If the bucket is
+	// not enabled for accelerate an error will be returned. The bucket name must be
+	// DNS compatible to work with accelerate.
+	UseAccelerate bool
+
+	// Allows you to enable dual-stack endpoint support for the service.
+	//
+	// Deprecated: Set dual-stack by setting UseDualStackEndpoint on
+	// EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint
+	// field is set it overrides this field value.
+	UseDualstack bool
+
+	// Allows you to enable the client to use path-style addressing, i.e.,
+	// https://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will use virtual
+	// hosted bucket addressing when possible (https://BUCKET.s3.amazonaws.com/KEY).
+	UsePathStyle bool
+
+	// Signature Version 4a (SigV4a) Signer
+	httpSignerV4a httpSignerV4a
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time. Currently does not support per operation call
+	// overrides; this may change in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+ HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + setSafeEventStreamClientLogMode(&options, opID) + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + resolveCredentialProvider(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseARNRegion(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer {
+	return v4.NewSigner(func(so *v4.SignerOptions) {
+		so.Logger = o.Logger
+		so.LogSigning = o.ClientLogMode.IsSigning()
+		so.DisableURIPathEscaping = true
+	})
+}
+
+func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
+	mo := retry.AddRetryMiddlewaresOptions{
+		Retryer:          o.Retryer,
+		LogRetryAttempts: o.ClientLogMode.IsRetries(),
+	}
+	return retry.AddRetryMiddlewares(stack, mo)
+}
+
+// resolveUseARNRegion resolves the UseARNRegion S3 configuration.
+func resolveUseARNRegion(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.UseARNRegion = value
+	}
+	return nil
+}
+
+// resolveUseDualStackEndpoint resolves the dual-stack endpoint configuration.
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseDualStackEndpoint = value
+	}
+	return nil
+}
+
+// resolveUseFIPSEndpoint resolves the FIPS endpoint configuration.
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+	if len(cfg.ConfigSources) == 0 {
+		return nil
+	}
+	value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+	if err != nil {
+		return err
+	}
+	if found {
+		o.EndpointOptions.UseFIPSEndpoint = value
+	}
+	return nil
+}
+
+func resolveCredentialProvider(o *Options) {
+	if o.Credentials == nil {
+		return
+	}
+
+	if _, ok := o.Credentials.(v4a.CredentialsProvider); ok {
+		return
+	}
+
+	switch o.Credentials.(type) {
+	case aws.AnonymousCredentials, *aws.AnonymousCredentials:
+		return
+	}
+
+	o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials}
+}
+
+func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error {
+	mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{
+		CredentialsProvider: o.Credentials,
+		V4Signer:            o.HTTPSignerV4,
+		V4aSigner:           o.httpSignerV4a,
+		LogSigning:          o.ClientLogMode.IsSigning(),
+	})
+
+	return s3cust.RegisterSigningMiddleware(stack, mw)
+}
+
+type httpSignerV4a interface {
+	SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash,
+		service string, regionSet []string, signingTime time.Time,
+		optFns ...func(*v4a.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4a(o *Options) {
+	if o.httpSignerV4a != nil {
+		return
+	}
+	o.httpSignerV4a = newDefaultV4aSigner(*o)
+}
+
+func newDefaultV4aSigner(o Options) *v4a.Signer {
+	return v4a.NewSigner(func(so *v4a.SignerOptions) {
+		so.Logger = o.Logger
+		so.LogSigning = o.ClientLogMode.IsSigning()
+		so.DisableURIPathEscaping = true
+	})
+}
+
+func addMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+	return s3shared.AddMetadataRetrieverMiddleware(stack)
+}
+
+// ComputedInputChecksumsMetadata provides information about the algorithms used to
+// compute the checksum(s) of the input payload.
+type ComputedInputChecksumsMetadata struct {
+	// ComputedChecksums is a map of algorithm name to the checksum value computed
+	// for the input payload.
+	ComputedChecksums map[string]string
+}
+
+// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of
+// algorithm names to input payload checksum values.
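+//
+// Illustrative sketch, not part of the generated file; assumes an existing
+// client and a completed PutObject call whose output is out:
+//
+//	if cs, ok := GetComputedInputChecksumsMetadata(out.ResultMetadata); ok {
+//		for algo, sum := range cs.ComputedChecksums {
+//			fmt.Printf("%s: %s\n", algo, sum)
+//		}
+//	}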
+func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) {
+	values, ok := internalChecksum.GetComputedInputChecksums(m)
+	if !ok {
+		return ComputedInputChecksumsMetadata{}, false
+	}
+	return ComputedInputChecksumsMetadata{
+		ComputedChecksums: values,
+	}, true
+}
+
+// ChecksumValidationMetadata contains metadata such as the checksum algorithm used
+// for data integrity validation.
+type ChecksumValidationMetadata struct {
+	// AlgorithmsUsed is the set of checksum algorithms used to validate the
+	// response payload. The response payload must be completely read in order for the
+	// checksum validation to be performed. An error is returned by the operation
+	// output's response io.ReadCloser if the computed checksums are invalid.
+	AlgorithmsUsed []string
+}
+
+// GetChecksumValidationMetadata returns the set of algorithms used to validate
+// the response payload. The response payload must be completely read in order
+// for the checksum validation to be performed. An error is returned by the
+// operation output's response io.ReadCloser if the computed checksums are
+// invalid. Returns false if no checksum algorithm used metadata was found.
+func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) {
+	values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m)
+	if !ok {
+		return ChecksumValidationMetadata{}, false
+	}
+	return ChecksumValidationMetadata{
+		AlgorithmsUsed: append(make([]string, 0, len(values)), values...),
+	}, true
+}
+
+// nopGetBucketAccessor is a no-op accessor for operations that don't support a
+// bucket member as input
+func nopGetBucketAccessor(input interface{}) (*string, bool) {
+	return nil, false
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+	return s3shared.AddResponseErrorMiddleware(stack)
+}
+
+func disableAcceptEncodingGzip(stack *middleware.Stack) error {
+	return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{})
+}
+
+// ResponseError provides the HTTP-centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError interface {
+	error
+
+	ServiceHostID() string
+	ServiceRequestID() string
+}
+
+var _ ResponseError = (*s3shared.ResponseError)(nil)
+
+// GetHostIDMetadata retrieves the host ID from middleware metadata and returns it
+// as a string, along with a boolean indicating whether the host ID was present in
+// the metadata.
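+//
+// Illustrative sketch, not part of the generated file; out is assumed to be
+// any operation output, e.g. from GetObject (the host ID is S3's extended
+// request ID, the x-amz-id-2 response header):
+//
+//	if hostID, ok := GetHostIDMetadata(out.ResultMetadata); ok {
+//		log.Printf("host ID: %s", hostID)
+//	}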
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+	return s3shared.GetHostIDMetadata(metadata)
+}
+
+// HTTPPresignerV4 represents the presigner interface used by the presign URL
+// client
+type HTTPPresignerV4 interface {
+	PresignHTTP(
+		ctx context.Context, credentials aws.Credentials, r *http.Request,
+		payloadHash string, service string, region string, signingTime time.Time,
+		optFns ...func(*v4.SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// httpPresignerV4a represents the SigV4a presigner interface used by the presign
+// URL client
+type httpPresignerV4a interface {
+	PresignHTTP(
+		ctx context.Context, credentials v4a.Credentials, r *http.Request,
+		payloadHash string, service string, regionSet []string, signingTime time.Time,
+		optFns ...func(*v4a.SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+	// ClientOptions is a list of functional options to mutate the client options
+	// used by the presign client.
+	ClientOptions []func(*Options)
+
+	// Presigner is the presigner used by the presign URL client
+	Presigner HTTPPresignerV4
+
+	// Expires sets the expiration duration for the generated presigned URL; this is
+	// how long the URL should be considered valid for. If not set, or set to zero,
+	// the presigned URL defaults to expiring after 900 seconds.
+	Expires time.Duration
+
+	// presignerV4a is the presigner used by the presign URL client
+	presignerV4a httpPresignerV4a
+}
+
+func (o PresignOptions) copy() PresignOptions {
+	clientOptions := make([]func(*Options), len(o.ClientOptions))
+	copy(clientOptions, o.ClientOptions)
+	o.ClientOptions = clientOptions
+	return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOptions as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+	return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+	o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// WithPresignExpires is a helper utility to set the Expires value on presign
+// options via an optional function
+func WithPresignExpires(dur time.Duration) func(*PresignOptions) {
+	return withPresignExpires(dur).options
+}
+
+type withPresignExpires time.Duration
+
+func (w withPresignExpires) options(o *PresignOptions) {
+	o.Expires = time.Duration(w)
+}
+
+// PresignClient represents the presign URL client
+type PresignClient struct {
+	client  *Client
+	options PresignOptions
+}
+
+// NewPresignClient generates a presign client using the provided API client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+	var options PresignOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	if len(options.ClientOptions) != 0 {
+		c = New(c.options, options.ClientOptions...)
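+		// A fresh client is derived here so that presign-only ClientOptions do
+		// not mutate the caller's original client.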
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + if options.presignerV4a == nil { + options.presignerV4a = newDefaultV4aSigner(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + + // add multi-region access point presigner + signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + V4Presigner: c.Presigner, + V4aPresigner: c.presignerV4a, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = s3cust.RegisterPreSigningMiddleware(stack, signermv) + if err != nil { + return err + } + + if c.Expires < 0 { + return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires) + } + // add middleware to set expiration for s3 presigned url, if expiration is set to + // 0, this middleware sets a default expiration of 900 seconds + err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go new file mode 100644 index 000000000000..042e848a3875 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action aborts a multipart upload. After a multipart upload is aborted, no +// additional parts can be uploaded using that upload ID. The storage consumed by +// any previously uploaded parts will be freed. However, if any part uploads are +// currently in progress, those part uploads might or might not succeed. As a +// result, it might be necessary to abort a given multipart upload multiple times +// in order to completely free all storage consumed by all parts. 
To verify that +// all parts have been removed, so you don't get charged for the part storage, you +// should call the ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and +// ensure that the parts list is empty. For information about permissions required +// to use the multipart upload, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The +// following operations are related to AbortMultipartUpload: +// +// * +// CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { + if params == nil { + params = &AbortMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AbortMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AbortMultipartUploadInput struct { + + // The bucket name to which the upload was taking place. When using this action + // with an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID that identifies the multipart upload. + // + // This member is required. + UploadId *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information
+	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	RequestPayer types.RequestPayer
+
+	noSmithyDocumentSerde
+}
+
+type AbortMultipartUploadOutput struct {
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged types.RequestCharged
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "AbortMultipartUpload",
+	}
+}
+
+// getAbortMultipartUploadBucketMember returns a pointer to a string denoting the
+// provided bucket member value and a boolean indicating whether the input has a
+// modeled bucket name.
+func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) {
+	in := input.(*AbortMultipartUploadInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func 
addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getAbortMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go new file mode 100644 index 000000000000..95ff64968637 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -0,0 +1,436 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Completes a multipart upload by assembling previously uploaded parts. You first +// initiate the multipart upload and then upload all parts using the UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. +// After successfully uploading all relevant parts of an upload, you call this +// action to complete the upload. Upon receiving this request, Amazon S3 +// concatenates all the parts in ascending order by part number to create a new +// object. In the Complete Multipart Upload request, you must provide the parts +// list. You must ensure that the parts list is complete. This action concatenates +// the parts that you provide in the list. For each part in the list, you must +// provide the part number and the ETag value, returned after that part was +// uploaded. Processing of a Complete Multipart Upload request could take several +// minutes to complete. After Amazon S3 begins processing the request, it sends an +// HTTP response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial 200 +// OK response has been sent, it is important that you check the response body to +// determine whether the request succeeded. Note that if CompleteMultipartUpload +// fails, applications should be prepared to retry the failed requests. For more +// information, see Amazon S3 Error Best Practices +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). You +// cannot use Content-Type: application/x-www-form-urlencoded with Complete +// Multipart Upload requests. Also, if you do not provide a Content-Type header, +// CompleteMultipartUpload returns a 200 OK response. For more information about +// multipart uploads, see Uploading Objects Using Multipart Upload +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
For +// information about permissions required to use the multipart upload API, see +// Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// CompleteMultipartUpload has the following special errors: +// +// * Error code: +// EntityTooSmall +// +// * Description: Your proposed upload is smaller than the minimum +// allowed object size. Each part must be at least 5 MB in size, except the last +// part. +// +// * 400 Bad Request +// +// * Error code: InvalidPart +// +// * Description: One or more +// of the specified parts could not be found. The part might not have been +// uploaded, or the specified entity tag might not have matched the part's entity +// tag. +// +// * 400 Bad Request +// +// * Error code: InvalidPartOrder +// +// * Description: The list +// of parts was not in ascending order. The parts list must be specified in order +// by part number. +// +// * 400 Bad Request +// +// * Error code: NoSuchUpload +// +// * Description: +// The specified multipart upload does not exist. The upload ID might be invalid, +// or the multipart upload might have been aborted or completed. +// +// * 404 Not +// Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * +// CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { + if params == nil { + params = &CompleteMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CompleteMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CompleteMultipartUploadInput struct { + + // Name of the bucket to which the multipart upload was initiated. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. 
For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // ID for the initiated multipart upload. + // + // This member is required. + UploadId *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The container for the multipart upload request information. + MultipartUpload *types.CompletedMultipartUpload + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. 
For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +type CompleteMultipartUploadOutput struct { + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. When using this action with + // an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is an + // opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will contain + // one or more nonhexadecimal characters and/or will consist of less than 32 or + // more than 32 hexadecimal digits. For more information about how the entity tag + // is calculated, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string + + // If the object expiration is configured, this will contain the expiration date + // (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + Expiration *string + + // The object key of the newly created object. + Key *string + + // The URI that identifies the newly created object. + Location *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the newly created object, in case the bucket has versioning turned + // on. + VersionId *string + + // Metadata pertaining to the operation's result. 
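+	//
+	// Illustrative sketch, not part of the generated file: the metadata can be
+	// inspected with helpers from this package, for example:
+	//
+	//	out, err := client.CompleteMultipartUpload(ctx, params)
+	//	if err == nil {
+	//		if v, ok := GetChecksumValidationMetadata(out.ResultMetadata); ok {
+	//			fmt.Println(v.AlgorithmsUsed)
+	//		}
+	//	}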
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "CompleteMultipartUpload",
+	}
+}
+
+// getCompleteMultipartUploadBucketMember returns a pointer to a string denoting
+// the provided bucket member value and a boolean indicating whether the input has
+// a modeled bucket name.
+func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) {
+	in := input.(*CompleteMultipartUploadInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getCompleteMultipartUploadBucketMember,
+		},
+		UsePathStyle:         options.UsePathStyle,
+		UseAccelerate:        options.UseAccelerate,
+		SupportsAccelerate:   true,
+		TargetS3ObjectLambda: false,
+		EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go new file mode 100644 index 000000000000..477900778d78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -0,0 +1,574 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a copy of an object that is already stored in Amazon S3. You can store +// individual objects of up to 5 TB in Amazon S3. You create a copy of your object +// up to 5 GB in size in a single atomic action using this API. However, to copy an +// object greater than 5 GB, you must use the multipart upload Upload Part - Copy +// (UploadPartCopy) API. For more information, see Copy Object Using the REST +// Multipart Upload API +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// All copy requests must be authenticated. Additionally, you must have read access +// to the source object and write access to the destination bucket. For more +// information, see REST Authentication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both +// the Region that you want to copy the object from and the Region that you want to +// copy the object to must be enabled for your account. A copy request might return +// an error when Amazon S3 receives the copy request or while Amazon S3 is copying +// the files. If the error occurs before the copy action starts, you receive a +// standard Amazon S3 error. If the error occurs during the copy operation, the +// error response is embedded in the 200 OK response. This means that a 200 OK +// response can contain either a success or an error. Design your application to +// parse the contents of the response and handle it appropriately. If the copy is +// successful, you receive a response with information about the copied object. If +// the request is an HTTP 1.1 request, the response is chunk encoded. If it were +// not, it would not contain the content-length, and you would need to read the +// entire body. The copy request charge is based on the storage class and Region +// that you specify for the destination object. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). Amazon S3 transfer acceleration +// does not support cross-Region copies. If you request a cross-Region copy using a +// transfer acceleration endpoint, you get a 400 Bad Request error. For more +// information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// Metadata When copying an object, you can preserve all metadata (default) or +// specify new metadata. However, the ACL is not preserved and is set to private +// for the user making the request. To override the default ACL setting, specify a +// new ACL when generating a copy request. 
For more information, see Using ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To +// specify whether you want the object metadata copied from the source object or +// replaced with metadata provided in the request, you can optionally add the +// x-amz-metadata-directive header. When you grant permissions, you can use the +// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior +// when objects are uploaded. For more information, see Specifying Conditions in a +// Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in +// the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition +// keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// x-amz-copy-source-if Headers To only copy an object under certain conditions, +// such as whether the Etag matches or whether the object was modified before or +// after a specified date, use the following request parameters: +// +// * +// x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * +// x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If +// both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to +// true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If +// both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match +// condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition +// evaluates to true +// +// All headers with the x-amz- prefix, including +// x-amz-copy-source, must be signed. Server-side encryption When you perform a +// CopyObject operation, you can optionally use the appropriate encryption-related +// headers to encrypt the object using server-side encryption with Amazon Web +// Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data as it +// writes it to disks in its data centers and decrypts the data when you access it. +// For more information about server-side encryption, see Using Server-Side +// Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). If +// a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. +// For more information, see Amazon S3 Bucket Keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon +// S3 User Guide. Access Control List (ACL)-Specific Request Headers When copying +// an object, you can optionally use headers to grant ACL-based permissions. By +// default, all objects are private. Only the owner has full access control. When +// adding a new object, you can grant permissions to individual Amazon Web Services +// accounts or to predefined groups defined by Amazon S3. These permissions are +// then added to the ACL on the object. 
For more information, see Access Control +// List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing +// ACLs Using the REST API +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If +// the bucket that you're copying objects to uses the bucket owner enforced setting +// for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify an ACL +// or PUT requests that specify bucket owner full control ACLs, such as the +// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed +// in the XML format. For more information, see Controlling ownership of objects +// and disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced +// setting for Object Ownership, all objects written to the bucket by any account +// will be owned by the bucket owner. Checksums When copying an object, if it has a +// checksum, that checksum will be copied to the new object by default. When you +// copy the object over, you may optionally specify a different checksum algorithm +// to use with the x-amz-checksum-algorithm header. Storage Class Options You can +// use the CopyObject action to change the storage class of an object that is +// already stored in Amazon S3 using the StorageClass parameter. For more +// information, see Storage Classes +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in +// the Amazon S3 User Guide. Versioning By default, x-amz-copy-source identifies +// the current version of an object to copy. If the current version is a delete +// marker, Amazon S3 behaves as if the object was deleted. To copy a different +// version, use the versionId subresource. If you enable versioning on the target +// bucket, Amazon S3 generates a unique version ID for the object being copied. +// This version ID is different from the version ID of the source object. Amazon S3 +// returns the version ID of the copied object in the x-amz-version-id response +// header in the response. If you do not enable versioning or suspend it on the +// target bucket, the version ID that Amazon S3 generates is always null. If the +// source object's storage class is GLACIER, you must restore a copy of this object +// before you can use it as a source object for the copy operation. For more +// information, see RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). The +// following operations are related to CopyObject: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more +// information, see Copying Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { + if params == nil { + params = &CopyObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CopyObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CopyObjectInput struct { + + // The name of the destination bucket. 
When using this action with an access point, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the source object for the copy operation. You specify the value in one + // of two formats, depending on whether you want to access the source object + // through an access point + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * + // For objects not accessed through an access point, specify the name of the source + // bucket and the key of the source object, separated by a slash (/). For example, + // to copy the object reports/january.pdf from the bucket awsexamplebucket, use + // awsexamplebucket/reports/january.pdf. The value must be URL-encoded. + // + // * For + // objects accessed through access points, specify the Amazon Resource Name (ARN) + // of the object as accessed through the access point, in the format + // arn:aws:s3:::accesspoint//object/. For example, to copy the object + // reports/january.pdf through access point my-access-point owned by account + // 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using access + // points only when the source and destination buckets are in the same Amazon Web + // Services Region. Alternatively, for objects accessed through Amazon S3 on + // Outposts, specify the ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 in + // Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL-encoded. + // + // To copy a specific version of an object, append + // ?versionId= to the value (for example, + // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of the + // source object. + // + // This member is required. + CopySource *string + + // The key of the destination object. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. This action is not supported by Amazon S3 + // on Outposts. 
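+	//
+	// Illustrative sketch, not part of the generated file: canned ACLs are the
+	// ObjectCannedACL enum values defined in the types package, e.g.:
+	//
+	//	params.ACL = types.ObjectCannedACLPrivate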
+ ACL types.ObjectCannedACL + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // Indicates the algorithm you want Amazon S3 to use to create the checksum for the + // object. For more information, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time + + // Copies the object if its entity tag (ETag) is different than the specified ETag. + CopySourceIfNoneMatch *string + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + CopySourceSSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one that + // was used when the source object was created. + CopySourceSSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string + + // The account ID of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request fails with the HTTP status + // code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The account ID of the expected source bucket owner. If the source bucket is + // owned by a different account, the request fails with the HTTP status code 403 + // Forbidden (access denied). + ExpectedSourceBucketOwner *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This + // action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. 
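A hedged sketch of how the three CopySourceSSECustomer* members documented above travel together when the source object was written with a customer-provided key (SSE-C). The 32-byte key is a placeholder, and the base64 encoding of the key and of its MD5 digest is an assumption about the wire format these headers expect.

```go
package main

import (
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copyFromSSECSource copies an object whose source was written with a
// customer-provided AES-256 key (SSE-C). rawKey must be the exact 32-byte
// key used when the source object was created.
func copyFromSSECSource(ctx context.Context, client *s3.Client, rawKey []byte) error {
	sum := md5.Sum(rawKey) // 128-bit digest; S3 uses it as an integrity check on the key
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:                         aws.String("example-destination-bucket"), // hypothetical
		Key:                            aws.String("january-copy.pdf"),
		CopySource:                     aws.String("example-source-bucket/january.pdf"),
		CopySourceSSECustomerAlgorithm: aws.String("AES256"),
		CopySourceSSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		CopySourceSSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	return err
}
```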
+ GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether the metadata is copied from the source object or replaced with + // metadata provided in the request. + MetadataDirective types.MetadataDirective + + // Specifies whether you want to apply a legal hold to the copied object. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode that you want to apply to the copied object. + ObjectLockMode types.ObjectLockMode + + // The date and time when you want the copied object's Object Lock to expire. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // Specifies the Amazon Web Services KMS key ID to use for object encryption. All + // GET and PUT requests for an object protected by Amazon Web Services KMS will + // fail if not made via SSL or using SigV4. For information about configuring using + // any of the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For + // more information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in + // the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the destination object; this value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query + // parameters.
+ Tagging *string + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set provided in the request. + TaggingDirective types.TaggingDirective + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +type CopyObjectOutput struct { + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // Container for all response elements. + CopyObjectResult *types.CopyObjectResult + + // Version of the copied object in the destination bucket. + CopySourceVersionId *string + + // If the object expiration is configured, the response includes this header. + Expiration *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the newly created copy. + VersionId *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpCopyObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addCopyObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "CopyObject", + } +} + +// getCopyObjectBucketMember returns a pointer to string denoting a provided bucket +// member value and a boolean indicating if the input has a modeled bucket name. +func getCopyObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*CopyObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCopyObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go new file mode 100644 index 000000000000..27322a2c656a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -0,0 +1,318 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 +// and have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. Not every string is an acceptable bucket name. For +// information about bucket naming restrictions, see Bucket naming rules +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// By default, the bucket is created in the US East (N. Virginia) Region. You can +// optionally specify a Region in the request body. You might choose a Region to +// optimize latency, minimize costs, or address regulatory requirements. For +// example, if you reside in Europe, you will probably find it advantageous to +// create buckets in the Europe (Ireland) Region. For more information, see +// Accessing a bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// If you send your create bucket request to the s3.amazonaws.com endpoint, the +// request goes to the us-east-1 Region. Accordingly, the signature calculations in +// Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to be +// created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle a 307 redirect. For more information, see +// Virtual hosting of buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Access +// control lists (ACLs) When creating a bucket using this operation, you can +// optionally configure the bucket ACL to specify the accounts or groups that +// should be granted specific permissions on the bucket. If your CreateBucket +// request sets bucket owner enforced for S3 Object Ownership and specifies a +// bucket ACL that provides access to an external Amazon Web Services account, your +// request fails with a 400 error and returns the +// InvalidBucketAclWithObjectOwnership error code. For more information, see +// Controlling object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. There are two ways to grant the appropriate +// permissions using the request headers. +// +// * Specify a canned ACL using the +// x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as +// canned ACLs. Each canned ACL has a predefined set of grantees and permissions.
+// For more information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly using the x-amz-grant-read, +// x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and +// x-amz-grant-full-control headers. These headers map to the set of permissions +// Amazon S3 supports in an ACL. For more information, see Access control list +// (ACL) overview +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: +// +// * id – if the value specified is the canonical user ID of an Amazon +// Web Services account +// +// * uri – if you are granting permissions to a predefined +// group +// +// * emailAddress – if the value specified is the email address of an Amazon +// Web Services account Using email addresses to specify a grantee is only +// supported in the following Amazon Web Services Regions: +// +// * US East (N. +// Virginia) +// +// * US West (N. California) +// +// * US West (Oregon) +// +// * Asia Pacific +// (Singapore) +// +// * Asia Pacific (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe +// (Ireland) +// +// * South America (São Paulo) +// +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified by +// account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" +// +// You can use either a canned ACL or specify +// access permissions explicitly. You cannot do both. Permissions In addition to +// s3:CreateBucket, the following permissions are required when your CreateBucket +// includes specific headers: +// +// * ACLs - If your CreateBucket request specifies ACL +// permissions and the ACL is public-read, public-read-write, authenticated-read, +// or if you specify access permissions explicitly through any other ACL, both +// s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL in the +// CreateBucket request is private or doesn't specify any ACLs, only +// s3:CreateBucket permission is needed. +// +// * Object Lock - If +// ObjectLockEnabledForBucket is set to true in your CreateBucket request, +// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are +// required. +// +// * S3 Object Ownership - If your CreateBucket request includes the +// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is +// required.
+// +// The following operations are related to CreateBucket: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { + if params == nil { + params = &CreateBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketInput struct { + + // The name of the bucket to create. + // + // This member is required. + Bucket *string + + // The canned ACL to apply to the bucket. + ACL types.BucketCannedACL + + // The configuration information for the bucket. + CreateBucketConfiguration *types.CreateBucketConfiguration + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. + GrantRead *string + + // Allows grantee to read the bucket ACL. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + ObjectLockEnabledForBucket bool + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that don't + // specify an ACL or bucket owner full control ACLs, such as the + // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed + // in the XML format. + ObjectOwnership types.ObjectOwnership + + noSmithyDocumentSerde +} + +type CreateBucketOutput struct { + + // A forward slash followed by the name of the bucket. + Location *string + + // Metadata pertaining to the operation's result. 
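A short, hypothetical sketch of the Region behavior documented above: outside us-east-1, the target Region travels in the request body as a LocationConstraint. The bucket name and the eu-west-1 enum value from this SDK's types package are assumptions, not part of the vendored file.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createRegionalBucket creates a bucket in eu-west-1; outside us-east-1 the
// Region must be named in the request body via CreateBucketConfiguration.
func createRegionalBucket(ctx context.Context, client *s3.Client) error {
	out, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical; bucket names are globally unique
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintEuWest1,
		},
	})
	if err != nil {
		return err
	}
	// Location is a forward slash followed by the bucket name, e.g. "/example-bucket".
	log.Printf("created bucket at %s", aws.ToString(out.Location))
	return nil
}
```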
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addCreateBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "CreateBucket", + } +} + +// getCreateBucketBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getCreateBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go new file mode 100644 index 000000000000..825feebd2c85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -0,0 +1,604 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. You +// specify this upload ID in each of your subsequent upload part requests (see +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also +// include this upload ID in the final request to either complete or abort the +// multipart upload request. For more information about multipart uploads, see +// Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). If you have +// configured a lifecycle rule to abort incomplete multipart uploads, the upload +// must complete within the number of days specified in the bucket lifecycle +// configuration. Otherwise, the incomplete multipart upload becomes eligible for +// an abort action and Amazon S3 aborts the multipart upload. For more information, +// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// For information about the permissions required to use the multipart upload API, +// see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). For +// request signing, multipart upload is just a series of regular requests. You +// initiate a multipart upload, send one or more requests to upload parts, and then +// complete the multipart upload process. You sign each request individually. There +// is nothing special about signing multipart upload requests. For more information +// about signing, see Authenticating Requests (Amazon Web Services Signature +// Version 4) +// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or abort +// the multipart upload. Amazon S3 frees up the space used to store the parts and +// stops charging you for storing them only after you either complete or abort a +// multipart upload. You can optionally request server-side encryption. For +// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in +// its data centers and decrypts it when you access it. You can provide your own +// encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed +// encryption keys.
If you choose to provide your own encryption key, the request +// headers you provide in UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and +// UploadPartCopy +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload. To perform a multipart upload with encryption +// using an Amazon Web Services KMS key, the requester must have permission to the +// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are +// required because Amazon S3 must decrypt and read data from the encrypted file +// parts before it completes the multipart upload. For more information, see +// Multipart upload API and permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user +// or role is in the same Amazon Web Services account as the KMS key, then you must +// have these permissions on the key policy. If your IAM user or role belongs to a +// different account than the key, then you must have the permissions on both the +// key policy and your IAM user or role. For more information, see Protecting Data +// Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// Access Permissions When uploading an object, you can optionally specify the +// accounts or groups that should be granted specific permissions on the new +// object. There are two ways to grant the permissions using the request +// headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more +// information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly with the x-amz-grant-read, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These parameters map to the set of permissions that Amazon S3 supports +// in an ACL. For more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can +// use either a canned ACL or specify access permissions explicitly. You cannot do +// both. Server-Side-Encryption-Specific Request Headers You can optionally tell +// Amazon S3 to encrypt data at rest using server-side encryption. Server-side +// encryption is for data encryption at rest. Amazon S3 encrypts your data as it +// writes it to disks in its data centers and decrypts it when you access it. The +// option you use depends on whether you want to use Amazon Web Services managed +// encryption keys or provide your own encryption key. +// +// * Use encryption keys +// managed by Amazon S3 or a customer managed key stored in Amazon Web Services Key +// Management Service (Amazon Web Services KMS) – If you want Amazon Web Services +// to manage the keys used to encrypt data, specify the following headers in the +// request. +// +// * x-amz-server-side-encryption +// +// * +// x-amz-server-side-encryption-aws-kms-key-id +// +// * +// x-amz-server-side-encryption-context +// +// If you specify +// x-amz-server-side-encryption:aws:kms, but don't provide +// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web +// Services managed key in Amazon Web Services KMS to protect the data.
All GET and +// PUT requests for an object protected by Amazon Web Services KMS fail if you +// don't make them with SSL or by using SigV4. For more information about +// server-side encryption with KMS key (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * +// Use customer-provided encryption keys – If you want to manage your own +// encryption keys, provide all the following headers in the request. +// +// * +// x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List +// (ACL)-Specific Request Headers You also can use the following access +// control–related headers with this operation. By default, all objects are +// private. Only the owner has full access control. When adding a new object, you +// can grant permissions to individual Amazon Web Services accounts or to +// predefined groups defined by Amazon S3. These permissions are then added to the +// access control list (ACL) on the object. For more information, see Using ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With +// this operation, you can grant access permissions using one of the following two +// methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of +// predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of +// grantees and permissions. For more information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly — To explicitly grant access permissions +// to specific Amazon Web Services accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an ACL. For +// more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the +// header, you specify a list of grantees who get the specific permission. To grant +// permissions explicitly, use: +// +// * x-amz-grant-read +// +// * x-amz-grant-write +// +// * +// x-amz-grant-read-acp +// +// * x-amz-grant-write-acp +// +// * x-amz-grant-full-control +// +// You +// specify each grantee as a type=value pair, where the type is one of the +// following: +// +// * id – if the value specified is the canonical user ID of an Amazon +// Web Services account +// +// * uri – if you are granting permissions to a predefined +// group +// +// * emailAddress – if the value specified is the email address of an Amazon +// Web Services account Using email addresses to specify a grantee is only +// supported in the following Amazon Web Services Regions: +// +// * US East (N. +// Virginia) +// +// * US West (N. 
California) +// +// * US West (Oregon) +// +// * Asia Pacific +// (Singapore) +// +// * Asia Pacific (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe +// (Ireland) +// +// * South America (São Paulo) +// +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// For example, the following +// x-amz-grant-read header grants the Amazon Web Services accounts identified by +// account IDs permissions to read object data and its metadata: x-amz-grant-read: +// id="11112222333", id="444455556666" +// +// The following operations are related to +// CreateMultipartUpload: +// +// * UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { + if params == nil { + params = &CreateMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMultipartUploadInput struct { + + // The name of the bucket to which to initiate the upload. When using this action + // with an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload is to be initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. This action is not supported by Amazon S3 + // on Outposts. + ACL types.ObjectCannedACL + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS).
Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with an object action doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // Indicates the algorithm you want Amazon S3 to use to create the checksum for the + // object. For more information, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This + // action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. + GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether you want to apply a legal hold to the uploaded object. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // Specifies the Object Lock mode that you want to apply to the uploaded object. + ObjectLockMode types.ObjectLockMode + + // Specifies the date and time when you want the Object Lock to expire. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // Specifies the ID of the symmetric customer managed key to use for object + // encryption. All GET and PUT requests for an object protected by Amazon Web + // Services KMS will fail if not made via SSL or using SigV4. For information about + // configuring using any of the officially supported Amazon Web Services SDKs and + // Amazon Web Services CLI, see Specifying the Signature Version in Request + // Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For + // more information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in + // the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +type CreateMultipartUploadOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, the response includes this header. The header indicates when the + // initiated multipart upload becomes eligible for an abort operation. For more + // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle + // Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines this action. + AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Object key for which the multipart upload was initiated. + Key *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // ID for the initiated multipart upload. + UploadId *string + + // Metadata pertaining to the operation's result. 
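Tying the initiate/upload/complete flow documented above together, a hedged single-part sketch (bucket, key, and data are hypothetical, and field shapes such as the plain int32 PartNumber follow this vendored SDK revision). In real uploads every part except the last must be at least 5 MiB; on failure the upload is aborted so the stored parts stop accruing charges.

```go
package main

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// uploadInParts runs the initiate / upload-part / complete sequence for a
// single part; data stands in for real content.
func uploadInParts(ctx context.Context, client *s3.Client, data []byte) error {
	bucket, key := aws.String("example-bucket"), aws.String("large-object.bin") // hypothetical

	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		return err
	}

	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: 1,
		Body:       bytes.NewReader(data),
	})
	if err != nil {
		// Abort so the already-stored parts stop accruing storage charges.
		_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
			Bucket: bucket, Key: key, UploadId: mpu.UploadId,
		})
		return err
	}

	// Completing requires the ETag and part number of every uploaded part.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: 1}},
		},
	})
	return err
}
```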
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "CreateMultipartUpload", + } +} + +// getCreateMultipartUploadBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, +
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go new file mode 100644 index 000000000000..44823c6a8fea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// Related Resources +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { + if params == nil { + params = &DeleteBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInput struct { + + // Specifies the bucket being deleted. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketOutput struct { + // Metadata pertaining to the operation's result. 
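A one-call sketch of the operation above (hypothetical bucket name); per the constraint just stated, the request fails unless every object version and delete marker has already been removed from the bucket.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteEmptyBucket removes a bucket that has already been emptied of all
// object versions and delete markers.
func deleteEmptyBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	return err
}
```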
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucket", + } +} + +// getDeleteBucketBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getDeleteBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go new file mode 100644 index 000000000000..e016d976302e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). To use this operation, you must have permissions to perform +// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is deleted. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketAnalyticsConfiguration", + } +} + +// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + 
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go new file mode 100644 index 000000000000..79045abe2d05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the cors configuration information set for the bucket. To use this +// operation, you must have permission to perform the s3:PutBucketCORS action. The +// bucket owner has this permission by default and can grant this permission to +// others. For information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. Related Resources: +// +// * PutBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * +// RESTOPTIONSobject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { + if params == nil { + params = &DeleteBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketCorsInput struct { + + // Specifies the bucket whose cors configuration is being deleted. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketCorsOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketCors", + } +} + +// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: 
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go new file mode 100644 index 000000000000..9c3201f6ded3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the DELETE action removes default encryption from the +// bucket. For information about the Amazon S3 default encryption feature, see +// Amazon S3 Default Bucket Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the +// Amazon S3 User Guide. To use this operation, you must have permissions to +// perform the s3:PutEncryptionConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Related Resources +// +// * PutBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * +// GetBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { + if params == nil { + params = &DeleteBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketEncryptionInput struct { + + // The name of the bucket containing the server-side encryption configuration to + // delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketEncryption", + } +} + +// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: 
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000000..139dd78a05bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The +// S3 Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * +// GetBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * +// PutBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. 
+ Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + noSmithyDocumentSerde +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketIntelligentTieringConfiguration", + } +} + +// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go new file mode 100644 index 000000000000..32fe81f12e0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * +// GetBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * +// PutBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to delete. 
+ // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketInventoryConfiguration", + } +} + +// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := 
input.(*DeleteBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go new file mode 100644 index 000000000000..c110bfb440d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -0,0 +1,178 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes +// all the lifecycle configuration rules in the lifecycle subresource associated +// with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. To use this operation, you must have permission to +// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner +// has this permission and the bucket owner can grant this permission to others. +// There is usually some time lag before lifecycle configuration deletion is fully +// propagated to all the Amazon S3 systems. For more information about the object +// expiration, see Elements to Describe Lifecycle Actions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * +// GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { + if params == nil { + params = &DeleteBucketLifecycleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketLifecycleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketLifecycleInput struct { + + // The bucket name of the lifecycle to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketLifecycleOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketLifecycle", + } +} + +// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketLifecycleInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: 
getDeleteBucketLifecycleBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go new file mode 100644 index 000000000000..cc08f04b7870 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. To use this operation, you must have +// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner +// has this permission by default. The bucket owner can grant this permission to +// others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// ListBucketMetricsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * +// Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to delete. + // + // This member is required. 
+ Bucket *string + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketMetricsConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketMetricsConfiguration", + } +} + +// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, 
false + } + return in.Bucket, true +} +func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go new file mode 100644 index 000000000000..6186db5e1942 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -0,0 +1,171 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// The following operations are related to DeleteBucketOwnershipControls: +// +// * +// GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { + if params == nil { + params = &DeleteBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketOwnershipControlsInput struct { + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketOwnershipControls", + } +} + +// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go new file mode 100644 index 000000000000..618d9bedebeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than the +// root user of the Amazon Web Services account that owns the bucket, the calling +// identity must have the DeleteBucketPolicy permissions on the specified bucket +// and belong to the bucket owner's account to use this operation. If you don't +// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied +// error. If you have the correct permissions, but you're not using an identity +// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not +// Allowed error. As a security precaution, the root user of the Amazon Web +// Services account that owns a bucket can always use this operation, even if the +// policy explicitly denies the root user the ability to perform this action. For +// more information about bucket policies, see Using Bucket Policies and +// UserPolicies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following operations are related to DeleteBucketPolicy +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { + if params == nil { + params = &DeleteBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketPolicyInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketPolicyOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketPolicy", + } +} + +// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: 
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go new file mode 100644 index 000000000000..ad2d772d477e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the replication configuration from the bucket. To use this operation, +// you must have permissions to perform the s3:PutReplicationConfiguration action. +// The bucket owner has these permissions by default and can grant it to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// It can take a while for the deletion of a replication configuration to fully +// propagate. For information about replication configuration, see Replication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon +// S3 User Guide. The following operations are related to +// DeleteBucketReplication: +// +// * PutBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * +// GetBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { + if params == nil { + params = &DeleteBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketReplicationInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketReplicationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketReplication", + } +} + +// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: 
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go new file mode 100644 index 000000000000..063f0bc5977f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the tags from the bucket. To use this operation, you must have +// permission to perform the s3:PutBucketTagging action. By default, the bucket +// owner has this permission and can grant this permission to others. The following +// operations are related to DeleteBucketTagging: +// +// * GetBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * +// PutBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { + if params == nil { + params = &DeleteBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketTaggingInput struct { + + // The bucket that has the tag set to be removed. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. 
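
The `ExpectedBucketOwner` member documented just above acts as a cross-account guard: if the bucket belongs to a different account, S3 rejects the request with 403 Forbidden instead of mutating the wrong bucket. A hedged sketch reusing the client from the first example (both literals are placeholders):

```go
// deleteTagSetGuarded removes the bucket's tag set, but only if the bucket
// is owned by the expected account; a mismatch surfaces as 403 Forbidden.
func deleteTagSetGuarded(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketTagging(ctx, &s3.DeleteBucketTaggingInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	return err
}
```
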
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketTagging", + } +} + +// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go new file mode 100644 index 000000000000..7eb72b86a226 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. This DELETE +// action requires the S3:DeleteBucketWebsite permission. By default, only the +// bucket owner can delete the website configuration attached to a bucket. However, +// bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. For more information about hosting websites, +// see Hosting Websites on Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). The +// following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * +// PutBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { + if params == nil { + params = &DeleteBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketWebsiteInput struct { + + // The bucket name for which you want to remove the website configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. 
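
The `UsePathStyle` and `UseAccelerate` values threaded through `addDeleteBucketWebsiteUpdateEndpoint` above come from the client's `Options`, and each operation's variadic `optFns` parameter lets a caller override them for a single call. A sketch, again with a placeholder bucket name (path-style addressing is mainly useful against S3-compatible endpoints):

```go
// deleteWebsiteConfig removes a bucket's static-website configuration,
// forcing path-style addressing for this one call via optFns.
func deleteWebsiteConfig(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketWebsite(ctx,
		&s3.DeleteBucketWebsiteInput{Bucket: aws.String("example-bucket")},
		func(o *s3.Options) { o.UsePathStyle = true },
	)
	return err
}
```
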
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketWebsite", + } +} + +// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go new file mode 100644 index 000000000000..82e6687f8af4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -0,0 +1,239 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a null +// version, Amazon S3 does not remove any objects but will still respond that the +// command was successful. To remove a specific version, you must be the bucket +// owner and you must use the version Id subresource. Using this subresource +// permanently deletes the version. If the object deleted is a delete marker, +// Amazon S3 sets the response header, x-amz-delete-marker, to true. If the object +// you want to delete is in a bucket where the bucket versioning configuration is +// MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE +// versionId request. Requests that include x-amz-mfa must use HTTPS. For more +// information about MFA Delete, see Using MFA Delete +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see +// sample requests that use versioning, see Sample Request +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny them +// the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. The following action is related to DeleteObject: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { + if params == nil { + params = &DeleteObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectInput struct { + + // The bucket name of the bucket containing the object. When using this action with + // an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key name of the object to delete. + // + // This member is required. + Key *string + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to + // process this operation. To use this header, you must have the + // s3:BypassGovernanceRetention permission. + BypassGovernanceRetention bool + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and the + // value that is displayed on your authentication device. Required to permanently + // delete a versioned object if versioning is configured with MFA delete enabled. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type DeleteObjectOutput struct { + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker bool + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string + + // Metadata pertaining to the operation's result. 
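
Tying the input and output members above together: deleting a specific version requires `VersionId`, and the response reports whether the thing removed was a delete marker. A sketch with placeholder values, reusing the earlier client:

```go
// deleteObjectVersion permanently removes one version of a key and logs
// whether the deleted version was itself a delete marker.
func deleteObjectVersion(ctx context.Context, client *s3.Client) error {
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String("example-bucket"),      // placeholder
		Key:       aws.String("path/to/object.txt"),  // placeholder
		VersionId: aws.String("exampleVersionId123"), // placeholder
	})
	if err != nil {
		return err
	}
	log.Printf("delete marker=%v version=%s",
		out.DeleteMarker, aws.ToString(out.VersionId))
	return nil
}
```
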
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObject", + } +} + +// getDeleteObjectBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go new file mode 100644 index 000000000000..0d1bb73994f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the entire tag set from the specified object. For more information about +// managing object tags, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). To use +// this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. To delete tags of a specific object version, add the versionId query +// parameter in the request. You will need permission for the +// s3:DeleteObjectVersionTagging action. The following operations are related to +// DeleteBucketMetricsConfiguration: +// +// * PutObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// * +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { + if params == nil { + params = &DeleteObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectTaggingInput struct { + + // The bucket name containing the objects from which to remove the tags. When using + // this action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key that identifies the object in the bucket from which to remove all tags. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The versionId of the object that the tag-set will be removed from. + VersionId *string + + noSmithyDocumentSerde +} + +type DeleteObjectTaggingOutput struct { + + // The versionId of the object the tag-set was removed from. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObjectTagging", + } +} + +// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return 
s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go new file mode 100644 index 000000000000..50078da53bc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -0,0 +1,294 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action enables you to delete multiple objects from a bucket using a single +// HTTP request. If you know the object keys that you want to delete, then this +// action provides a suitable alternative to sending individual delete requests, +// reducing per-request overhead. The request contains a list of up to 1000 keys +// that you want to delete. In the XML, you provide the object key names, and +// optionally, version IDs if you want to delete a specific version of the object +// from a versioning-enabled bucket. For each key, Amazon S3 performs a delete +// action and returns the result of that delete, success, or failure, in the +// response. Note that if the object specified in the request is not found, Amazon +// S3 returns the result as deleted. The action supports two modes for the +// response: verbose and quiet. By default, the action uses verbose mode in which +// the response includes the result of deletion of each key in your request. In +// quiet mode the response includes only keys where the delete action encountered +// an error. For a successful deletion, the action does not return any information +// about the delete in the response body. When performing this action on an MFA +// Delete enabled bucket, that attempts to delete any versioned objects, you must +// include an MFA token. If you do not provide one, the entire request will fail, +// even if there are non-versioned objects you are trying to delete. If you provide +// an invalid token, whether there are versioned keys in the request or not, the +// entire Multi-Object Delete request will fail. For information about MFA Delete, +// see MFA Delete +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// Finally, the Content-MD5 header is required for all Multi-Object Delete +// requests. Amazon S3 uses the header value to ensure that your request body has +// not been altered in transit. 
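
To make the batch semantics described above concrete, here is a hedged sketch of a quiet-mode multi-object delete. Field shapes (`Quiet` as a plain bool, up to 1000 `ObjectIdentifier`s) match this vendored SDK revision, all names are placeholders, and the snippet additionally imports `github.com/aws/aws-sdk-go-v2/service/s3/types`:

```go
// batchDelete removes several keys in one request; in quiet mode the
// response lists only the keys whose deletion failed.
func batchDelete(ctx context.Context, client *s3.Client, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Delete: &types.Delete{Objects: ids, Quiet: true},
	})
	if err != nil {
		return err // the whole request failed (e.g. missing MFA token)
	}
	for _, e := range out.Errors {
		log.Printf("could not delete %s: %s",
			aws.ToString(e.Key), aws.ToString(e.Message))
	}
	return nil
}
```

Note that the Content-MD5 requirement called out in the documentation is satisfied by the checksum middleware wired up in this file (`addDeleteObjectsInputChecksumMiddlewares`, with `RequireChecksum: true`), so callers do not compute it themselves.
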
The following operations are related to +// DeleteObjects: +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { + if params == nil { + params = &DeleteObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectsInput struct { + + // The bucket name containing the objects to delete. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + Delete *types.Delete + + // Specifies whether you want to delete this object even if it has a + // Governance-type Object Lock in place. To use this header, you must have the + // s3:BypassGovernanceRetention permission. + BypassGovernanceRetention bool + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must + // be the same for all parts and it match the checksum value supplied in the + // CreateMultipartUpload request. 
+ ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and the + // value that is displayed on your authentication device. Required to permanently + // delete a versioned object if versioning is configured with MFA delete enabled. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type DeleteObjectsOutput struct { + + // Container element for a successful delete. It identifies the object that was + // successfully deleted. + Deleted []types.DeletedObject + + // Container for a failed delete action that describes the object that Amazon S3 + // attempted to delete and the error it encountered. + Errors []types.Error + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addDeleteObjectsUpdateEndpoint(stack, options); err 
!= nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObjects", + } +} + +// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. +func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*DeleteObjectsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getDeleteObjectsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteObjectsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go new file mode 100644 index 000000000000..3defd538c743 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -0,0 +1,183 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketPublicAccessBlock permission. 
For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon +// S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// GetBucketPolicyStatus +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { + if params == nil { + params = &DeletePublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeletePublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeletePublicAccessBlockInput struct { + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeletePublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. 
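
Because the documentation above leans on permission failures (403), it is worth showing how such errors are inspected in smithy-go based clients: operation errors unwrap to `smithy.APIError`. A sketch reusing the earlier client and additionally importing `errors` and `github.com/aws/smithy-go`:

```go
// removePublicAccessBlock deletes the bucket's PublicAccessBlock and
// distinguishes permission failures from other errors.
func removePublicAccessBlock(ctx context.Context, client *s3.Client) error {
	_, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	var ae smithy.APIError
	if errors.As(err, &ae) && ae.ErrorCode() == "AccessDenied" {
		// Caller lacks s3:PutBucketPublicAccessBlock on the bucket.
		log.Printf("access denied: %s", ae.ErrorMessage())
	}
	return err
}
```
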
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeletePublicAccessBlock", + } +} + +// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*DeletePublicAccessBlockInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeletePublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: 
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go new file mode 100644 index 000000000000..d1690f3ea4fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the GET action uses the accelerate subresource to return +// the Transfer Acceleration state of a bucket, which is either Enabled or +// Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. To use this +// operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an +// existing bucket to Enabled or Suspended by using the +// PutBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. A GET accelerate request does not return a state value for a bucket +// that has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. For more information about +// transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in +// the Amazon S3 User Guide. Related Resources +// +// * PutBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &GetBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAccelerateConfigurationInput struct { + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. 
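As a quick illustration of the shapes defined here, one way to read a bucket's acceleration state (a sketch under the same assumptions as the earlier example; the helper name is illustrative):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printAccelerateState reports Enabled or Suspended; the status is empty
// when no Transfer Acceleration state was ever set on the bucket.
func printAccelerateState(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	fmt.Printf("accelerate status for %s: %q\n", bucket, out.Status)
	return nil
}
```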
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAccelerateConfigurationOutput struct { + + // The accelerate configuration of the bucket. + Status types.BucketAccelerateStatus + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketAccelerateConfiguration", + } +} + +// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAccelerateConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go new file mode 100644 index 000000000000..610762883aaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is +// granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. If your bucket uses the bucket owner enforced +// setting for S3 Object Ownership, requests to read ACLs are still supported and +// return the bucket-owner-full-control ACL with the owner being the account that +// created the bucket. For more information, see Controlling object ownership and +// disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Related Resources +// +// * ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { + if params == nil { + params = &GetBucketAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAclInput struct { + + // Specifies the S3 bucket whose ACL is being requested. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAclOutput struct { + + // A list of grants. + Grants []types.Grant + + // Container for the bucket owner's display name and ID. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
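A hypothetical helper that walks the Grants slice this operation returns (sketch only; assumes a client built as in the first example):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketGrants lists each grantee and permission in the bucket ACL.
func printBucketGrants(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, g := range out.Grants {
		if g.Grantee == nil {
			continue
		}
		fmt.Printf("grantee type=%s id=%s permission=%s\n",
			g.Grantee.Type, aws.ToString(g.Grantee.ID), g.Permission)
	}
	return nil
}
```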
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketAcl", + } +} + +// getGetBucketAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketAclBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go new file mode 100644 index 000000000000..bf2c3be67262 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. To use this +// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, +// see Amazon S3 Analytics – Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. Related Resources +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &GetBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is retrieved. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAnalyticsConfigurationOutput struct { + + // The configuration and any analyses for the analytics filter. 
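A minimal sketch of fetching one analytics configuration by ID (helper name and arguments are illustrative placeholders):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printAnalyticsConfig retrieves a single analytics configuration by its ID.
func printAnalyticsConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketAnalyticsConfiguration(ctx, &s3.GetBucketAnalyticsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.AnalyticsConfiguration; cfg != nil {
		fmt.Printf("analytics configuration %s retrieved\n", aws.ToString(cfg.Id))
	}
	return nil
}
```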
+ AnalyticsConfiguration *types.AnalyticsConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketAnalyticsConfiguration", + } +} + +// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + 
UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go new file mode 100644 index 000000000000..0ed61273a75a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. To use this operation, you must have permission to perform the +// s3:GetBucketCORS action. By default, the bucket owner has this permission and +// can grant it to others. For more information about CORS, see Enabling +// Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following +// operations are related to GetBucketCors: +// +// * PutBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * +// DeleteBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { + if params == nil { + params = &GetBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketCorsInput struct { + + // The bucket name for which to get the cors configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketCorsOutput struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + CORSRules []types.CORSRule + + // Metadata pertaining to the operation's result. 
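A short sketch of enumerating the returned CORS rules (assumes the client setup from the first example):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCorsRules lists the allowed origins and methods of each CORS rule.
func printCorsRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for i, rule := range out.CORSRules {
		fmt.Printf("rule %d: origins=%v methods=%v\n", i, rule.AllowedOrigins, rule.AllowedMethods)
	}
	return nil
}
```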
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketCors", + } +} + +// getGetBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go new file mode 100644 index 000000000000..7fa92fc5fd27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the default encryption configuration for an Amazon S3 bucket. If the +// bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. For information about +// the Amazon S3 default encryption feature, see Amazon S3 Default Bucket +// Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). To use +// this operation, you must have permission to perform the +// s3:GetEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The following operations are related to GetBucketEncryption: +// +// * +// PutBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * +// DeleteBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { + if params == nil { + params = &GetBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketEncryptionInput struct { + + // The name of the bucket from which the server-side encryption configuration is + // retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketEncryptionOutput struct { + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Metadata pertaining to the operation's result. 
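The documented ServerSideEncryptionConfigurationNotFoundError case can be distinguished from other failures by inspecting the API error code. A sketch (the error-code string follows the documentation above; helper name is illustrative):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// printDefaultEncryption reports each default-encryption rule's algorithm.
func printDefaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) && apiErr.ErrorCode() == "ServerSideEncryptionConfigurationNotFoundError" {
		// Per the documentation above: no default encryption configuration set.
		fmt.Println("no default encryption configured")
		return nil
	}
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if d := rule.ApplyServerSideEncryptionByDefault; d != nil {
			fmt.Printf("default algorithm: %s\n", d.SSEAlgorithm)
		}
	}
	return nil
}
```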
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketEncryption", + } +} + +// getGetBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000000..70bbb9dfbaa3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * +// DeleteBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * +// PutBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &GetBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. 
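A sketch of reading one Intelligent-Tiering configuration's status (names are illustrative; assumes the client setup from the first example):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printTieringStatus fetches one S3 Intelligent-Tiering configuration by ID.
func printTieringStatus(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx, &s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.IntelligentTieringConfiguration; cfg != nil {
		fmt.Printf("configuration %s status: %s\n", aws.ToString(cfg.Id), cfg.Status)
	}
	return nil
}
```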
+ Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + noSmithyDocumentSerde +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *types.IntelligentTieringConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketIntelligentTieringConfiguration", + } +} + +// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketIntelligentTieringConfigurationInput) + if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go new file mode 100644 index 000000000000..f35a4606c8f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -0,0 +1,192 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. To use this operation, you must have permissions to perform +// the s3:GetInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
The +// following operations are related to GetBucketInventoryConfiguration: +// +// * +// DeleteBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * +// PutBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &GetBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketInventoryConfigurationOutput struct { + + // Specifies the inventory configuration. + InventoryConfiguration *types.InventoryConfiguration + + // Metadata pertaining to the operation's result. 
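A sketch of fetching one inventory configuration by ID (helper name is illustrative; assumes the client setup from the first example):

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printInventoryConfig retrieves a single inventory configuration by its ID.
func printInventoryConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.InventoryConfiguration; cfg != nil {
		fmt.Printf("inventory %s includes object versions: %s\n",
			aws.ToString(cfg.Id), cfg.IncludedObjectVersions)
	}
	return nil
}
```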
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketInventoryConfiguration", + } +} + +// getGetBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + 
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go new file mode 100644 index 000000000000..5d72d2ebc49b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Bucket lifecycle configuration now supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes the +// new filter element that you can use to specify a filter to select a subset of +// objects to which the rule applies. If you are using a previous version of the +// lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). To +// use this operation, you must have permission to perform the +// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: +// NoSuchLifecycleConfiguration +// +// * Description: The lifecycle configuration does +// not exist. 
+// +// * HTTP Status Code: 404 Not Found +// +// * SOAP Fault Code Prefix: +// Client +// +// The following operations are related to +// GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * +// PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * +// DeleteBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &GetBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to get the lifecycle information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLifecycleConfigurationOutput struct { + + // Container for a lifecycle rule. + Rules []types.LifecycleRule + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil 
{ + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketLifecycleConfiguration", + } +} + +// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLifecycleConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go new file mode 100644 index 000000000000..fb8ff30fd41f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -0,0 +1,248 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Returns the Region the bucket resides in. You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). To use +// this implementation of the operation, you must be the bucket owner. To use this +// API against an access point, provide the alias of the access point in place of +// the bucket name. 
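Since, as documented below, buckets in us-east-1 report a null (empty) LocationConstraint, callers typically normalize the result. A minimal sketch (assumes the client setup from the first example):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion resolves a bucket's Region; an empty LocationConstraint
// means the bucket resides in us-east-1.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	if out.LocationConstraint == "" {
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}
```

Note that the generated update-endpooint plumbing for this operation honors the client's UsePathStyle option, so `s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })` switches the request to path-style addressing.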
The following operations are related to GetBucketLocation: +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { + if params == nil { + params = &GetBucketLocationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLocationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLocationInput struct { + + // The name of the bucket for which to get the location. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLocationOutput struct { + + // Specifies the Region where the bucket resides. For a list of all the Amazon S3 + // supported location constraints by Region, see Regions and Endpoints + // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). Buckets in + // Region us-east-1 have a LocationConstraint of null. + LocationConstraint types.BucketLocationConstraint + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapDeserializerHelper(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addGetBucketLocationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLocation_custom struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+ }
+ output := &GetBucketLocationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{})
+ err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+ if err == io.EOF {
+ err = nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// Helper to swap in a custom deserializer
+func swapDeserializerHelper(stack *middleware.Stack) error {
+ _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketLocation",
+ }
+}
+
+// getGetBucketLocationBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketLocationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLocationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLocationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go new file mode 100644 index 000000000000..ca115886d50c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. To use GET, you must be the bucket owner. The following +// operations are related to GetBucketLogging: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// PutBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { + if params == nil { + params = &GetBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLoggingInput struct { + + // The bucket name for which to get the logging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLoggingOutput struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all log + // object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in + // the Amazon S3 API Reference. + LoggingEnabled *types.LoggingEnabled + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketLogging",
+ }
+}
+
+// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLoggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLoggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go new file mode 100644 index 000000000000..22cf389cd88f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. To use +// this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// The following operations are related to GetBucketMetricsConfiguration: +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// DeleteBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * +// ListBucketMetricsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * +// Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type GetBucketMetricsConfigurationOutput struct {
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *types.MetricsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketMetricsConfiguration",
+ }
+}
+
+// getGetBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor:
s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go new file mode 100644 index 000000000000..cbf103a7fd15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the notification configuration of a bucket. If notifications are not +// enabled on the bucket, the action returns an empty NotificationConfiguration +// element. By default, you must be the bucket owner to read the notification +// configuration of a bucket. However, the bucket owner can use a bucket policy to +// grant permission to other users to read this configuration with the +// s3:GetBucketNotification permission. For more information about setting and +// reading the notification configuration on a bucket, see Setting Up Notification +// of Bucket Events +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). For +// more information about bucket policies, see Using Bucket Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following action is related to GetBucketNotification: +// +// * PutBucketNotification +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &GetBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketNotificationConfigurationInput struct { + + // The name of the bucket for which to get the notification configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +// A container for specifying the notification configuration of the bucket. If this +// element is empty, notifications are turned off for the bucket. 
+type GetBucketNotificationConfigurationOutput struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *types.EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []types.LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events for + // which to publish messages. + QueueConfigurations []types.QueueConfiguration + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []types.TopicConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketNotificationConfiguration", + } +} + +// getGetBucketNotificationConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member 
value and a boolean indicating if the input has
+// a modeled bucket name.
+func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketNotificationConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
new file mode 100644
index 000000000000..571c9566cec7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
@@ -0,0 +1,177 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:GetBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see Specifying permissions in a policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html).
+// For information about Amazon S3 Object Ownership, see Using Object Ownership
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html).
+// The following operations are related to GetBucketOwnershipControls:
+//
+// *
+// PutBucketOwnershipControls
+//
+// * DeleteBucketOwnershipControls
+func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &GetBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketOwnershipControlsInput struct {
+
+ // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type GetBucketOwnershipControlsOutput struct {
+
+ // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or
+ // ObjectWriter) currently in effect for this Amazon S3 bucket.
+ OwnershipControls *types.OwnershipControls
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketOwnershipControls",
+ }
+}
+
+// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack,
s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go new file mode 100644 index 000000000000..f16c84cddf1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must have the GetBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. As a security precaution, the root user of the Amazon +// Web Services account that owns a bucket can always use this operation, even if +// the policy explicitly denies the root user the ability to perform this action. +// For more information about bucket policies, see Using Bucket Policies and User +// Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following action is related to GetBucketPolicy: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { + if params == nil { + params = &GetBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyInput struct { + + // The bucket name for which to get the bucket policy. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketPolicyOutput struct { + + // The bucket policy as a JSON document. + Policy *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketPolicy",
+ }
+}
+
+// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
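All of the vendored Get* bucket operations added in this diff share one generated calling convention: build a typed *Input, call the method on *s3.Client, and read back a typed *Output (with ResultMetadata) or an error. The sketch below shows that convention from the caller's side; it is a minimal usage example and not part of the vendored files — "example-bucket" is a placeholder, and credentials/region are assumed to come from the default AWS config sources.

```go
// Minimal caller-side sketch of the generated S3 operations vendored above.
// Assumptions: "example-bucket" is a placeholder bucket name, and
// credentials/region come from the environment or shared AWS config.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := s3.NewFromConfig(cfg)

	// Every generated operation takes a typed *Input and returns a typed *Output.
	loc, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatalf("GetBucketLocation: %v", err)
	}
	// Buckets in us-east-1 report an empty LocationConstraint.
	fmt.Println("location:", loc.LocationConstraint)

	pol, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatalf("GetBucketPolicy: %v", err)
	}
	fmt.Println("policy:", aws.ToString(pol.Policy))
}
```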
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go new file mode 100644 index 000000000000..570f60faa9a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 +// permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// For more information about when Amazon S3 considers a bucket public, see The +// Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon +// S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { + if params == nil { + params = &GetBucketPolicyStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyStatusInput struct { + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketPolicyStatusOutput struct { + + // The policy status for the specified bucket. + PolicyStatus *types.PolicyStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketPolicyStatus",
+ }
+}
+
+// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyStatusInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyStatusBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
new file mode 100644
index 000000000000..5d7f3115b8de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
@@ -0,0 +1,188 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the replication configuration of a bucket. It can take a while for a
+// put or delete of a replication configuration to propagate to all Amazon S3
+// systems, so a get request made soon after a put or delete can return a stale
+// result. For information about replication configuration, see Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+// S3 User Guide. This action requires permissions for the
+// s3:GetReplicationConfiguration action. For more information about permissions,
+// see Using Bucket Policies and User Policies
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). If
+// you include the Filter element in a replication configuration, you must also
+// include the DeleteMarkerReplication and Priority elements. The response also
+// returns those elements. For information about GetBucketReplication errors, see
+// List of replication-related error codes
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
+// The following operations are related to GetBucketReplication:
+//
+// *
+// PutBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// *
+// DeleteBucketReplication
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) {
+ if params == nil {
+ params = &GetBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketReplicationInput struct {
+
+ // The bucket name for which to get the replication information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type GetBucketReplicationOutput struct {
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ ReplicationConfiguration *types.ReplicationConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketReplication",
+ }
+}
+
+// getGetBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go new file mode 100644 index 000000000000..45f985b95a5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see Requester +// Pays Buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The +// following operations are related to GetBucketRequestPayment: +// +// * ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { + if params == nil { + params = &GetBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketRequestPaymentInput struct { + + // The name of the bucket for which to get the payment request configuration + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketRequestPaymentOutput struct { + + // Specifies who pays for the download and request fees. + Payer types.Payer + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketRequestPayment",
+ }
+}
+
+// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketRequestPaymentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketRequestPaymentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion:
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go new file mode 100644 index 000000000000..816d1b3e7b76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the tag set associated with the bucket. To use this operation, you must +// have permission to perform the s3:GetBucketTagging action. By default, the +// bucket owner has this permission and can grant this permission to others. +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSet +// +// * +// Description: There is no tag set associated with the bucket. +// +// The following +// operations are related to GetBucketTagging: +// +// * PutBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * +// DeleteBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { + if params == nil { + params = &GetBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketTaggingInput struct { + + // The name of the bucket for which to get the tagging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // Metadata pertaining to the operation's result. 
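A short usage sketch, assuming an *s3.Client constructed elsewhere (for example via s3.NewFromConfig); note that an untagged bucket surfaces the NoSuchTagSet error rather than an empty TagSet:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketTags lists each tag on the bucket as key=value.
func printBucketTags(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err // NoSuchTagSet is surfaced here when the bucket has no tag set
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}
```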
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "GetBucketTagging",
+	}
+}
+
+// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetBucketTaggingInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getGetBucketTaggingBucketMember,
+		},
+		UsePathStyle:            options.UsePathStyle,
+		UseAccelerate:           options.UseAccelerate,
+		SupportsAccelerate:      true,
+		TargetS3ObjectLambda:    false,
+		EndpointResolver:        options.EndpointResolver,
+		EndpointResolverOptions: options.EndpointOptions,
+		UseARNRegion:            options.UseARNRegion,
+		DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go new file mode 100644 index 000000000000..3657bd1ca4d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the versioning state of a bucket. To retrieve the versioning state of a +// bucket, you must be the bucket owner. This implementation also returns the MFA +// Delete status of the versioning state. If the MFA Delete status is enabled, the +// bucket owner must use an authentication device to change the versioning state of +// the bucket. The following operations are related to GetBucketVersioning: +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { + if params == nil { + params = &GetBucketVersioningInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketVersioningOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketVersioningInput struct { + + // The name of the bucket for which to get the versioning information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketVersioningOutput struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. + // If the bucket has never been so configured, this element is not returned. + MFADelete types.MFADeleteStatus + + // The versioning state of the bucket. + Status types.BucketVersioningStatus + + // Metadata pertaining to the operation's result. 
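A hedged sketch of checking the versioning state, assuming an existing *s3.Client; an empty Status means versioning has never been configured on the bucket:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// bucketVersioningEnabled reports whether versioning is currently enabled.
func bucketVersioningEnabled(ctx context.Context, client *s3.Client, bucket string) (bool, error) {
	out, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return false, err
	}
	fmt.Println("MFA delete:", out.MFADelete) // only present if ever configured
	return out.Status == types.BucketVersioningStatusEnabled, nil
}
```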
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "GetBucketVersioning",
+	}
+}
+
+// getGetBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetBucketVersioningInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getGetBucketVersioningBucketMember,
+		},
+		UsePathStyle:            options.UsePathStyle,
+		UseAccelerate:           options.UseAccelerate,
+		SupportsAccelerate:      true,
+		TargetS3ObjectLambda:    false,
+		EndpointResolver:        options.EndpointResolver,
+		EndpointResolverOptions: options.EndpointOptions,
+		UseARNRegion:            options.UseARNRegion,
+
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go new file mode 100644 index 000000000000..aa866b301876 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the website configuration for a bucket. To host website on Amazon S3, +// you can configure a bucket as website by adding a website configuration. For +// more information about hosting websites, see Hosting Websites on Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This GET +// action requires the S3:GetBucketWebsite permission. By default, only the bucket +// owner can read the bucket website configuration. However, bucket owners can +// allow other users to read the website configuration by writing a bucket policy +// granting them the S3:GetBucketWebsite permission. The following operations are +// related to DeleteBucketWebsite: +// +// * DeleteBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// +// * +// PutBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { + if params == nil { + params = &GetBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketWebsiteInput struct { + + // The bucket name for which to get the website configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketWebsiteOutput struct { + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *types.ErrorDocument + + // The name of the index document for the website (for example index.html). + IndexDocument *types.IndexDocument + + // Specifies the redirect behavior of all requests to a website endpoint of an + // Amazon S3 bucket. + RedirectAllRequestsTo *types.RedirectAllRequestsTo + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []types.RoutingRule + + // Metadata pertaining to the operation's result. 
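A usage sketch, assuming an existing *s3.Client; both optional output members are guarded because they are nil when the corresponding configuration element is absent:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printWebsiteConfig prints the index and error documents of a bucket's
// website configuration, if present.
func printWebsiteConfig(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err // an unconfigured bucket returns an error rather than empty output
	}
	if out.IndexDocument != nil {
		fmt.Println("index suffix:", aws.ToString(out.IndexDocument.Suffix))
	}
	if out.ErrorDocument != nil {
		fmt.Println("error document:", aws.ToString(out.ErrorDocument.Key))
	}
	return nil
}
```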
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "GetBucketWebsite",
+	}
+}
+
+// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetBucketWebsiteInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getGetBucketWebsiteBucketMember,
+		},
+		UsePathStyle:            options.UsePathStyle,
+		UseAccelerate:           options.UseAccelerate,
+		SupportsAccelerate:      true,
+		TargetS3ObjectLambda:    false,
+		EndpointResolver:        options.EndpointResolver,
+		EndpointResolverOptions: options.EndpointOptions,
+		UseARNRegion:            options.UseARNRegion,
+		DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go new file mode 100644 index 000000000000..92c38bed8158 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -0,0 +1,593 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "time" +) + +// Retrieves objects from Amazon S3. To use GET, you must have READ access to the +// object. If you grant READ access to the anonymous user, you can return the +// object without using an authorization header. An Amazon S3 bucket has no +// directory hierarchy such as you would find in a typical computer file system. +// You can, however, create a logical hierarchy by using object key names that +// imply a folder structure. For example, instead of naming an object sample.jpg, +// you can name it photos/2006/February/sample.jpg. To get an object from such a +// logical hierarchy, specify the full key name for the object in the GET +// operation. For a virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg, specify the resource as +// /photos/2006/February/sample.jpg. For a path-style request example, if you have +// the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more +// information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// For more information about returning the ACL of an object, see GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). If the +// object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive +// storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers, before you can retrieve the object you must first restore a copy +// using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this action returns an InvalidObjectStateError error. For information +// about restoring archived objects, see Restoring Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// Encryption request headers, like x-amz-server-side-encryption, should not be +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. 
If you encrypt an object by using server-side encryption with +// customer-provided encryption keys (SSE-C) when you store the object in Amazon +// S3, then when you GET the object, you must use the following headers: +// +// * +// x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// Assuming you have the relevant permission to read object tags, the response also +// returns the x-amz-tagging-count header that provides the count of number of tags +// associated with the object. You can use GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to +// retrieve the tag set associated with an object. Permissions You need the +// relevant read object (or version) permission for this operation. For more +// information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If +// the object you request does not exist, the error Amazon S3 returns depends on +// whether you also have the s3:ListBucket permission. +// +// * If you have the +// s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status +// code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket +// permission, Amazon S3 will return an HTTP status code 403 ("access denied") +// error. +// +// Versioning By default, the GET action returns the current version of an +// object. To return a different version, use the versionId subresource. +// +// * If you +// supply a versionId, you need the s3:GetObjectVersion permission to access a +// specific version of an object. If you request a specific version, you do not +// need to have the s3:GetObject permission. +// +// * If the current version of the +// object is a delete marker, Amazon S3 behaves as if the object was deleted and +// includes x-amz-delete-marker: true in the response. +// +// For more information about +// versioning, see PutBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +// Overriding Response Header Values There are times when you want to override +// certain response header values in a GET response. For example, you might +// override the Content-Disposition response header value in your GET request. You +// can override values for a set of response headers using the following query +// parameters. These response header values are sent only on a successful request, +// that is, when status code 200 OK is returned. The set of headers you can +// override using these parameters is a subset of the headers that Amazon S3 +// accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values in +// the GET response, you use the following request parameters. You must sign the +// request, either using an Authorization header or a presigned URL, when using +// these parameters. They cannot be used with an unsigned (anonymous) request. 
+// +// * +// response-content-type +// +// * response-content-language +// +// * response-expires +// +// * +// response-cache-control +// +// * response-content-disposition +// +// * +// response-content-encoding +// +// Additional Considerations about Request Headers If +// both of the If-Match and If-Unmodified-Since headers are present in the request +// as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. If +// both of the If-None-Match and If-Modified-Since headers are present in the +// request as follows: If-None-Match condition evaluates to false, and; +// If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified +// response code. For more information about conditional requests, see RFC 7232 +// (https://tools.ietf.org/html/rfc7232). The following operations are related to +// GetObject: +// +// * ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// * +// GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { + if params == nil { + params = &GetObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectInput struct { + + // The bucket name containing the object. When using this action with an access + // point, you must direct requests to the access point hostname. The access point + // hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using an Object Lambda access point the + // hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When using this + // action with Amazon S3 on Outposts, you must direct requests to the S3 on + // Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object to get. + // + // This member is required. + Key *string + + // To retrieve the checksum, this mode must be enabled. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. 
+ IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber int32 + + // Downloads the specified range bytes of an object. For more information about the + // HTTP Range header, see + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). Amazon S3 + // doesn't support retrieving multiple ranges of data per GET request. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use to when decrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt the + // data. This value is used to decrypt the object when recovering it and must match + // the one used when storing the data. The key must be appropriate for use with the + // algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectOutput struct { + + // Indicates that a range of bytes was specified. + AcceptRanges *string + + // Object data. + Body io.ReadCloser + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength int64 + + // The portion of the object returned in the response. + ContentRange *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Creation date of the object. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. 
For example, using SOAP, you + // can create metadata whose values are not legal HTTP headers. + MissingMeta int32 + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode currently in place for this object. + ObjectLockMode types.ObjectLockMode + + // The date and time when this object's Object Lock will expire. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount int32 + + // Amazon S3 can return this if your request involves a bucket that is either a + // source or destination in a replication rule. + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Provides information about object restoration action and expiration time of the + // restored object copy. + Restore *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + StorageClass types.StorageClass + + // The number of tags, if any, on the object. + TagCount int32 + + // Version of the object. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addGetObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObject", + } +} + +// getGetObjectRequestValidationModeMember gets the request checksum validation +// mode provided as input. 
+func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) {
+	in := input.(*GetObjectInput)
+	if len(in.ChecksumMode) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumMode), true
+}
+
+func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{
+		GetValidationMode:             getGetObjectRequestValidationModeMember,
+		ValidationAlgorithms:          []string{"CRC32", "CRC32C", "SHA256", "SHA1"},
+		IgnoreMultipartValidation:     true,
+		LogValidationSkipped:          true,
+		LogMultipartValidationSkipped: true,
+	})
+}
+
+// getGetObjectBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getGetObjectBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetObjectInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getGetObjectBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
+
+// PresignGetObject is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+	if params == nil {
+		params = &GetObjectInput{}
+	}
+	options := c.options.copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+	result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns,
+		c.client.addOperationGetObjectMiddlewares,
+		presignConverter(options).convertToPresignMiddleware,
+		addGetObjectPayloadAsUnsigned,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*v4.PresignedHTTPRequest)
+	return out, nil
+}
+
+func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+	v4.RemoveContentSHA256HeaderMiddleware(stack)
+	v4.RemoveComputePayloadSHA256Middleware(stack)
+	return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
new file mode 100644
index 000000000000..709e62ff755e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
@@ -0,0 +1,224 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the access control list (ACL) of an object.
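Building on the PresignClient above, a hedged sketch of generating a time-limited GET URL; the 15-minute expiry is an arbitrary illustrative choice:

```go
package examples

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// presignedGetURL returns a URL granting time-limited GET access to one object.
func presignedGetURL(ctx context.Context, client *s3.Client, bucket, key string) (string, error) {
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		return "", err
	}
	fmt.Println("method:", req.Method)
	return req.URL, nil
}
```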
To use this operation, you +// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more +// information, see Mapping of ACL permissions and access policy permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide This action is not supported by Amazon S3 on +// Outposts. Versioning By default, GET returns ACL information about the current +// version of an object. To return ACL information about a different version, use +// the versionId subresource. If your bucket uses the bucket owner enforced setting +// for S3 Object Ownership, requests to read ACLs are still supported and return +// the bucket-owner-full-control ACL with the owner being the account that created +// the bucket. For more information, see Controlling object ownership and +// disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetObjectAcl: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * +// PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { + if params == nil { + params = &GetObjectAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAclInput struct { + + // The bucket name that contains the object for which to get the ACL information. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key of the object for which to get the ACL information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. 
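A usage sketch, assuming an existing *s3.Client; the nil-able members of the output are guarded before use:

```go
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectGrants lists each grantee/permission pair on an object's ACL.
func printObjectGrants(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		fmt.Println("owner:", aws.ToString(out.Owner.DisplayName))
	}
	for _, g := range out.Grants {
		if g.Grantee != nil {
			fmt.Printf("%v: %v\n", g.Grantee.Type, g.Permission)
		}
	}
	return nil
}
```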
+	VersionId *string
+
+	noSmithyDocumentSerde
+}
+
+type GetObjectAclOutput struct {
+
+	// A list of grants.
+	Grants []types.Grant
+
+	// Container for the bucket owner's display name and ID.
+	Owner *types.Owner
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged types.RequestCharged
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpGetObjectAclValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "GetObjectAcl",
+	}
+}
+
+// getGetObjectAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetObjectAclBucketMember(input interface{}) (*string, bool) {
+	in := input.(*GetObjectAclInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput:
getGetObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go new file mode 100644 index 000000000000..fb1683e7d2db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -0,0 +1,363 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Retrieves all the metadata from an object without returning the object itself. +// This action is useful if you're interested only in an object's metadata. To use +// GetObjectAttributes, you must have READ access to the object. +// GetObjectAttributes combines the functionality of GetObjectAcl, +// GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention, +// GetObjectTagging, HeadObject, and ListParts. All of the data returned with each +// of those individual calls can be returned with a single call to +// GetObjectAttributes. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// * Encryption request headers, such as +// x-amz-server-side-encryption, should not be sent for GET requests if your object +// uses server-side encryption with Amazon Web Services KMS keys stored in Amazon +// Web Services Key Management Service (SSE-KMS) or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). If your object does use these types +// of keys, you'll get an HTTP 400 Bad Request error. +// +// * The last modified property +// in this case is the creation date of the object. +// +// Consider the following when +// using request headers: +// +// * If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows, then Amazon S3 returns the HTTP +// status code 200 OK and the data requested: +// +// * If-Match condition evaluates to +// true. +// +// * If-Unmodified-Since condition evaluates to false. +// +// * If both of the +// If-None-Match and If-Modified-Since headers are present in the request as +// follows, then Amazon S3 returns the HTTP status code 304 Not Modified: +// +// * +// If-None-Match condition evaluates to false. +// +// * If-Modified-Since condition +// evaluates to true. 
+// +// For more information about conditional requests, see RFC +// 7232 (https://tools.ietf.org/html/rfc7232). Permissions The permissions that you +// need to use this operation depend on whether the bucket is versioned. If the +// bucket is versioned, you need both the s3:GetObjectVersion and +// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is +// not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. +// For more information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in +// the Amazon S3 User Guide. If the object that you request does not exist, the +// error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 +// returns an HTTP status code 404 Not Found ("no such key") error. +// +// * If you don't +// have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 +// Forbidden ("access denied") error. +// +// The following actions are related to +// GetObjectAttributes: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// * +// GetObjectLegalHold +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// * +// GetObjectLockConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// * +// GetObjectRetention +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// * +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * +// HeadObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// * +// ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { + if params == nil { + params = &GetObjectAttributesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAttributesInput struct { + + // The name of the bucket that contains the object. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // An XML header that specifies the fields at the root level that you want returned + // in the response. Fields that you do not specify are not returned. + // + // This member is required. + ObjectAttributes []types.ObjectAttributes + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts int32 + + // Specifies the part after which listing should begin. Only parts with higher part + // numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // The version ID used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectAttributesOutput struct { + + // The checksum or digest of the object. + Checksum *types.Checksum + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string + + // The creation date of the object. + LastModified *time.Time + + // A collection of parts associated with a multipart upload. + ObjectParts *types.GetObjectAttributesParts + + // The size of the object in bytes. + ObjectSize int64 + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). 
+ StorageClass types.StorageClass + + // The version ID of the object. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectAttributes", + } +} + +// getGetObjectAttributesBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectAttributesInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectAttributesBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go new file mode 100644 index 000000000000..a2446ac32c90 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets an object's current legal hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This +// action is not supported by Amazon S3 on Outposts. The following action is +// related to GetObjectLegalHold: +// +// * GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { + if params == nil { + params = &GetObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLegalHoldInput struct { + + // The bucket name containing the object whose legal hold status you want to + // retrieve. When using this action with an access point, you must direct requests + // to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose legal hold status you want to retrieve. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID of the object whose legal hold status you want to retrieve. 
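+	//
+	// For illustration only (placeholder values; assumes a client built with
+	// s3.NewFromConfig and the aws helper package), a versioned lookup might be:
+	//
+	//	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
+	//		Bucket:    aws.String("example-bucket"),
+	//		Key:       aws.String("example-key"),
+	//		VersionId: aws.String("example-version-id"),
+	//	})
+	//	if err == nil && out.LegalHold != nil {
+	//		fmt.Println(out.LegalHold.Status) // e.g. types.ObjectLockLegalHoldStatusOn
+	//	}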
+ VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectLegalHoldOutput struct { + + // The current legal hold status for the specified object. + LegalHold *types.ObjectLockLegalHold + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectLegalHold", + } +} + +// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: 
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go new file mode 100644 index 000000000000..91793c133856 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). The +// following action is related to GetObjectLockConfiguration: +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { + if params == nil { + params = &GetObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to retrieve. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetObjectLockConfigurationOutput struct { + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Metadata pertaining to the operation's result. 
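+	//
+	// For illustration only (placeholder bucket name; assumes a client built
+	// with s3.NewFromConfig), reading a bucket's lock configuration might be:
+	//
+	//	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
+	//		Bucket: aws.String("example-bucket"),
+	//	})
+	//	if err == nil && out.ObjectLockConfiguration != nil {
+	//		fmt.Println(out.ObjectLockConfiguration.ObjectLockEnabled)
+	//	}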
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectLockConfiguration", + } +} + +// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: 
options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go new file mode 100644 index 000000000000..33fc04897a3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This +// action is not supported by Amazon S3 on Outposts. The following action is +// related to GetObjectRetention: +// +// * GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { + if params == nil { + params = &GetObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectRetentionInput struct { + + // The bucket name containing the object whose retention settings you want to + // retrieve. When using this action with an access point, you must direct requests + // to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose retention settings you want to retrieve. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID for the object whose retention settings you want to retrieve. 
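+	//
+	// For illustration only (placeholder values; assumes a client built with
+	// s3.NewFromConfig), a versioned lookup might be:
+	//
+	//	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
+	//		Bucket:    aws.String("example-bucket"),
+	//		Key:       aws.String("example-key"),
+	//		VersionId: aws.String("example-version-id"),
+	//	})
+	//	if err == nil && out.Retention != nil {
+	//		fmt.Println(out.Retention.Mode, out.Retention.RetainUntilDate)
+	//	}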
+ VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectRetentionOutput struct { + + // The container element for an object's retention settings. + Retention *types.ObjectLockRetention + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectRetention", + } +} + +// getGetObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: 
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go new file mode 100644 index 000000000000..cec5210c2430 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. To use this operation, you must have +// permission to perform the s3:GetObjectTagging action. By default, the GET action +// returns information about current version of an object. For a versioned bucket, +// you can have multiple versions of an object in your bucket. To retrieve tags of +// any other version, use the versionId query parameter. You also need permission +// for the s3:GetObjectVersionTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. For information about the +// Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The +// following actions are related to GetObjectTagging: +// +// * DeleteObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// PutObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { + if params == nil { + params = &GetObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTaggingInput struct { + + // The bucket name containing the object for which to get the tagging information. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which to get the tagging information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The versionId of the object for which to get the tagging information. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // The versionId of the object for which you got the tagging information. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != 
nil { + return err + } + if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectTagging", + } +} + +// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go new file mode 100644 index 000000000000..fa71442c286c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Using BitTorrent with Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). You can get +// torrent only for objects that are less than 5 GB in size, and that are not +// encrypted using server-side encryption with a customer-provided encryption key. +// To use GET, you must have READ access to the object. This action is not +// supported by Amazon S3 on Outposts. 
The following action is related to +// GetObjectTorrent: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { + if params == nil { + params = &GetObjectTorrentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTorrentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTorrentInput struct { + + // The name of the bucket containing the object for which to get the torrent files. + // + // This member is required. + Bucket *string + + // The object key for which to get the information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type GetObjectTorrentOutput struct { + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
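+	//
+	// For illustration only (placeholder names; assumes a client built with
+	// s3.NewFromConfig plus the io and os packages), saving the torrent file
+	// might look like:
+	//
+	//	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
+	//		Bucket: aws.String("example-bucket"),
+	//		Key:    aws.String("example-key"),
+	//	})
+	//	if err == nil {
+	//		defer out.Body.Close()
+	//		f, _ := os.Create("example-key.torrent")
+	//		defer f.Close()
+	//		io.Copy(f, out.Body) // Body streams the bencoded dictionary
+	//	}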
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectTorrent", + } +} + +// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTorrentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTorrentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go new file mode 100644 index 000000000000..eb42c7d275ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see Specifying Permissions in a +// Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the +// PublicAccessBlock settings are different between the bucket and the account, +// Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 +// Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { + if params == nil { + params = &GetPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetPublicAccessBlockOutput struct { + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. 
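+	//
+	// For illustration only (placeholder bucket name; assumes a client built
+	// with s3.NewFromConfig), this is typically read as:
+	//
+	//	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
+	//		Bucket: aws.String("example-bucket"),
+	//	})
+	//	if err == nil && out.PublicAccessBlockConfiguration != nil {
+	//		fmt.Printf("%+v\n", *out.PublicAccessBlockConfiguration)
+	//	}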
+ PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetPublicAccessBlock", + } +} + +// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*GetPublicAccessBlockInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetPublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go new file mode 100644 index 000000000000..7344f202e0b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -0,0 +1,510 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// This action is useful to determine if a bucket exists and you have permission to +// access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. If the bucket does not exist or you do not have +// permission to access it, the HEAD request returns a generic 404 Not Found or 403 +// Forbidden code. A message body is not included, so you cannot determine the +// exception beyond these error codes. To use this operation, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// To use this API against an access point, you must provide the alias of the +// access point in place of the bucket name or specify the access point ARN. When +// using the access point ARN, you must direct requests to the access point +// hostname. The access point hostname takes the form +// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the +// Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For +// more information see, Using access points +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { + if params == nil { + params = &HeadBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadBucketInput struct { + + // The bucket name. When using this action with an access point, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type HeadBucketOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpHeadBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. 
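+//
+// For illustration only (placeholder bucket name; assumes an existing
+// *Client, which satisfies this interface), a bare existence and permission
+// check might look like:
+//
+//	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String("example-bucket")})
+//	// err == nil means the bucket exists and the caller may access it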
+type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + +// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter +type BucketExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note that + // MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketExistsWaiter defines the waiters for BucketExists +type BucketExistsWaiter struct { + client HeadBucketAPIClient + + options BucketExistsWaiterOptions +} + +// NewBucketExistsWaiter constructs a BucketExistsWaiter. +func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter { + options := BucketExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
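+//
+// An illustrative sketch, assuming client is an *s3.Client and "my-bucket" is a
+// placeholder, that waits up to two minutes for a bucket to appear:
+//
+//	waiter := s3.NewBucketExistsWaiter(client, func(o *s3.BucketExistsWaiterOptions) {
+//		o.MinDelay = 2 * time.Second // optional: poll faster than the 5s default
+//	})
+//	if _, err := waiter.WaitForOutput(ctx, &s3.HeadBucketInput{
+//		Bucket: aws.String("my-bucket"),
+//	}, 2*time.Minute); err != nil {
+//		// the bucket did not appear within the two-minute window
+//	}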
+func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter") +} + +func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return true, nil + } + } + + return true, nil +} + +// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter +type BucketNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. 
This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketNotExistsWaiter defines the waiters for BucketNotExists +type BucketNotExistsWaiter struct { + client HeadBucketAPIClient + + options BucketNotExistsWaiterOptions +} + +// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter. +func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter { + options := BucketNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter") +} + +func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "HeadBucket", + } +} + +// getHeadBucketBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getHeadBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go new file mode 100644 index 000000000000..1e745a7e5cee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -0,0 +1,884 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. A HEAD request has the +// same options as a GET action on an object. 
The response is identical to the GET +// response except that there is no response body. Because of this, if the HEAD +// request generates an error, it returns a generic 404 Not Found or 403 Forbidden +// code. It is not possible to retrieve the exact exception beyond these error +// codes. If you encrypt an object by using server-side encryption with +// customer-provided encryption keys (SSE-C) when you store the object in Amazon +// S3, then when you retrieve the metadata from the object, you must use the +// following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// * +// Encryption request headers, like x-amz-server-side-encryption, should not be +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. +// +// * The last modified property in this case is the creation +// date of the object. +// +// Request headers are limited to 8 KB in size. For more +// information, see Common Request Headers +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// Consider the following when using request headers: +// +// * Consideration 1 – If both +// of the If-Match and If-Unmodified-Since headers are present in the request as +// follows: +// +// * If-Match condition evaluates to true, and; +// +// * If-Unmodified-Since +// condition evaluates to false; +// +// Then Amazon S3 returns 200 OK and the data +// requested. +// +// * Consideration 2 – If both of the If-None-Match and +// If-Modified-Since headers are present in the request as follows: +// +// * +// If-None-Match condition evaluates to false, and; +// +// * If-Modified-Since condition +// evaluates to true; +// +// Then Amazon S3 returns the 304 Not Modified response +// code. +// +// For more information about conditional requests, see RFC 7232 +// (https://tools.ietf.org/html/rfc7232). Permissions You need the relevant read +// object (or version) permission for this operation. For more information, see +// Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If +// the object you request does not exist, the error Amazon S3 returns depends on +// whether you also have the s3:ListBucket permission. +// +// * If you have the +// s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code +// 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, +// Amazon S3 returns an HTTP status code 403 ("access denied") error. 
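+//
+// As a minimal illustrative sketch (client, ctx, and the bucket/key names are
+// placeholders), fetching an object's metadata looks like:
+//
+//	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("path/to/object"),
+//	})
+//	if err == nil {
+//		fmt.Println(out.ContentLength, aws.ToString(out.ETag))
+//	}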
+// +// The +// following actions are related to HeadObject: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { + if params == nil { + params = &HeadObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadObjectInput struct { + + // The name of the bucket containing the object. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // To retrieve the checksum, this parameter must be enabled. In addition, if you + // enable ChecksumMode and the object is encrypted with Amazon Web Services Key + // Management Service (Amazon Web Services KMS), you must have permission to use + // the kms:Decrypt action for the request to succeed. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. + IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. 
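+	//
+	// An illustrative sketch (placeholder names): a ranged HEAD for part 1 of a
+	// multipart object, whose response also carries PartsCount:
+	//
+	//	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+	//		Bucket:     aws.String("my-bucket"),
+	//		Key:        aws.String("big-object"),
+	//		PartNumber: 1,
+	//	})
+	//	if err == nil {
+	//		_ = out.PartsCount
+	//	}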
+ PartNumber int32 + + // Because HeadObject returns only the metadata for an object, this parameter has + // no effect. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type HeadObjectOutput struct { + + // Indicates that a range of bytes was specified. + AcceptRanges *string + + // The archive state of the head object. + ArchiveStatus types.ArchiveStatus + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. 
With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength int64 + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Creation date of the object. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, you + // can create metadata whose values are not legal HTTP headers. + MissingMeta int32 + + // Specifies whether a legal hold is in effect for this object. This header is only + // returned if the requester has the s3:GetObjectLegalHold permission. This header + // is not returned if the specified version of this object has never had a legal + // hold applied. For more information about S3 Object Lock, see Object Lock + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode, if any, that's in effect for this object. This header is + // only returned if the requester has the s3:GetObjectRetention permission. For + // more information about S3 Object Lock, see Object Lock + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockMode types.ObjectLockMode + + // The date and time when the Object Lock retention period expires. This header is + // only returned if the requester has the s3:GetObjectRetention permission. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount int32 + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. 
In replication, you have + // a source bucket on which you configure replication and destination bucket or + // buckets where Amazon S3 stores object replicas. When you request an object + // (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will + // return the x-amz-replication-status header in the response as follows: + // + // * If + // requesting an object from the source bucket, Amazon S3 will return the + // x-amz-replication-status header if the object in your request is eligible for + // replication. For example, suppose that in your replication configuration, you + // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key + // prefix TaxDocs. Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf, are eligible for replication. For any object request with + // this key name prefix, Amazon S3 will return the x-amz-replication-status header + // with value PENDING, COMPLETED or FAILED indicating object replication status. + // + // * + // If requesting an object from a destination bucket, Amazon S3 will return the + // x-amz-replication-status header with value REPLICA if the object in your request + // is a replica that Amazon S3 created and there is no replica modification + // replication in progress. + // + // * When replicating objects to multiple destination + // buckets, the x-amz-replication-status header acts differently. The header of the + // source object will only return a value of COMPLETED when replication is + // successful to all destinations. The header will remain at value PENDING until + // replication has completed for all destinations. If one or more destinations + // fails replication the header will return FAILED. + // + // For more information, see + // Replication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) or an + // archive copy is already restored. If an archive copy is already restored, the + // header value indicates when Amazon S3 is scheduled to delete the object copy. + // For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec + // 2012 00:00:00 GMT" If the object restoration is in progress, the header returns + // the value ongoing-request="true". For more information about archiving objects, + // see Transitioning Objects: General Considerations + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + Restore *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. 
+ SSECustomerKeyMD5 *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // If the object is stored using server-side encryption either with an Amazon Web + // Services KMS key or an Amazon S3-managed encryption key, the response includes + // this header with the value of the server-side encryption algorithm used when + // storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass types.StorageClass + + // Version of the object. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpHeadObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// 
HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + +// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter +type ObjectExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note that + // MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectExistsWaiter defines the waiters for ObjectExists +type ObjectExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectExistsWaiterOptions +} + +// NewObjectExistsWaiter constructs a ObjectExistsWaiter. +func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter { + options := ObjectExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
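+//
+// An illustrative sketch (client and the object names are placeholders) that
+// waits up to five minutes for a freshly written object to become visible, then
+// uses the returned metadata:
+//
+//	waiter := s3.NewObjectExistsWaiter(client)
+//	out, err := waiter.WaitForOutput(ctx, &s3.HeadObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("uploads/report.csv"),
+//	}, 5*time.Minute)
+//	if err == nil {
+//		_ = out.ContentLength // metadata from the successful HeadObject
+//	}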
+func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter") +} + +func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var apiErr smithy.APIError + ok := errors.As(err, &apiErr) + if !ok { + return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) + } + + if "NotFound" == apiErr.ErrorCode() { + return true, nil + } + } + + return true, nil +} + +// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter +type ObjectNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. 
+ MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectNotExistsWaiter defines the waiters for ObjectNotExists +type ObjectNotExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectNotExistsWaiterOptions +} + +// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter. +func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter { + options := ObjectNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter") +} + +func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err != nil { + var apiErr smithy.APIError + ok := errors.As(err, &apiErr) + if !ok { + return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) + } + + if "NotFound" == apiErr.ErrorCode() { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "HeadObject", + } +} + +// getHeadObjectBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getHeadObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignHeadObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. 
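+//
+// A minimal illustrative sketch (client and the bucket/key names are
+// placeholders; PresignOptions.Expires bounds the URL lifetime):
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignHeadObject(ctx, &s3.HeadObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("path/to/object"),
+//	}, func(o *s3.PresignOptions) {
+//		o.Expires = 15 * time.Minute
+//	})
+//	if err == nil {
+//		_ = req.URL // usable by any HTTP client, together with req.SignedHeader
+//	}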
+func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &HeadObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns, + c.client.addOperationHeadObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addHeadObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go new file mode 100644 index 000000000000..0a0373f29047 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -0,0 +1,214 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. This action supports list pagination and +// does not return more than 100 configurations at a time. You should always check +// the IsTruncated element in the response. If there are no more configurations to +// list, IsTruncated is set to false. If there are more configurations to list, +// IsTruncated is set to true, and there will be a value in NextContinuationToken. +// You use the NextContinuationToken value to continue the pagination of the list +// by passing the value in continuation-token in the request to GET the next page. +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
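+//
+// An illustrative sketch of that pagination contract (client and the bucket
+// name are placeholders):
+//
+//	var token *string
+//	for {
+//		out, err := client.ListBucketAnalyticsConfigurations(ctx, &s3.ListBucketAnalyticsConfigurationsInput{
+//			Bucket:            aws.String("my-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		for _, c := range out.AnalyticsConfigurationList {
+//			_ = aws.ToString(c.Id)
+//		}
+//		if !out.IsTruncated {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}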
+// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketAnalyticsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketAnalyticsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketAnalyticsConfigurationsInput struct { + + // The name of the bucket from which analytics configurations are retrieved. + // + // This member is required. + Bucket *string + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type ListBucketAnalyticsConfigurationsOutput struct { + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []types.AnalyticsConfiguration + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of analytics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated bool + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "ListBucketAnalyticsConfigurations", + } +} + +// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListBucketAnalyticsConfigurationsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + 
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go new file mode 100644 index 000000000000..972a69c99fcb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -0,0 +1,206 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
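+//
+// An illustrative sketch (client and the bucket name are placeholders) of
+// listing the configurations:
+//
+//	out, err := client.ListBucketIntelligentTieringConfigurations(ctx,
+//		&s3.ListBucketIntelligentTieringConfigurationsInput{
+//			Bucket: aws.String("my-bucket"),
+//		})
+//	if err == nil {
+//		for _, c := range out.IntelligentTieringConfigurationList {
+//			_ = aws.ToString(c.Id)
+//		}
+//	}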
+// Operations related to ListBucketIntelligentTieringConfigurations include:
+//
+// *
+// DeleteBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+//
+// *
+// PutBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
+//
+// *
+// GetBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketIntelligentTieringConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketIntelligentTieringConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketIntelligentTieringConfigurationsInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ noSmithyDocumentSerde
+}
+
+type ListBucketIntelligentTieringConfigurationsOutput struct {
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The list of S3 Intelligent-Tiering configurations for a bucket.
+ IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration
+
+ // Indicates whether the returned list of S3 Intelligent-Tiering configurations is
+ // complete. A value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated bool
+
+ // The marker used to continue this S3 Intelligent-Tiering configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketIntelligentTieringConfigurations",
+ }
+}
+
+// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketIntelligentTieringConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
new file mode 100644
index 000000000000..e6c8c79a84ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
@@ -0,0 +1,215 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of inventory configurations for the bucket. You can have up to
+// 1,000 inventory configurations per bucket. This action supports list pagination
+// and does not return more than 100 configurations at a time. Always check the
+// IsTruncated element in the response. If there are no more configurations to
+// list, IsTruncated is set to false. If there are more configurations to list,
+// IsTruncated is set to true, and there is a value in NextContinuationToken. You
+// use the NextContinuationToken value to continue the pagination of the list by
+// passing the value in continuation-token in the request to GET the next page. To
+// use this operation, you must have permissions to perform the
+// s3:GetInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
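+//
+// Illustrative usage (a minimal sketch, assuming a configured *s3.Client named
+// client, a ctx context.Context, a hypothetical bucket name, and the usual aws
+// and fmt imports). The ContinuationToken/NextContinuationToken pair drives
+// pagination:
+//
+//  var token *string
+//  for {
+//      out, err := client.ListBucketInventoryConfigurations(ctx,
+//          &s3.ListBucketInventoryConfigurationsInput{
+//              Bucket:            aws.String("my-bucket"),
+//              ContinuationToken: token,
+//          })
+//      if err != nil {
+//          return err
+//      }
+//      for _, cfg := range out.InventoryConfigurationList {
+//          fmt.Println(aws.ToString(cfg.Id))
+//      }
+//      if !out.IsTruncated {
+//          break
+//      }
+//      token = out.NextContinuationToken
+//  }
+//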
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) The +// following operations are related to ListBucketInventoryConfigurations: +// +// * +// GetBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * +// DeleteBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * +// PutBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { + if params == nil { + params = &ListBucketInventoryConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketInventoryConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketInventoryConfigurationsInput struct { + + // The name of the bucket containing the inventory configurations to retrieve. + // + // This member is required. + Bucket *string + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type ListBucketInventoryConfigurationsOutput struct { + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []types.InventoryConfiguration + + // Tells whether the returned list of inventory configurations is complete. A value + // of true indicates that the list is not complete and the NextContinuationToken is + // provided for a subsequent request. + IsTruncated bool + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketInventoryConfigurations",
+ }
+}
+
+// getListBucketInventoryConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketInventoryConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go new file mode 100644 index 000000000000..50b207af615f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -0,0 +1,218 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. This +// action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there are no +// more configurations to list, IsTruncated is set to false. If there are more +// configurations to list, IsTruncated is set to true, and there is a value in +// NextContinuationToken. You use the NextContinuationToken value to continue the +// pagination of the list by passing the value in continuation-token in the request +// to GET the next page. To use this operation, you must have permissions to +// perform the s3:GetMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
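+//
+// Illustrative usage (a minimal sketch, assuming a configured *s3.Client named
+// client, a ctx context.Context, a hypothetical bucket name, and the usual aws
+// and fmt imports); the same ContinuationToken-based loop shown for the other
+// configuration-listing operations applies here:
+//
+//  out, err := client.ListBucketMetricsConfigurations(ctx,
+//      &s3.ListBucketMetricsConfigurationsInput{Bucket: aws.String("my-bucket")})
+//  if err != nil {
+//      return err
+//  }
+//  for _, cfg := range out.MetricsConfigurationList {
+//      fmt.Println(aws.ToString(cfg.Id))
+//  }
+//  // If out.IsTruncated is true, pass out.NextContinuationToken as
+//  // ContinuationToken in a follow-up request.
+//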
+// The following operations are related to ListBucketMetricsConfigurations: +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * +// DeleteBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketMetricsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketMetricsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketMetricsConfigurationsInput struct { + + // The name of the bucket containing the metrics configurations to retrieve. + // + // This member is required. + Bucket *string + + // The marker that is used to continue a metrics configuration listing that has + // been truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type ListBucketMetricsConfigurationsOutput struct { + + // The marker that is used as a starting point for this metrics configuration list + // response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of metrics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated bool + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []types.MetricsConfiguration + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketMetricsConfigurations",
+ }
+}
+
+// getListBucketMetricsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketMetricsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go new file mode 100644 index 000000000000..7a3de38f4287 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -0,0 +1,145 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if params == nil { + params = &ListBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketsInput struct { + noSmithyDocumentSerde +} + +type ListBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // The owner of the buckets listed. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "ListBuckets", + } +} + +func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go new file mode 100644 index 000000000000..af281a25287e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -0,0 +1,310 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This action lists in-progress multipart uploads. An in-progress multipart upload
+// is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted. This action returns
+// at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the
+// maximum number of uploads a response can include, which is also the default
+// value. You can further limit the number of uploads in a response by specifying
+// the max-uploads request parameter. If additional multipart uploads
+// satisfy the list criteria, the response will contain an IsTruncated element with
+// the value true. To list the additional multipart uploads, use the key-marker and
+// upload-id-marker request parameters. In the response, the uploads are sorted by
+// key. If your application has initiated more than one multipart upload using the
+// same object key, then uploads in the response are first sorted by key.
+// Additionally, uploads are sorted in ascending order within each key by the
+// upload initiation time. For more information on multipart uploads, see Uploading
+// Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
+// information on permissions required to use the multipart upload API, see
+// Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The
+// following operations are related to ListMultipartUploads:
+//
+// *
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
+ if params == nil {
+ params = &ListMultipartUploadsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListMultipartUploadsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListMultipartUploadsInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated. When using
+ // this action with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
+ // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
+ // hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
+ // Amazon S3 User Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Character you use to group keys. All keys that contain the same string between
+ // the prefix, if specified, and the first occurrence of the delimiter after the
+ // prefix are grouped under a single result element, CommonPrefixes. If you don't
+ // specify the prefix parameter, then the substring starts at the beginning of the
+ // key. The keys that are grouped under CommonPrefixes result element are not
+ // returned elsewhere in the response.
+ Delimiter *string
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies the
+ // encoding method to use. An object key may contain any Unicode character;
+ // however, XML 1.0 parser cannot parse some characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response.
+ EncodingType types.EncodingType
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin. If upload-id-marker is not specified, only the
+ // keys lexicographically greater than the specified key-marker will be included in
+ // the list. If upload-id-marker is specified, any multipart uploads for a key
+ // equal to the key-marker might also be included, provided those multipart uploads
+ // have upload IDs lexicographically greater than the specified upload-id-marker.
+ KeyMarker *string
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the
+ // response body. 1,000 is the maximum number of uploads that can be returned in a
+ // response.
+ MaxUploads int32
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings of
+ // keys. (You can think of using prefix to make groups in the same way you'd use a
+ // folder in a file system.)
+ Prefix *string
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter is
+ // ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker.
+ UploadIdMarker *string + + noSmithyDocumentSerde +} + +type ListMultipartUploadsOutput struct { + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + CommonPrefixes []types.CommonPrefix + + // Contains the delimiter you specified in the request. If you don't specify a + // delimiter in your request, this element is absent from the response. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. If you + // specify encoding-type request parameter, Amazon S3 includes this element in the + // response, and returns encoded key name values in the following response + // elements: Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType types.EncodingType + + // Indicates whether the returned list of multipart uploads is truncated. A value + // of true indicates that the list was truncated. The list can be truncated if the + // number of multipart uploads exceeds the limit allowed or specified by max + // uploads. + IsTruncated bool + + // The key at or after which the listing began. + KeyMarker *string + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads int32 + + // When a list is truncated, this element specifies the value that should be used + // for the key-marker request parameter in a subsequent request. + NextKeyMarker *string + + // When a list is truncated, this element specifies the value that should be used + // for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string + + // Upload ID after which listing began. + UploadIdMarker *string + + // Container for elements related to a particular multipart upload. A response can + // contain zero or more Upload elements. + Uploads []types.MultipartUpload + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListMultipartUploads",
+ }
+}
+
+// getListMultipartUploadsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListMultipartUploadsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListMultipartUploadsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go new file mode 100644 index 000000000000..f2d2b9fa9518 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -0,0 +1,276 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns metadata about all versions of the objects in a bucket. You can also use +// request parameters as selection criteria to return metadata about a subset of +// all the object versions. To use this operation, you must have permissions to +// perform the s3:ListBucketVersions action. Be aware of the name difference. A 200 +// OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// To use this operation, you must have READ access to the bucket. This action is +// not supported by Amazon S3 on Outposts. The following operations are related to +// ListObjectVersions: +// +// * ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { + if params == nil { + params = &ListObjectVersionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectVersionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectVersionsInput struct { + + // The bucket name that contains the objects. + // + // This member is required. + Bucket *string + + // A delimiter is a character that you specify to group keys. All keys that contain + // the same string between the prefix and the first occurrence of the delimiter are + // grouped under a single result element in CommonPrefixes. These groups are + // counted as one result against the max-keys limitation. These keys are not + // returned elsewhere in the response. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. 
If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string
+
+ // Sets the maximum number of keys returned in the response. By default the action
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more. If additional keys satisfy the search criteria, but were not
+ // returned because max-keys was exceeded, the response contains IsTruncated set
+ // to true. To return the additional keys, see key-marker and version-id-marker.
+ MaxKeys int32
+
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings of
+ // keys. (You can think of using prefix to make groups in the same way you'd use a
+ // folder in a file system.) You can use prefix with delimiter to roll up numerous
+ // objects into a single result under CommonPrefixes.
+ Prefix *string
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string
+
+ noSmithyDocumentSerde
+}
+
+type ListObjectVersionsOutput struct {
+
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Container for an object that is a delete marker.
+ DeleteMarkers []types.DeleteMarkerEntry
+
+ // The delimiter grouping the included keys. A delimiter is a character that you
+ // specify to group keys. All keys that contain the same string between the prefix
+ // and the first occurrence of the delimiter are grouped under a single result
+ // element in CommonPrefixes. These groups are counted as one result against the
+ // max-keys limitation. These keys are not returned elsewhere in the response.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ // If you specify encoding-type request parameter, Amazon S3 includes this element
+ // in the response, and returns encoded key name values in the following response
+ // elements: KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
+ EncodingType types.EncodingType
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria. If your results were truncated, you can make a
+ // follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the rest of
+ // the results.
+ IsTruncated bool
+
+ // Marks the last key returned in a truncated response.
+ KeyMarker *string
+
+ // Specifies the maximum number of objects to return.
+ MaxKeys int32
+
+ // The bucket name.
+ Name *string
+
+ // When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria. Use
+ // this value for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string
+
+ // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker
+ // specifies the first object version not returned that satisfies the search
+ // criteria. Use this value for the version-id-marker request parameter in a
+ // subsequent request.
+ NextVersionIdMarker *string
+
+ // Selects objects that start with the value supplied by this parameter.
+ Prefix *string
+
+ // Marks the last version of the key returned in a truncated response.
+ VersionIdMarker *string
+
+ // Container for version information.
+ Versions []types.ObjectVersion
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListObjectVersions",
+ }
+}
+
+// getListObjectVersionsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListObjectVersionsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectVersionsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectVersionsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate:
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go new file mode 100644 index 000000000000..b2d83ff74688 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -0,0 +1,284 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. This action has been revised. We recommend that you use the newer +// version, ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), when +// developing applications. For backward compatibility, Amazon S3 continues to +// support ListObjects. The following operations are related to ListObjects: +// +// * +// ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { + if params == nil { + params = &ListObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectsInput struct { + + // The name of the bucket containing the objects. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // A delimiter is a character you use to group keys. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + Marker *string + + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. + MaxKeys int32 + + // Limits the response to keys that begin with the specified prefix. + Prefix *string + + // Confirms that the requester knows that she or he will be charged for the list + // objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type ListObjectsOutput struct { + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. A response can contain + // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if + // there are any) keys between Prefix and the next occurrence of the string + // specified by the delimiter. CommonPrefixes lists keys that act like + // subdirectories in the directory specified by Prefix. For example, if the prefix + // is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common + // prefix is notes/summer/. All of the keys that roll up into a common prefix count + // as a single return when calculating the number of returns. + CommonPrefixes []types.CommonPrefix + + // Metadata about each object returned. + Contents []types.Object + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType types.EncodingType + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated bool + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker *string + + // The maximum number of keys returned in the response body. 
+ MaxKeys int32
+
+ // The bucket name.
+ Name *string
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in the
+ // subsequent request to get the next set of objects. Amazon S3 lists objects in
+ // alphabetical order. Note: This element is returned only if you have the
+ // delimiter request parameter specified. If the response does not include the
+ // NextMarker and it is truncated, you can use the value of the last Key in the
+ // response as the marker in the subsequent request to get the next set of object
+ // keys.
+ NextMarker *string
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListObjectsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListObjectsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListObjects",
+ }
+}
+
+// getListObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getListObjectsBucketMember(input interface{}) (*string, bool) {
+ in := 
input.(*ListObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go new file mode 100644 index 000000000000..6214d24716f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -0,0 +1,408 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset of +// the objects in a bucket. A 200 OK response can contain valid or invalid XML. +// Make sure to design your application to parse the contents of the response and +// handle it appropriately. Objects are returned sorted in an ascending order of +// the respective key names in the list. For more information about listing +// objects, see Listing object keys programmatically +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// To use this operation, you must have READ access to the bucket. To use this +// action in an Identity and Access Management (IAM) policy, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// This section describes the latest revision of this action. We recommend that you +// use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). To get a +// list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). 
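Stepping back to the legacy ListObjects operation vendored above: it ships without a generated paginator, so callers advance Marker themselves. Below is a minimal sketch of that loop, following the NextMarker fallback rule documented on ListObjectsOutput (NextMarker is only returned when a delimiter is set, so the last Key of the page is used otherwise). The helper and bucket name are illustrative assumptions, not part of the vendored SDK:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllKeysLegacy pages through a bucket with the legacy ListObjects
// operation, advancing Marker by hand between requests.
func listAllKeysLegacy(ctx context.Context, client *s3.Client, bucket string) error {
	input := &s3.ListObjectsInput{Bucket: aws.String(bucket)}
	for {
		page, err := client.ListObjects(ctx, input)
		if err != nil {
			return err
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.ToString(obj.Key))
		}
		if !page.IsTruncated {
			return nil
		}
		switch {
		case page.NextMarker != nil:
			// Only present when a delimiter was specified.
			input.Marker = page.NextMarker
		case len(page.Contents) > 0:
			// Otherwise continue after the last key of this page.
			input.Marker = page.Contents[len(page.Contents)-1].Key
		default:
			return nil
		}
	}
}
```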
The +// following operations are related to ListObjectsV2: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { + if params == nil { + params = &ListObjectsV2Input{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectsV2Output) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectsV2Input struct { + + // Bucket name to list. When using this action with an access point, you must + // direct requests to the access point hostname. The access point hostname takes + // the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When + // using this action with an access point through the Amazon Web Services SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // ContinuationToken indicates Amazon S3 that the list is being continued on this + // bucket with a token. ContinuationToken is obfuscated and is not a real key. + ContinuationToken *string + + // A delimiter is a character you use to group keys. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The owner field is not present in listV2 by default, if you want to return owner + // field with each key in the result then set the fetch owner field to true. + FetchOwner bool + + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. + MaxKeys int32 + + // Limits the response to keys that begin with the specified prefix. + Prefix *string + + // Confirms that the requester knows that she or he will be charged for the list + // objects request in V2 style. Bucket owners need not specify this parameter in + // their requests. + RequestPayer types.RequestPayer + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket. 
+ StartAfter *string
+
+ noSmithyDocumentSerde
+}
+
+type ListObjectsV2Output struct {
+
+ // All of the keys (up to 1,000) rolled up into a common prefix count as a single
+ // return when calculating the number of returns. A response can contain
+ // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if
+ // there are any) keys between Prefix and the next occurrence of the string
+ // specified by a delimiter. CommonPrefixes lists keys that act like subdirectories
+ // in the directory specified by Prefix. For example, if the prefix is notes/ and
+ // the delimiter is a slash (/) as in notes/summer/july, the common prefix is
+ // notes/summer/. All of the keys that roll up into a common prefix count as a
+ // single return when calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Metadata about each object returned.
+ Contents []types.Object
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ ContinuationToken *string
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element in the
+ // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
+ // the response. Each rolled-up result counts as only one return against the
+ // MaxKeys value.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements: Delimiter, Prefix, Key, and StartAfter.
+ EncodingType types.EncodingType
+
+ // Set to false if all of the results were returned. Set to true if more keys are
+ // available to return. If the number of results exceeds that specified by MaxKeys,
+ // all of the results might not be returned.
+ IsTruncated bool
+
+ // KeyCount is the number of keys returned with this request. KeyCount will always
+ // be less than or equal to the MaxKeys field. For example, if you ask for 50 keys,
+ // your result will include 50 keys or fewer.
+ KeyCount int32
+
+ // Sets the maximum number of keys returned in the response. By default the action
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ MaxKeys int32
+
+ // The bucket name. When using this action with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
+ // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
+ // hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts bucket ARN in place of the bucket name.
For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Name *string + + // NextContinuationToken is sent when isTruncated is true, which means there are + // more keys in the bucket that can be listed. The next list requests to Amazon S3 + // can be continued with this NextContinuationToken. NextContinuationToken is + // obfuscated and is not a real key + NextContinuationToken *string + + // Keys that begin with the indicated prefix. + Prefix *string + + // If StartAfter was sent with the request, it is included in the response. + StartAfter *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + +// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 +type ListObjectsV2PaginatorOptions struct { + // Sets the maximum number of keys returned in the response. 
By default the action
+ // returns up to 1,000 key names. The response might contain fewer keys but will
+ // never contain more.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListObjectsV2Paginator is a paginator for ListObjectsV2
+type ListObjectsV2Paginator struct {
+ options ListObjectsV2PaginatorOptions
+ client ListObjectsV2APIClient
+ params *ListObjectsV2Input
+ nextToken *string
+ firstPage bool
+}
+
+// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator
+func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator {
+ if params == nil {
+ params = &ListObjectsV2Input{}
+ }
+
+ options := ListObjectsV2PaginatorOptions{}
+ if params.MaxKeys != 0 {
+ options.Limit = params.MaxKeys
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListObjectsV2Paginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ContinuationToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListObjectsV2Paginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListObjectsV2 page.
+func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ContinuationToken = p.nextToken
+
+ params.MaxKeys = p.options.Limit
+
+ result, err := p.client.ListObjectsV2(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = nil + if result.IsTruncated { + p.nextToken = result.NextContinuationToken + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "ListObjectsV2", + } +} + +// getListObjectsV2BucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getListObjectsV2BucketMember(input interface{}) (*string, bool) { + in := input.(*ListObjectsV2Input) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectsV2BucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go new file mode 100644 index 000000000000..36675dcd74c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -0,0 +1,424 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Lists the parts that have been uploaded for a specific multipart upload. This +// operation must include the upload ID, which you obtain by sending the initiate +// multipart upload request (see CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number of +// parts returned is 1,000 parts. You can restrict the number of parts returned by +// specifying the max-parts request parameter. If your multipart upload consists of +// more than 1,000 parts, the response returns an IsTruncated field with the value +// of true, and a NextPartNumberMarker element. In subsequent ListParts requests +// you can include the part-number-marker query string parameter and set its value +// to the NextPartNumberMarker field value from the previous response. If the +// upload was created using a checksum algorithm, you will need to have permission +// to the kms:Decrypt action for the request to succeed. For more information on +// multipart uploads, see Uploading Objects Using Multipart Upload +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
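Before the ListParts documentation continues, here is a minimal sketch of how the ListObjectsV2 paginator defined above might be driven. The bucket name and page size are placeholder assumptions; the snippet is not part of the vendored code:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-bucket" is a placeholder. Limit is copied into MaxKeys on
	// every request the paginator issues.
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"),
	}, func(o *s3.ListObjectsV2PaginatorOptions) {
		o.Limit = 100
	})

	// HasMorePages is true for the first page and then whenever a non-empty
	// NextContinuationToken was returned, mirroring NextPage above.
	for p.HasMorePages() {
		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("page with %d keys\n", page.KeyCount)
	}
}
```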
For +// information on permissions required to use the multipart upload API, see +// Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The +// following operations are related to ListParts: +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { + if params == nil { + params = &ListPartsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListPartsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListPartsInput struct { + + // The name of the bucket to which the parts are being uploaded. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // This member is required. + UploadId *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts int32 + + // Specifies the part after which listing should begin. Only parts with higher part + // numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +type ListPartsOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, then the response includes this header indicating when the + // initiated multipart upload will become eligible for abort operation. For more + // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle + // Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Container element that identifies who initiated the multipart upload. If the + // initiator is an Amazon Web Services account, this element provides the same + // information as the Owner element. If the initiator is an IAM User, this element + // provides the user ARN and display name. + Initiator *types.Initiator + + // Indicates whether the returned list of parts is truncated. A true value + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated bool + + // Object key for which the multipart upload was initiated. + Key *string + + // Maximum number of parts that were allowed in the response. 
+ MaxParts int32 + + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + NextPartNumberMarker *string + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + Owner *types.Owner + + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + PartNumberMarker *string + + // Container for elements related to a particular part. A response can contain zero + // or more Part elements. + Parts []types.Part + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass types.StorageClass + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpListPartsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListPartsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListPartsAPIClient is a client that 
implements the ListParts operation.
+type ListPartsAPIClient interface {
+ ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error)
+}
+
+var _ ListPartsAPIClient = (*Client)(nil)
+
+// ListPartsPaginatorOptions is the paginator options for ListParts
+type ListPartsPaginatorOptions struct {
+ // Sets the maximum number of parts to return.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListPartsPaginator is a paginator for ListParts
+type ListPartsPaginator struct {
+ options ListPartsPaginatorOptions
+ client ListPartsAPIClient
+ params *ListPartsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListPartsPaginator returns a new ListPartsPaginator
+func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator {
+ if params == nil {
+ params = &ListPartsInput{}
+ }
+
+ options := ListPartsPaginatorOptions{}
+ if params.MaxParts != 0 {
+ options.Limit = params.MaxParts
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListPartsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.PartNumberMarker,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListPartsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListParts page.
+func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.PartNumberMarker = p.nextToken
+
+ params.MaxParts = p.options.Limit
+
+ result, err := p.client.ListParts(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = nil + if result.IsTruncated { + p.nextToken = result.NextPartNumberMarker + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "ListParts", + } +} + +// getListPartsBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getListPartsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListPartsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListPartsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go new file mode 100644 index 000000000000..7875798f2556 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster data +// transfers to Amazon S3. To use this operation, you must have permission to +// perform the s3:PutAccelerateConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * +// Suspended – Disables accelerated data transfers to the bucket. 
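As with ListObjectsV2, the ListParts paginator vendored above follows the same pattern, keyed on PartNumberMarker rather than a continuation token. A hypothetical caller, assuming a bucket, key, and an upload ID obtained from an earlier CreateMultipartUpload call; none of this is part of the vendored code:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listUploadedParts prints the parts recorded so far for one in-progress
// multipart upload, page by page.
func listUploadedParts(ctx context.Context, client *s3.Client, bucket, key, uploadID string) error {
	p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, part := range page.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				part.PartNumber, part.Size, aws.ToString(part.ETag))
		}
	}
	return nil
}
```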
+// +// The +// GetBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. After setting the +// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty +// minutes before the data transfer rates to the bucket increase. The name of the +// bucket used for Transfer Acceleration must be DNS-compliant and must not contain +// periods ("."). For more information about transfer acceleration, see Transfer +// Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * +// GetBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &PutBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAccelerateConfigurationInput struct { + + // Container for setting the transfer acceleration state. + // + // This member is required. + AccelerateConfiguration *types.AccelerateConfiguration + + // The name of the bucket for which the accelerate configuration is set. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketAccelerateConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketAccelerateConfiguration", + } +} + +// getPutBucketAccelerateConfigurationRequestAlgorithmMember gets the request +// checksum algorithm value provided as input. 
+func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketAccelerateConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAccelerateConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go new file mode 100644 index 000000000000..0f669d2acbb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -0,0 +1,368 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the permissions on an existing bucket using access control lists (ACL). For +// more information, see Using ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To set +// the ACL of a bucket, you must have WRITE_ACP permission. You can use one of the +// following two ways to set a bucket's permissions: +// +// * Specify the ACL in the +// request body +// +// * Specify permissions using request headers +// +// You cannot specify +// access permission using both the body and the request headers. Depending on your +// application needs, you may choose to set the ACL on a bucket using either the +// request body or the headers. For example, if you have an existing application +// that updates a bucket ACL using the request body, then you can continue to use +// that approach. If your bucket uses the bucket owner enforced setting for S3 +// Object Ownership, ACLs are disabled and no longer affect permissions. 
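To show the accelerate API above in use, a sketch that enables Transfer Acceleration on a placeholder bucket; per the documentation, the bucket name must be DNS-compliant and contain no periods, and the setting can take up to thirty minutes to apply:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableAcceleration flips the bucket's transfer acceleration state to
// Enabled. Use BucketAccelerateStatusSuspended to turn it back off.
func enableAcceleration(ctx context.Context, client *s3.Client, bucket string) {
	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &types.AccelerateConfiguration{
			Status: types.BucketAccelerateStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```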
You must +// use policies to grant access to your bucket and the objects in it. Requests to +// set ACLs or update ACLs fail and return the AccessControlListNotSupported error +// code. Requests to read ACLs are still supported. For more information, see +// Controlling object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Access Permissions You can set access permissions +// using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl +// request header. Amazon S3 supports a set of predefined ACLs, known as canned +// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify +// the canned ACL name as the value of x-amz-acl. If you use this header, you +// cannot use other access control-specific headers in your request. For more +// information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly with the x-amz-grant-read, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify +// each grantee as a type=value pair, where the type is one of the following: +// +// * id +// – if the value specified is the canonical user ID of an Amazon Web Services +// account +// +// * uri – if you are granting permissions to a predefined group +// +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// +// * US East (N. Virginia) +// +// * US West +// (N. California) +// +// * US West (Oregon) +// +// * Asia Pacific (Singapore) +// +// * Asia Pacific +// (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe (Ireland) +// +// * South America (São +// Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// For example, the following +// x-amz-grant-write header grants create, overwrite, and delete objects permission +// to LogDelivery group predefined by Amazon S3 and two Amazon Web Services +// accounts identified by their email addresses. x-amz-grant-write: +// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", +// id="555566667777" +// +// You can use either a canned ACL or specify access permissions +// explicitly. You cannot do both. 
Grantee Values You can specify the person
+// (grantee) to whom you're assigning access rights (using request elements) in the
+// following ways:
+//
+// * By the person's ID:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request
+//
+// * By URI:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the
+// CanonicalUser and, in a response to a GET Object acl request, appears as the
+// CanonicalUser. Using email addresses to specify a grantee is only supported in
+// the following Amazon Web Services Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
+ if params == nil {
+ params = &PutBucketAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketAclInput struct {
+
+ // The bucket to which to apply the ACL.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The canned ACL to apply to the bucket.
+ ACL types.BucketCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not using
+ // the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
+ // HTTP status code 400 Bad Request. For more information, see Checking object
+ // integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+ // ignores any provided ChecksumAlgorithm parameter.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to RFC 1864.
+ // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web
+ // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
+ // calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner.
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. + GrantRead *string + + // Allows grantee to read the bucket ACL. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string + + noSmithyDocumentSerde +} + +type PutBucketAclOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketAcl", + } +} + +// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm 
value +// provided as input. +func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketAclInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketAclRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketAclBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketAclBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go new file mode 100644 index 000000000000..45cec74ed1e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -0,0 +1,234 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per bucket. +// You can choose to have storage class analysis export analysis reports sent to a +// comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you +// configure. When selecting data export, you specify a destination bucket and an +// optional destination prefix where the file is written. You can export the data +// to a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage Class +// Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket.
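+//
+// A minimal sketch of calling this operation, assuming an initialized
+// *s3.Client named client, a context.Context named ctx, and the aws and types
+// imports; the bucket and IDs are placeholders, and a real configuration would
+// usually also set Filter and StorageClassAnalysis.DataExport:
+//
+//    _, err := client.PutBucketAnalyticsConfiguration(ctx, &s3.PutBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("analytics-1"),
+//        AnalyticsConfiguration: &types.AnalyticsConfiguration{
+//            Id:                   aws.String("analytics-1"),
+//            StorageClassAnalysis: &types.StorageClassAnalysis{},
+//        },
+//    })
+//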
For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// To use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request +// +// * Code: InvalidArgument +// +// * +// Cause: Invalid argument. +// +// * HTTP Error: HTTP 400 Bad Request +// +// * Code: +// TooManyConfigurations +// +// * Cause: You are attempting to create a new configuration +// but have already reached the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 +// Forbidden +// +// * Code: AccessDenied +// +// * Cause: You are not the owner of the specified +// bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to +// set the configuration on the bucket. +// +// Related Resources +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &PutBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAnalyticsConfigurationInput struct { + + // The configuration and any analyses for the analytics filter. + // + // This member is required. + AnalyticsConfiguration *types.AnalyticsConfiguration + + // The name of the bucket to which an analytics configuration is stored. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketAnalyticsConfiguration", + } +} + +// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, +
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go new file mode 100644 index 000000000000..55a67f188267 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -0,0 +1,253 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the cors configuration for your bucket. If the configuration exists, Amazon +// S3 replaces it. To use this operation, you must be allowed to perform the +// s3:PutBucketCORS action. By default, the bucket owner has this permission and +// can grant it to others. You set this configuration on a bucket so that the +// bucket can service cross-origin requests. For example, you might want to enable +// a request whose origin is http://www.example.com to access your Amazon S3 bucket +// at my.example.bucket.com by using the browser's XMLHttpRequest capability. To +// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which you +// configure rules that identify origins and the HTTP methods that can be executed +// on your bucket. The document is limited to 64 KB in size. When Amazon S3 +// receives a cross-origin request (or a pre-flight OPTIONS request) against a +// bucket, it evaluates the cors configuration on the bucket and uses the first +// CORSRule rule that matches the incoming browser request to enable a cross-origin +// request. For a rule to match, the following conditions must be met: +// +// * The +// request's Origin header must match AllowedOrigin elements. +// +// * The request method +// (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers +// request header of a pre-flight request must match an AllowedHeader element. +// +// For +// more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. 
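+//
+// A minimal sketch of a rule of the shape described above, assuming an
+// initialized *s3.Client named client, a context.Context named ctx, and the
+// aws and types imports; the bucket name and origin are placeholders:
+//
+//    _, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
+//        Bucket: aws.String("my.example.bucket.com"),
+//        CORSConfiguration: &types.CORSConfiguration{
+//            CORSRules: []types.CORSRule{{
+//                AllowedOrigins: []string{"http://www.example.com"},
+//                AllowedMethods: []string{"GET", "PUT"},
+//                AllowedHeaders: []string{"*"},
+//            }},
+//        },
+//    })
+//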
Related Resources +// +// * GetBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// * +// DeleteBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// * +// RESTOPTIONSobject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { + if params == nil { + params = &PutBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketCorsInput struct { + + // Specifies the bucket impacted by the cors configuration. + // + // This member is required. + Bucket *string + + // Describes the cross-origin access configuration for objects in an Amazon S3 + // bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 + // User Guide. + // + // This member is required. + CORSConfiguration *types.CORSConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, go to RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketCorsOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketCors", + } +} + +// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketCorsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go new file mode 100644 index 000000000000..184f0cd31188 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -0,0 +1,243 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action uses the encryption subresource to configure default encryption and +// Amazon S3 Bucket Key for an existing bucket. Default encryption for a bucket can +// use server-side encryption with Amazon S3-managed keys (SSE-S3) or customer +// managed keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can +// also configure Amazon S3 Bucket Key. When the default encryption is SSE-KMS, if +// you upload an object to the bucket and do not specify the KMS key to use for +// encryption, Amazon S3 uses the default Amazon Web Services managed KMS key for +// your account. For information about default encryption, see Amazon S3 default +// bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the +// Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 +// Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in +// the Amazon S3 User Guide. This action requires Amazon Web Services Signature +// Version 4.
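+//
+// A minimal sketch setting SSE-S3 as the bucket default, assuming an
+// initialized *s3.Client named client, a context.Context named ctx, and the
+// aws and types imports; the bucket name is a placeholder:
+//
+//    _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
+//        Bucket: aws.String("my-bucket"),
+//        ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
+//            Rules: []types.ServerSideEncryptionRule{{
+//                ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+//                    SSEAlgorithm: types.ServerSideEncryptionAes256,
+//                },
+//            }},
+//        },
+//    })
+//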
For more information, see Authenticating Requests (Amazon Web +// Services Signature Version 4) +// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// To use this operation, you must have permissions to perform the +// s3:PutEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Related Resources +// +// * GetBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * +// DeleteBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { + if params == nil { + params = &PutBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketEncryptionInput struct { + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For + // information about the Amazon S3 default encryption feature, see Amazon S3 + // Default Bucket Encryption + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the default server-side-encryption configuration. + // + // This member is required. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the server-side encryption + // configuration. For requests made using the Amazon Web Services Command Line + // Interface (CLI) or Amazon Web Services SDKs, this field is calculated + // automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketEncryption", + } +} + +// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketEncryptionInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000000..edf5d178be1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -0,0 +1,225 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can +// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering.
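+//
+// A minimal sketch enabling archive tiering, assuming an initialized
+// *s3.Client named client, a context.Context named ctx, and the aws and types
+// imports; the names and the 90-day threshold are placeholders:
+//
+//    _, err := client.PutBucketIntelligentTieringConfiguration(ctx, &s3.PutBucketIntelligentTieringConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("tiering-1"),
+//        IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
+//            Id:     aws.String("tiering-1"),
+//            Status: types.IntelligentTieringStatusEnabled,
+//            Tierings: []types.Tiering{{
+//                AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
+//                Days:       90,
+//            }},
+//        },
+//    })
+//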
Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * +// DeleteBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * +// GetBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You +// only need S3 Intelligent-Tiering enabled on a bucket if you want to +// automatically move objects stored in the S3 Intelligent-Tiering storage class to +// the Archive Access or Deep Archive Access tier. Special Errors +// +// * HTTP 400 Bad +// Request Error +// +// * Code: InvalidArgument +// +// * Cause: Invalid Argument +// +// * HTTP 400 +// Bad Request Error +// +// * Code: TooManyConfigurations +// +// * Cause: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. +// +// * HTTP 403 Forbidden Error +// +// * Code: AccessDenied +// +// * Cause: You are not +// the owner of the specified bucket, or you do not have the +// s3:PutIntelligentTieringConfiguration bucket permission to set the configuration +// on the bucket. +func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &PutBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + // Container for S3 Intelligent-Tiering configuration. + // + // This member is required. + IntelligentTieringConfiguration *types.IntelligentTieringConfiguration + + noSmithyDocumentSerde +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketIntelligentTieringConfiguration", + } +} + +// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member value and a boolean indicating if the +// input has a modeled bucket name. +func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: 
options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go new file mode 100644 index 000000000000..afca60b9d10f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. Amazon S3 inventory generates inventories +// of the objects in the bucket on a daily or weekly basis, and the results are +// published to a flat file. The bucket that is inventoried is called the source +// bucket, and the bucket where the inventory flat file is stored is called the +// destination bucket. The destination bucket must be in the same Amazon Web +// Services Region as the source bucket. When you configure an inventory for a +// source bucket, you specify the destination bucket where you want the inventory +// to be stored, and whether to generate the inventory daily or weekly. You can +// also configure what object metadata to include and whether to inventory all +// object versions or only current versions. For more information, see Amazon S3 +// Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) in the +// Amazon S3 User Guide. You must create a bucket policy on the destination bucket +// to grant permissions to Amazon S3 to write objects to the bucket in the defined +// location. For an example policy, see Granting Permissions for Amazon S3 +// Inventory and Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Special Errors +// +// * HTTP 400 Bad Request Error +// +// * +// Code: InvalidArgument +// +// * Cause: Invalid Argument +// +// * HTTP 400 Bad Request +// Error +// +// * Code: TooManyConfigurations +// +// * Cause: You are attempting to create a +// new configuration but have already reached the 1,000-configuration limit. 
+// +// * +// HTTP 403 Forbidden Error +// +// * Code: AccessDenied +// +// * Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * +// GetBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * +// DeleteBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &PutBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketInventoryConfigurationInput struct { + + // The name of the bucket where the inventory configuration will be stored. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Specifies the inventory configuration. + // + // This member is required. + InventoryConfiguration *types.InventoryConfiguration + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketInventoryConfiguration", + } +} + +// getPutBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + 
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go new file mode 100644 index 000000000000..ca79b24e9663 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -0,0 +1,269 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information about +// lifecycle configuration, see Managing your storage lifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// Bucket lifecycle configuration now supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version of the +// API supported filtering based only on an object key name prefix, which is +// supported for backward compatibility. For the related API description, see +// PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// Rules You specify the lifecycle configuration in your request body. The +// lifecycle configuration is specified as XML consisting of one or more rules. An +// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// +// * Filter identifying a subset +// of objects to which the rule applies. The filter can be based on a key name +// prefix, object tags, or a combination of both. +// +// * Status whether the rule is in +// effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state of +// your bucket is versioning-enabled or versioning-suspended, you can have many +// versions of the same object (one current version and zero or more noncurrent +// versions). Amazon S3 provides predefined actions that you can specify for +// current and noncurrent object versions. +// +// For more information, see Object +// Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) and +// Lifecycle Configuration Elements +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// Permissions By default, all Amazon S3 resources are private, including buckets, +// objects, and related subresources (for example, lifecycle configuration and +// website configuration). 
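+//
+// A minimal sketch of a configuration with one rule of the shape described
+// above, assuming an initialized *s3.Client named client, a context.Context
+// named ctx, and the aws and types imports; the names, prefix, and expiration
+// are placeholders:
+//
+//    _, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        LifecycleConfiguration: &types.BucketLifecycleConfiguration{
+//            Rules: []types.LifecycleRule{{
+//                ID:         aws.String("expire-logs"),
+//                Filter:     &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
+//                Status:     types.ExpirationStatusEnabled,
+//                Expiration: &types.LifecycleExpiration{Days: 365},
+//            }},
+//        },
+//    })
+//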
Only the resource owner (that is, the Amazon Web +// Services account that created it) can access the resource. The resource owner +// can optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * +// s3:PutLifecycleConfiguration +// +// For more information about permissions, see +// Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of +// Lifecycle Configuration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * +// GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * +// DeleteBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &PutBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to set the configuration. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *types.BucketLifecycleConfiguration + + noSmithyDocumentSerde +} + +type PutBucketLifecycleConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketLifecycleConfiguration", + } +} + +// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request +// checksum algorithm value provided as input. 
+func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketLifecycleConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketLifecycleConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go new file mode 100644 index 000000000000..0f3ea6d33c29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -0,0 +1,258 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the logging parameters for a bucket and specifies permissions for who can +// view and modify the logging parameters. All logs are saved to buckets in the +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. The bucket owner is automatically +// granted FULL_CONTROL to all logs. You use the Grantee request element to grant +// access to other people. The Permissions request element specifies the kind of +// access the grantee has to the logs. If the target bucket for log delivery uses +// the bucket owner enforced setting for S3 Object Ownership, you can't use the +// Grantee request element to grant access to others. Permissions can only be +// granted using policies.
For more information, see Permissions for server access +// log delivery +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) +// to whom you're assigning access rights (using request elements) in the following +// ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional +// and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> The +// grantee is resolved to the CanonicalUser and, in a response to a GET Object acl +// request, appears as the CanonicalUser. +// +// * By URI: +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable +// logging, you use LoggingEnabled and its children request elements. To disable +// logging, you use an empty BucketLoggingStatus request element: For more +// information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in the +// Amazon S3 User Guide. For more information about creating a bucket, see +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). For +// more information about returning the logging status of a bucket, see +// GetBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). The +// following operations are related to PutBucketLogging: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// GetBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { + if params == nil { + params = &PutBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLoggingInput struct { + + // The name of the bucket for which to set the logging parameters. + // + // This member is required. + Bucket *string + + // Container for logging status information. + // + // This member is required. + BucketLoggingStatus *types.BucketLoggingStatus + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutBucketLogging request body. 
For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketLoggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketLogging", + } +} + +// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
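// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): enabling server access logging with PutBucketLogging, and opting into
// an SDK-computed SHA-256 request checksum via the ChecksumAlgorithm member
// described above. Imports and identifiers are assumed as in the earlier
// sketch; both bucket names are hypothetical.
func exampleEnableAccessLogging(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			// An empty BucketLoggingStatus{} would disable logging instead.
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),
				TargetPrefix: aws.String("access-logs/"),
			},
		},
		// Optional: have the SDK compute and send a SHA-256 payload checksum.
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}
// ---------------------------------------------------------------------------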
+func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketLoggingInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketLoggingBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketLoggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketLoggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go new file mode 100644 index 000000000000..6f0c6facd5ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets a metrics configuration (specified by the metrics configuration ID) for the +// bucket. You can have up to 1,000 metrics configurations per bucket. If you're +// updating an existing metrics configuration, note that this is a full replacement +// of the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. To use this operation, you must have permissions +// to perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// The following operations are related to PutBucketMetricsConfiguration: +// +// * +// DeleteBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * +// ListBucketMetricsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// PutBucketMetricsConfiguration +// has the following special error: +// +// * Error code: TooManyConfigurations +// +// * +// Description: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// * HTTP Status Code: HTTP 400 Bad Request +func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &PutBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketMetricsConfigurationInput struct { + + // The name of the bucket for which the metrics configuration is set. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // Specifies the metrics configuration. + // + // This member is required. + MetricsConfiguration *types.MetricsConfiguration + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketMetricsConfigurationOutput struct { + // Metadata pertaining to the operation's result.
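// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): enabling CloudWatch request metrics for a whole bucket with
// PutBucketMetricsConfiguration. Imports and identifiers are assumed as in the
// earlier sketches; "EntireBucket" is a common console convention for an
// unfiltered configuration, not a required value.
func exampleEnableRequestMetrics(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("EntireBucket"),
		MetricsConfiguration: &types.MetricsConfiguration{
			Id: aws.String("EntireBucket"), // must match the Id above
			// No Filter set: metrics cover every object in the bucket.
		},
	})
	return err
}
// ---------------------------------------------------------------------------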
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketMetricsConfiguration", + } +} + +// getPutBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver:
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go new file mode 100644 index 000000000000..8e771d6bc957 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). Using +// this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an event +// notification when it detects an event of the specified type. By default, your +// bucket has no event notifications configured. That is, the notification +// configuration will be an empty NotificationConfiguration. This action replaces +// the existing notification configuration with the configuration you include in +// the request body. After Amazon S3 receives this request, it first verifies that +// any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue +// Service (Amazon SQS) destination exists, and that the bucket owner has +// permission to publish to it by sending a test notification. In the case of +// Lambda destinations, Amazon S3 verifies that the Lambda function permissions +// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For +// more information, see Configuring Notifications for Amazon S3 Events +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). You +// can disable notifications by adding the empty NotificationConfiguration element. +// For more information about the number of event notification configurations that +// you can create per bucket, see Amazon S3 service quotas +// (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) in Amazon Web +// Services General Reference. By default, only the bucket owner can configure +// notifications on a bucket. However, bucket owners can use a bucket policy to +// grant permission to other users to set this configuration with +// s3:PutBucketNotification permission. The PUT notification is an atomic +// operation. For example, suppose your notification configuration includes SNS +// topic, SQS queue, and Lambda function configurations. When you send a PUT +// request with this configuration, Amazon S3 sends test messages to your SNS +// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will +// not add the configuration to your bucket. 
Responses If the configuration in the +// request body includes only one TopicConfiguration specifying only the +// s3:ReducedRedundancyLostObject event type, the response will also include the +// x-amz-sns-test-message-id header containing the message ID of the test +// notification sent to the topic. The following action is related to +// PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &PutBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketNotificationConfigurationInput struct { + + // The name of the bucket. + // + // This member is required. + Bucket *string + + // A container for specifying the notification configuration of the bucket. If this + // element is empty, notifications are turned off for the bucket. + // + // This member is required. + NotificationConfiguration *types.NotificationConfiguration + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or + // false value. + SkipDestinationValidation bool + + noSmithyDocumentSerde +} + +type PutBucketNotificationConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
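// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): routing object-created events to an SQS queue with
// PutBucketNotificationConfiguration. Imports and identifiers are assumed as
// in the earlier sketches; the queue ARN is hypothetical, and S3 sends a test
// message to it unless SkipDestinationValidation is set.
func exampleNotifyQueueOnObjectCreated(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
				Events:   []types.Event{types.EventS3ObjectCreated}, // "s3:ObjectCreated:*"
			}},
		},
	})
	return err
}
// ---------------------------------------------------------------------------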
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketNotificationConfiguration", + } +} + +// getPutBucketNotificationConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketNotificationConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true,
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go new file mode 100644 index 000000000000..83210cac46bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -0,0 +1,197 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see Specifying permissions in a policy +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). +// The following operations are related to PutBucketOwnershipControls: +// +// * +// GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { + if params == nil { + params = &PutBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketOwnershipControlsInput struct { + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // This member is required. + Bucket *string + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + // ObjectWriter) that you want to apply to this Amazon S3 bucket. + // + // This member is required. + OwnershipControls *types.OwnershipControls + + // The MD5 hash of the OwnershipControls request body. For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. 
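// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): applying the bucket-owner-enforced Object Ownership setting with
// PutBucketOwnershipControls. Imports and identifiers are assumed as in the
// earlier sketches; the bucket name is hypothetical.
func exampleEnforceBucketOwnerOwnership(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"),
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{{
				// Disables ACLs; the bucket owner owns every object in the bucket.
				ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
			}},
		},
	})
	return err
}
// ---------------------------------------------------------------------------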
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketOwnershipControls", + } +} + +func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: nil, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func
addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go new file mode 100644 index 000000000000..8860d3b560d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -0,0 +1,229 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an +// identity other than the root user of the Amazon Web Services account that owns +// the bucket, the calling identity must have the PutBucketPolicy permissions on +// the specified bucket and belong to the bucket owner's account in order to use +// this operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. As a security precaution, the root user of the +// Amazon Web Services account that owns a bucket can always use this operation, +// even if the policy explicitly denies the root user the ability to perform this +// action. For more information, see Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { + if params == nil { + params = &PutBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketPolicyInput struct { + + // The name of the bucket. + // + // This member is required. + Bucket *string + + // The bucket policy as a JSON document. + // + // This member is required. + Policy *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. 
This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess bool + + // The MD5 hash of the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketPolicyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err 
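// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): attaching a bucket policy with PutBucketPolicy. Imports and
// identifiers are assumed as in the earlier sketches; the policy below is a
// hypothetical example that denies all non-TLS access.
func exampleDenyInsecureTransport(ctx context.Context, client *s3.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // must match the Resource ARNs above
		Policy: aws.String(policy),
	})
	return err
}
// ---------------------------------------------------------------------------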
+ } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketPolicy", + } +} + +// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketPolicyInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go new file mode 100644 index 000000000000..2213373f30b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon +// S3 User Guide. Specify the replication configuration in the request body. In the +// replication configuration, you provide the name of the destination bucket or +// buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon +// S3 can assume to replicate objects on your behalf, and other relevant +// information.
A replication configuration must include at least one rule, and can +// contain a maximum of 1,000. Each rule identifies a subset of objects to +// replicate by filtering the objects in the source bucket. To choose additional +// subsets of objects to replicate, add a rule for each subset. To specify a subset +// of the objects in the source bucket to apply a replication rule to, add the +// Filter element as a child of the Rule element. You can filter objects based on +// an object key prefix, one or more object tags, or both. When you add the Filter +// element in the configuration, you must also add the following elements: +// DeleteMarkerReplication, Status, and Priority. If you are using an earlier +// version of the replication configuration, Amazon S3 handles replication of +// delete markers differently. For more information, see Backward Compatibility +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). Handling +// Replication of Encrypted Objects By default, Amazon S3 doesn't replicate objects +// that are stored at rest using server-side encryption with KMS keys. To replicate +// Amazon Web Services KMS-encrypted objects, add the following: +// SourceSelectionCriteria, SseKmsEncryptedObjects, Status, +// EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication +// configuration, see Replicating Objects Created with SSE Using KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// For information on PutBucketReplication errors, see List of replication-related +// error codes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// Permissions To create a PutBucketReplication request, you must have +// s3:PutReplicationConfiguration permissions for the bucket. By default, a +// resource owner, in this case the Amazon Web Services account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and +// Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// To perform this operation, the user or role performing the action must have the +// iam:PassRole +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. 
The following operations are related to PutBucketReplication: +// +// * +// GetBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * +// DeleteBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { + if params == nil { + params = &PutBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketReplicationInput struct { + + // The name of the bucket + // + // This member is required. + Bucket *string + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // This member is required. + ReplicationConfiguration *types.ReplicationConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +type PutBucketReplicationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketReplication", + } +} + +// getPutBucketReplicationRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
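// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the generated
// file): replicating every object in a versioned source bucket with
// PutBucketReplication. Imports and identifiers are assumed as in the earlier
// sketches; the role and bucket ARNs are hypothetical, and field shapes (for
// example, Filter as a union and Priority as a plain int32) follow the SDK
// version vendored here.
func exampleReplicateAllObjects(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"), // versioning must be enabled
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"),
			Rules: []types.ReplicationRule{{
				Status:   types.ReplicationRuleStatusEnabled,
				Priority: 1,
				// An empty-prefix Filter matches everything; per the doc comment
				// above, using Filter also requires DeleteMarkerReplication.
				Filter: &types.ReplicationRuleFilterMemberPrefix{Value: ""},
				DeleteMarkerReplication: &types.DeleteMarkerReplication{
					Status: types.DeleteMarkerReplicationStatusDisabled,
				},
				Destination: &types.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
				},
			}},
		},
	})
	return err
}
// ---------------------------------------------------------------------------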
+func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketReplicationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go new file mode 100644 index 000000000000..c89d97beceef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -0,0 +1,221 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download will +// be charged for the download. For more information, see Requester Pays Buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
The +// following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// GetBucketRequestPayment +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { + if params == nil { + params = &PutBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketRequestPaymentInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for Payer. + // + // This member is required. + RequestPaymentConfiguration *types.RequestPaymentConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketRequestPaymentOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketRequestPayment", + } +} + +// getPutBucketRequestPaymentRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
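A hedged usage sketch for the operation documented above; the helper name and bucket name are illustrative, and the same configured client and imports as the earlier sketch are assumed.

```go
// Hedged sketch: enable requester-pays on a bucket (name illustrative).
func enableRequesterPays(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"),
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			// PayerRequester shifts download costs to the requester;
			// PayerBucketOwner restores the default.
			Payer: types.PayerRequester,
		},
	})
	return err
}
```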
+func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketRequestPaymentInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketRequestPaymentRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketRequestPaymentInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketRequestPaymentBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
new file mode 100644
index 000000000000..f410107737c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
@@ -0,0 +1,266 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill
+// to reflect your own cost structure. To do this, sign up to get your Amazon Web
+// Services account bill with tag key values included. Then, to see the cost of
+// combined resources, organize your billing information according to resources
+// with the same tag key values. For example, you can tag several resources with a
+// specific application name, and then organize your billing information to see the
+// total cost of that application across several services. For more information,
+// see Cost Allocation and Tagging
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
+// and Using Cost Allocation in Amazon S3 Bucket Tags
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). When
+// this operation sets the tags for a bucket, it will overwrite any current tags
+// the bucket already has. You cannot use this operation to add tags to an existing
+// list of tags. 
To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// PutBucketTagging has the following special errors: +// +// * Error code: +// InvalidTagError +// +// * Description: The tag provided was not a valid tag. This error +// can occur if the tag did not pass input validation. For information about tag +// restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and Amazon Web Services-Generated Cost Allocation Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * +// Error code: MalformedXMLError +// +// * Description: The XML provided does not match +// the schema. +// +// * Error code: OperationAbortedError +// +// * Description: A conflicting +// conditional action is currently in progress against this resource. Please try +// again. +// +// * Error code: InternalError +// +// * Description: The service was unable to +// apply the provided tag to the bucket. +// +// The following operations are related to +// PutBucketTagging: +// +// * GetBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * +// DeleteBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { + if params == nil { + params = &PutBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketTaggingInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the TagSet and Tag elements. + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). 
For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketTagging", + } +} + +// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
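A hedged PutBucketTagging usage sketch under the same assumptions; tag keys and values are illustrative. Because the operation overwrites the whole tag set, code that wants to add one tag typically reads the current set with GetBucketTagging first.

```go
// Hedged sketch: replace a bucket's tag set (names illustrative).
func tagBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"),
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("team"), Value: aws.String("build")},
				{Key: aws.String("env"), Value: aws.String("ci")},
			},
		},
	})
	return err
}
```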
+func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketTaggingInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketTaggingRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketTaggingInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketTaggingBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
new file mode 100644
index 000000000000..6d7943e6f665
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
@@ -0,0 +1,243 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the versioning state of an existing bucket. You can set the versioning
+// state with one of the following values: Enabled—Enables versioning for the
+// objects in the bucket. All objects added to the bucket receive a unique version
+// ID. Suspended—Disables versioning for the objects in the bucket. All objects
+// added to the bucket receive the version ID null. If the versioning state has
+// never been set on a bucket, it has no versioning state; a GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+// request does not return a versioning state value. In order to enable MFA Delete,
+// you must be the bucket owner. If you are the bucket owner and want to enable MFA
+// Delete in the bucket versioning configuration, you must include the x-amz-mfa
+// request header and the Status and the MfaDelete request elements in a request to
+// set the versioning state of the bucket. 
If you have an object expiration
+// lifecycle policy in your non-versioned bucket and you want to maintain the same
+// permanent delete behavior when you enable versioning, you must add a noncurrent
+// expiration policy. The noncurrent expiration lifecycle policy will manage the
+// deletes of the noncurrent object versions in the version-enabled bucket. (A
+// version-enabled bucket maintains one current and zero or more noncurrent object
+// versions.) For more information, see Lifecycle and Versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config).
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// *
+// GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) {
+	if params == nil {
+		params = &PutBucketVersioningInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutBucketVersioningOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type PutBucketVersioningInput struct {
+
+	// The bucket name.
+	//
+	// This member is required.
+	Bucket *string
+
+	// Container for setting the versioning state.
+	//
+	// This member is required.
+	VersioningConfiguration *types.VersioningConfiguration
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not using
+	// the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
+	// HTTP status code 400 Bad Request. For more information, see Checking object
+	// integrity
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+	// ignores any provided ChecksumAlgorithm parameter.
+	ChecksumAlgorithm types.ChecksumAlgorithm
+
+	// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
+	// a message integrity check to verify that the request body was not corrupted in
+	// transit. For more information, see RFC 1864
+	// (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web
+	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
+	// calculated automatically.
+	ContentMD5 *string
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	// The concatenation of the authentication device's serial number, a space, and the
+	// value that is displayed on your authentication device.
+	MFA *string
+
+	noSmithyDocumentSerde
+}
+
+type PutBucketVersioningOutput struct {
+	// Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketVersioning", + } +} + +// getPutBucketVersioningRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
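A hedged PutBucketVersioning usage sketch under the same assumptions; the bucket name is illustrative. Per the doc comment above, Enabled and Suspended are the only valid states, and a never-configured bucket reports no state at all.

```go
// Hedged sketch: turn versioning on for a bucket (name illustrative).
func enableVersioning(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	return err
}
```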
+func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketVersioningInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketVersioningRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketVersioningInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketVersioningBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
new file mode 100644
index 000000000000..11cb4a35593a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -0,0 +1,279 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see Hosting
+// Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This PUT
+// action requires the S3:PutBucketWebsite permission. By default, only the bucket
+// owner can configure the website attached to a bucket; however, bucket owners can
+// allow other users to set the website configuration by writing a bucket policy
+// that grants them the S3:PutBucketWebsite permission. To redirect all website
+// requests sent to the bucket's website endpoint, you add a website configuration
+// with the following elements. Because all requests are sent to another website,
+// you don't need to provide an index document name for the bucket. 
+// +// * +// WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you +// want granular control over redirects, you can use the following elements to add +// routing rules that describe conditions for redirecting requests and information +// about the redirect destination. In this case, the website configuration must +// provide an index document for the bucket, because some requests might not be +// redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * +// ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * +// HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * +// HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon +// S3 has a limitation of 50 routing rules per website configuration. If you +// require more than 50 routing rules, you can use object redirect. For more +// information, see Configuring an Object Redirect +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in +// the Amazon S3 User Guide. +func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { + if params == nil { + params = &PutBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketWebsiteInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + WebsiteConfiguration *types.WebsiteConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketWebsite", + } +} + +// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
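A hedged PutBucketWebsite usage sketch under the same assumptions; the bucket and document names are illustrative.

```go
// Hedged sketch: host a static site with an index and an error page.
func configureWebsite(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-site-bucket"),
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}
```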
+func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketWebsiteInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketWebsiteRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketWebsiteInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketWebsiteBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
new file mode 100644
index 000000000000..6433640616c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
@@ -0,0 +1,613 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"time"
+)
+
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to add
+// an object to it. Amazon S3 never adds partial objects; if you receive a success
+// response, Amazon S3 added the entire object to the bucket. Amazon S3 is a
+// distributed system. If it receives multiple write requests for the same object
+// simultaneously, it overwrites all but the last object written. Amazon S3 does
+// not provide object locking; if you need this, make sure to build it into your
+// application layer or use versioning instead. To ensure that data is not
+// corrupted traversing the network, use the Content-MD5 header. When you use this
+// header, Amazon S3 checks the object against the provided MD5 value and, if they
+// do not match, returns an error. Additionally, you can calculate the MD5 while
+// putting an object to Amazon S3 and compare the returned ETag to the calculated
+// MD5 value.
+//
+// * To successfully complete the PutObject request, you must have the
+// s3:PutObject permission in your IAM permissions. 
+//
+// * To successfully change the object's ACL
+// of your PutObject request, you must have the s3:PutObjectAcl permission in your
+// IAM permissions.
+//
+// * The Content-MD5 header is required for any request to upload an
+// object with a retention period configured using Amazon S3 Object Lock. For more
+// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in
+// the Amazon S3 User Guide.
+//
+// Server-side Encryption You can optionally request
+// server-side encryption. With server-side encryption, Amazon S3 encrypts your
+// data as it writes it to disks in its data centers and decrypts the data when you
+// access it. You have the option to provide your own encryption key or use Amazon
+// Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information,
+// see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+// If you request server-side encryption using Amazon Web Services Key Management
+// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more
+// information, see Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+// S3 User Guide. Access Control List (ACL)-Specific Request Headers You can use
+// headers to grant ACL-based permissions. By default, all objects are private.
+// Only the owner has full access control. When adding a new object, you can grant
+// permissions to individual Amazon Web Services accounts or to predefined groups
+// defined by Amazon S3. These permissions are then added to the ACL on the object.
+// For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing
+// ACLs Using the REST API
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If
+// the bucket that you're uploading objects to uses the bucket owner enforced
+// setting for S3 Object Ownership, ACLs are disabled and no longer affect
+// permissions. Buckets that use this setting only accept PUT requests that don't
+// specify an ACL or PUT requests that specify bucket owner full control ACLs, such
+// as the bucket-owner-full-control canned ACL or an equivalent form of this ACL
+// expressed in the XML format. PUT requests that contain other ACLs (for example,
+// custom grants to certain Amazon Web Services accounts) fail and return a 400
+// error with the error code AccessControlListNotSupported. For more information,
+// see Controlling ownership of objects and disabling ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
+// setting for Object Ownership, all objects written to the bucket by any account
+// will be owned by the bucket owner. Storage Class Options By default, Amazon S3
+// uses the STANDARD Storage Class to store newly created objects. The STANDARD
+// storage class provides high durability and high availability. Depending on
+// performance needs, you can specify a different Storage Class. Amazon S3 on
+// Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage
+// Classes
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+// the Amazon S3 User Guide. 
Versioning If you enable versioning for a bucket, +// Amazon S3 automatically generates a unique version ID for the object being +// stored. Amazon S3 returns this ID in the response. When you enable versioning +// for a bucket, if Amazon S3 receives multiple write requests for the same object +// simultaneously, it stores all of the objects. For more information about +// versioning, see Adding Objects to Versioning Enabled Buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see +// GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// Related Resources +// +// * CopyObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { + if params == nil { + params = &PutObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectInput struct { + + // The bucket name to which the PUT action was initiated. When using this action + // with an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the PUT action was initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // This action is not supported by Amazon S3 on Outposts. + ACL types.ObjectCannedACL + + // Object data. + Body io.Reader + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled bool + + // Can be used to specify caching behavior along the request/reply chain. 
For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. This parameter is useful when the size of the body + // cannot be determined automatically. 
For more
+	// information, see
+	// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13).
+	ContentLength int64
+
+	// The base64-encoded 128-bit MD5 digest of the message (without the headers)
+	// according to RFC 1864. This header can be used as a message integrity check to
+	// verify that the data is the same data that was originally sent. Although it is
+	// optional, we recommend using the Content-MD5 mechanism as an end-to-end
+	// integrity check. For more information about REST request authentication, see
+	// REST Authentication
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+	ContentMD5 *string
+
+	// A standard MIME type describing the format of the contents. For more
+	// information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
+	ContentType *string
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	// The date and time at which the object is no longer cacheable. For more
+	// information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
+	Expires *time.Time
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
+	// action is not supported by Amazon S3 on Outposts.
+	GrantFullControl *string
+
+	// Allows grantee to read the object data and its metadata. This action is not
+	// supported by Amazon S3 on Outposts.
+	GrantRead *string
+
+	// Allows grantee to read the object ACL. This action is not supported by Amazon S3
+	// on Outposts.
+	GrantReadACP *string
+
+	// Allows grantee to write the ACL for the applicable object. This action is not
+	// supported by Amazon S3 on Outposts.
+	GrantWriteACP *string
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]string
+
+	// Specifies whether a legal hold will be applied to this object. For more
+	// information about S3 Object Lock, see Object Lock
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+	ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+	// The Object Lock mode that you want to apply to this object.
+	ObjectLockMode types.ObjectLockMode
+
+	// The date and time when you want this object's Object Lock to expire. Must be
+	// formatted as a timestamp parameter.
+	ObjectLockRetainUntilDate *time.Time
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	RequestPayer types.RequestPayer
+
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
+	SSECustomerAlgorithm *string
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in
+	// encrypting data. This value is used to store the object and then it is
+	// discarded; Amazon S3 does not store the encryption key. 
The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption is present and has the value of aws:kms, this + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetrical customer managed key that was used for the + // object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide + // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web + // Services managed key to protect the data. If the KMS key does not exist in the + // same account issuing the command, you must use the full ARN and not just the ID. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For + // more information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in + // the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). In the + // following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: x-amz-website-redirect-location: + // /anotherPage.html In the following example, the request header sets the object + // redirect to another website: x-amz-website-redirect-location: + // http://www.example.com/ For more information about website hosting in Amazon S3, + // see Hosting Websites on Amazon S3 + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and How to + // Configure Website Page Redirects + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +type PutObjectOutput struct { + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. 
With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag for the uploaded object. + ETag *string + + // If the expiration is configured for the object (see + // PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value of + // the rule-id is URL-encoded. + Expiration *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption is present and has the value of aws:kms, this + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. 
+ SSEKMSKeyId *string + + // If you specified server-side encryption either with an Amazon Web Services KMS + // key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 used + // to encrypt the object. + ServerSideEncryption types.ServerSideEncryption + + // Version of the object. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObject", + } +} + +// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
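As a usage note for the checksum plumbing here: the sketch below shows how a caller would supply the checksum algorithm that this getter reads off PutObjectInput. It is illustrative only and not part of the vendored diff; the bucket, key, and body are placeholder values, and credentials are assumed to come from the default config chain.

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ChecksumAlgorithm is the member the algorithm getter reads back;
	// setting it lets the input-checksum middleware compute and send the
	// digest. Bucket, key, and body are placeholders.
	out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"),
		Key:               aws.String("example-key"),
		Body:              strings.NewReader("hello"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("etag=%s", aws.ToString(out.ETag))
}
```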
+func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: true, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getPutObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignPutObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &PutObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, + c.client.addOperationPutObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + func(stack *middleware.Stack, options Options) error { + return awshttp.RemoveContentTypeHeader(stack) + }, + addPutObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go new file mode 100644 index 000000000000..05a377b5bc6a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -0,0 +1,410 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
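For the PresignPutObject helper defined above, here is a minimal sketch of generating a presigned upload URL. It is illustrative and not part of the vendored diff; the expiry, bucket, and key are assumed values.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg), func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute // how long the URL stays valid
	})

	// PresignPutObject signs the request with an unsigned payload (see
	// addPutObjectPayloadAsUnsigned above), so any body can be uploaded.
	req, err := presigner.PresignPutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.Method, req.URL)
}
```

A plain HTTP client can then PUT any body to req.URL until the expiry passes, which is the point of the unsigned-payload swap.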
+ +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Uses the acl subresource to set the access control list (ACL) permissions for a +// new or existing object in an S3 bucket. You must have WRITE_ACP permission to +// set the ACL of an object. For more information, see What permissions can I +// grant? +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. This action is not supported by Amazon S3 on +// Outposts. Depending on your application needs, you can choose to set the ACL on +// an object using either the request body or the headers. For example, if you have +// an existing application that updates a bucket ACL using the request body, you +// can continue to use that approach. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the +// Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for +// S3 Object Ownership, ACLs are disabled and no longer affect permissions. You +// must use policies to grant access to your bucket and the objects in it. Requests +// to set ACLs or update ACLs fail and return the AccessControlListNotSupported +// error code. Requests to read ACLs are still supported. For more information, see +// Controlling object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Access Permissions You can set access permissions +// using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl +// request header. Amazon S3 supports a set of predefined ACLs, known as canned +// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify +// the canned ACL name as the value of x-amz-acl. If you use this header, you +// cannot use other access control-specific headers in your request. For more +// information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly with the x-amz-grant-read, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). 
You specify
+// each grantee as a type=value pair, where the type is one of the following:
+//
+// * id
+// – if the value specified is the canonical user ID of an Amazon Web Services
+// account
+//
+// * uri – if you are granting permissions to a predefined group
+//
+// *
+// emailAddress – if the value specified is the email address of an Amazon Web
+// Services account Using email addresses to specify a grantee is only supported in
+// the following Amazon Web Services Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West
+// (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific
+// (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São
+// Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// For example, the following
+// x-amz-grant-read header grants list objects permission to the two Amazon Web
+// Services accounts identified by their email addresses. x-amz-grant-read:
+// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// You can use either
+// a canned ACL or specify access permissions explicitly. You cannot do both.
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// * By the person's ID:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// * By Email address:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the
+// CanonicalUser and, in a response to a GET Object acl request, appears as the
+// CanonicalUser. Using email addresses to specify a grantee is only supported in
+// the following Amazon Web Services Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West
+// (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific
+// (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São
+// Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// Versioning The ACL of an object is set
+// at the object version level. By default, PUT sets the ACL of the current version
+// of an object. To set the ACL of a different version, use the versionId
+// subresource. Related Resources
+//
+// * CopyObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// *
+// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) {
+ if params == nil {
+ params = &PutObjectAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectAclInput struct {
+
+ // The bucket name that contains the object to which you want to attach the ACL.
+ // When using this action with an access point, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key for which the PUT action was initiated. When using this action with an
+ // access point, you must direct requests to the access point hostname. The access
+ // point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
+ // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
+ // hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
+ // Amazon S3 User Guide.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. For more information, see Canned ACL
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ ACL types.ObjectCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not using
+ // the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
+ // HTTP status code 400 Bad Request. For more information, see Checking object
+ // integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+ // ignores any provided ChecksumAlgorithm parameter.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
+ // message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864
+ // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web
+ // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
+ // calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. This action is not supported + // by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the bucket ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. This action is not + // supported by Amazon S3 on Outposts. + GrantWriteACP *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectAclOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { + return 
err + } + if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectAcl", + } +} + +// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. +func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectAclInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectAclRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getPutObjectAclBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go new file mode 100644 index 000000000000..b8004b598449 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies a legal hold configuration to the specified object. For more +// information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This action +// is not supported by Amazon S3 on Outposts. 
+func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { + if params == nil { + params = &PutObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLegalHoldInput struct { + + // The bucket name containing the object that you want to place a legal hold on. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to place a legal hold on. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Container element for the legal hold configuration you want to apply to the + // specified object. + LegalHold *types.ObjectLockLegalHold + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID of the object that you want to place a legal hold on. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectLegalHoldOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectLegalHold", + } +} + +// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLegalHoldInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go new file mode 100644 index 000000000000..9740967a7549 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -0,0 +1,234 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// * The +// DefaultRetention settings require both a mode and a period. +// +// * The +// DefaultRetention period can be either Days or Years but you must select one. You +// cannot specify Days and Years at the same time. +// +// * You can only enable Object +// Lock for new buckets. If you want to turn on Object Lock for an existing bucket, +// contact Amazon Web Services Support. 
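A brief sketch of calling PutObjectLockConfiguration per the rules above (a mode plus exactly one of Days or Years). It is not part of the vendored diff; the bucket name and the 30-day Governance-mode retention are illustrative assumptions.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// DefaultRetention carries a mode plus Days (or Years, but not both),
	// matching the constraints listed above. The bucket name is a placeholder
	// and must belong to a bucket created with Object Lock enabled.
	_, err = client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"),
		ObjectLockConfiguration: &types.ObjectLockConfiguration{
			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
			Rule: &types.ObjectLockRule{
				DefaultRetention: &types.DefaultRetention{
					Mode: types.ObjectLockRetentionModeGovernance,
					Days: 30,
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```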
+func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { + if params == nil { + params = &PutObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to create or replace. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +type PutObjectLockConfigurationOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectLockConfiguration", + } +} + +// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go new file mode 100644 index 000000000000..c4918f3cbcad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Places an Object Retention configuration on an object. For more information, see +// Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). Users or +// accounts require the s3:PutObjectRetention permission in order to place an +// Object Retention configuration on objects. Bypassing a Governance Retention +// configuration requires the s3:BypassGovernanceRetention permission. This action +// is not supported by Amazon S3 on Outposts. 
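A short sketch of calling PutObjectRetention as described above. It is not part of the vendored diff; the bucket, key, and retain-until date are placeholders, and the caller is assumed to hold the s3:PutObjectRetention permission.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Retain the current object version for 30 days in GOVERNANCE mode.
	// Shortening an existing governance lock would additionally require
	// s3:BypassGovernanceRetention. Names and dates are placeholders.
	_, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```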
+func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { + if params == nil { + params = &PutObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectRetentionInput struct { + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. When using this action with an access point, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // This member is required. + Key *string + + // Indicates whether this action should bypass Governance-mode restrictions. + BypassGovernanceRetention bool + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The container element for the Object Retention configuration. + Retention *types.ObjectLockRetention + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectRetentionOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectRetention", + } +} + +// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectRetentionInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go new file mode 100644 index 000000000000..43effb9eb9a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the supplied tag-set to an object that already exists in a bucket. A tag is +// a key-value pair. You can associate tags with an object by sending a PUT request +// against the tagging subresource that is associated with the object. You can +// retrieve tags by sending a GET request. For more information, see +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). For +// tagging-related restrictions related to characters and encodings, see Tag +// Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. To +// use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. To put tags of any other version, use the versionId query +// parameter. You also need permission for the s3:PutObjectVersionTagging action. 
+// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). Special +// Errors +// +// * Code: InvalidTagError +// +// * Cause: The tag provided was not a valid tag. +// This error can occur if the tag did not pass input validation. For more +// information, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: +// MalformedXMLError +// +// * Cause: The XML provided does not match the schema. +// +// * Code: +// OperationAbortedError +// +// * Cause: A conflicting conditional action is currently in +// progress against this resource. Please try again. +// +// * Code: InternalError +// +// * +// Cause: The service was unable to apply the provided tag to the object. +// +// Related +// Resources +// +// * GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * +// DeleteObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { + if params == nil { + params = &PutObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectTaggingInput struct { + + // The bucket name containing the object. When using this action with an access + // point, you must direct requests to the access point hostname. The access point + // hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Name of the object key. + // + // This member is required. + Key *string + + // Container for the TagSet and Tag elements + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. 
For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The versionId of the object that the tag-set will be added to. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectTaggingOutput struct { + + // The versionId of the object the tag-set was added to. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil { 
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutObjectTagging",
+ }
+}
+
+// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember,
+ RequireChecksum: true,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
new file mode 100644
index 000000000000..922102b58a76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
@@ -0,0 +1,240 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.
+// To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see Specifying
+// Permissions in a Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
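+//
+// A minimal usage sketch (illustrative only: the bucket name is made up, and
+// client is assumed to be an *s3.Client built via s3.NewFromConfig):
+//
+//	_, err := client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
+//		Bucket: aws.String("example-bucket"),
+//		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       true,
+//			BlockPublicPolicy:     true,
+//			IgnorePublicAcls:      true,
+//			RestrictPublicBuckets: true,
+//		},
+//	})
+//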
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the +// PublicAccessBlock configurations are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// Related Resources +// +// * GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * +// GetBucketPolicyStatus +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * +// Using Amazon S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { + if params == nil { + params = &PutPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to set. + // + // This member is required. + Bucket *string + + // The PublicAccessBlock configuration that you want to apply to this Amazon S3 + // bucket. You can enable the configuration options in any combination. For more + // information about when Amazon S3 considers a bucket or object public, see The + // Meaning of "Public" + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon S3 User Guide. + // + // This member is required. + PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutPublicAccessBlock request body. For requests made using + // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services + // SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutPublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutPublicAccessBlock", + } +} + +// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember,
+ RequireChecksum: true,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutPublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
new file mode 100644
index 000000000000..013197570297
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
@@ -0,0 +1,457 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Restores an archived copy of an object back into Amazon S3. This action is not
+// supported by Amazon S3 on Outposts. This action performs the following types of
+// requests:
+//
+// * select - Perform a select query on an archived object
+//
+// * restore an
+// archive - Restore an archived object
+//
+// To use this operation, you must have
+// permissions to perform the s3:RestoreObject action. The bucket owner has this
+// permission by default and can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide. Querying Archives with Select Requests You use a
+// select type of request to perform SQL queries on archived objects.
+// The archived objects that are being queried by the select request must be formatted as
+// uncompressed comma-separated values (CSV) files. You can run queries and custom
+// analytics on your archived data without having to restore your data to a hotter
+// Amazon S3 tier. For an overview about select requests, see Querying Archived
+// Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon S3 User Guide. When making a select request, do the following:
+//
+// *
+// Define an output location for the select query's output. This must be an Amazon
+// S3 bucket in the same Amazon Web Services Region as the bucket that contains the
+// archive object that is being queried. The Amazon Web Services account that
+// initiates the job must have permissions to write to the S3 bucket. You can
+// specify the storage class and encryption for the output objects stored in the
+// bucket. For more information about output, see Querying Archived Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
+// in the Amazon S3 User Guide. For more information about the S3 structure in the
+// request body, see the following:
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * Managing
+// Access with ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) in the
+// Amazon S3 User Guide
+//
+// * Protecting Data Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in
+// the Amazon S3 User Guide
+//
+// * Define the SQL expression for the SELECT type of
+// restoration for your query in the request body's SelectParameters structure. You
+// can use expressions like the following examples.
+//
+// * The following expression
+// returns all records from the specified object. SELECT * FROM Object
+//
+// * Assuming
+// that you are not using any headers for data stored in the object, you can
+// specify columns with positional headers. SELECT s._1, s._2 FROM Object s WHERE
+// s._3 > 100
+//
+// * If you have headers and you set the fileHeaderInfo in the CSV
+// structure in the request body to USE, you can specify headers in the query. (If
+// you set the fileHeaderInfo field to IGNORE, the first row is skipped for the
+// query.) You cannot mix ordinal positions with header column names. SELECT s.Id,
+// s.FirstName, s.SSN FROM S3Object s
+//
+// For more information about using SQL with S3
+// Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier
+// Select
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
+// in the Amazon S3 User Guide. When making a select request, you can also do the
+// following:
+//
+// * To expedite your queries, specify the Expedited tier. For more
+// information about tiers, see "Restoring Archives," later in this topic.
+//
+// *
+// Specify details about the data serialization format of both the input object
+// that is being queried and the serialization of the CSV-encoded query
+// results.
+//
+// The following are additional important facts about the select
+// feature:
+//
+// * The output results are new Amazon S3 objects. Unlike archive
+// retrievals, they are stored until explicitly deleted, manually or through a
+// lifecycle policy.
+//
+// * You can issue more than one select request on the same
+// Amazon S3 object.
Amazon S3 doesn't deduplicate requests, so avoid issuing +// duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has +// already been restored. A select request doesn’t return error response +// 409. +// +// Restoring objects Objects that you archive to the S3 Glacier or S3 Glacier +// Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 +// Intelligent-Tiering Deep Archive tiers are not accessible in real time. For +// objects in Archive Access or Deep Archive Access tiers you must first initiate a +// restore request, and then wait until the object is moved into the Frequent +// Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage +// classes you must first initiate a restore request, and then wait until a +// temporary copy of the object is available. To access an archived object, you +// must restore the object for the duration (number of days) that you specify. To +// restore a specific object version, you can provide a version ID. If you don't +// provide a version ID, Amazon S3 restores the current version. When restoring an +// archived object (or using a select request), you can specify one of the +// following data access tier options in the Tier element of the request body: +// +// * +// Expedited - Expedited retrievals allow you to quickly access your data stored in +// the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when +// occasional urgent requests for a subset of archives are required. For all but +// the largest archived objects (250 MB+), data accessed using Expedited retrievals +// is typically made available within 1–5 minutes. Provisioned capacity ensures +// that retrieval capacity for Expedited retrievals is available when you need it. +// Expedited retrievals and provisioned capacity are not available for objects +// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. +// +// * Standard - Standard retrievals allow you to access any of +// your archived objects within several hours. This is the default option for +// retrieval requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier storage +// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 +// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost +// retrieval option in S3 Glacier, enabling you to retrieve large amounts, even +// petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 +// hours for objects stored in the S3 Glacier storage class or S3 +// Intelligent-Tiering Archive tier. They typically finish within 48 hours for +// objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// +// For more information about archive retrieval +// options and provisioned capacity for Expedited data access, see Restoring +// Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the +// Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the +// restore speed to a faster speed while it is in progress. 
For more information,
+// see Upgrading the speed of an in-progress restore
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide. To get the status of object restoration, you can
+// send a HEAD request. Operations return the x-amz-restore header, which provides
+// information about the restoration status, in the response. You can use Amazon S3
+// event notifications to notify you when a restore is initiated or completed. For
+// more information, see Configuring Amazon S3 Event Notifications
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+// Amazon S3 User Guide. After restoring an archived object, you can update the
+// restoration period by reissuing the request with a new period. Amazon S3 updates
+// the restoration period relative to the current time and charges only for the
+// request; there are no data transfer charges. You cannot update the restoration
+// period when Amazon S3 is actively processing your current restore request for
+// the object. If your bucket has a lifecycle configuration with a rule that
+// includes an expiration action, the object expiration overrides the life span
+// that you specify in a restore request. For example, if you restore an object
+// copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3
+// deletes the object in 3 days. For more information about lifecycle
+// configuration, see PutBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in
+// the Amazon S3 User Guide. Responses A successful action returns either the 200 OK or
+// 202 Accepted status code.
+//
+// * If the object is not previously restored, then
+// Amazon S3 returns 202 Accepted in the response.
+//
+// * If the object is previously
+// restored, Amazon S3 returns 200 OK in the response.
+//
+// Special Errors
+//
+// * Code:
+// RestoreAlreadyInProgress
+//
+// * Cause: Object restore is already in progress. (This
+// error does not apply to SELECT type requests.)
+//
+// * HTTP Status Code: 409
+// Conflict
+//
+// * SOAP Fault Code Prefix: Client
+//
+// * Code:
+// GlacierExpeditedRetrievalNotAvailable
+//
+// * Cause: expedited retrievals are
+// currently not available. Try again later. (Returned if there is insufficient
+// capacity to process the Expedited request. This error applies only to Expedited
+// retrievals and not to S3 Standard or Bulk retrievals.)
+// +// * HTTP Status Code: +// 503 +// +// * SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * +// PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * +// GetBucketNotificationConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// * +// SQL Reference for Amazon S3 Select and S3 Glacier Select +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide +func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { + if params == nil { + params = &RestoreObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RestoreObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RestoreObjectInput struct { + + // The bucket name containing the object to restore. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the action was initiated. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Container for restore job parameters. + RestoreRequest *types.RestoreRequest + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type RestoreObjectOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Indicates the path in the provided S3 output location where Select results will + // be restored to. + RestoreOutputPath *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "RestoreObject", + } +} + +// getRestoreObjectRequestAlgorithmMember gets the 
request checksum algorithm value
+// provided as input.
+func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*RestoreObjectInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getRestoreObjectRequestAlgorithmMember,
+ RequireChecksum: false,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getRestoreObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getRestoreObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*RestoreObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getRestoreObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
new file mode 100644
index 000000000000..1b9dbc7bcdaf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
@@ -0,0 +1,426 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithysync "github.com/aws/smithy-go/sync"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "sync"
+)
+
+// This action filters the contents of an Amazon S3 object based on a simple
+// structured query language (SQL) statement. In the request, along with the SQL
+// expression, you must also specify a data serialization format (JSON, CSV, or
+// Apache Parquet) of the object. Amazon S3 uses this format to parse object data
+// into records, and returns only records that match the specified SQL expression.
+// You must also specify the data serialization format for the response. This
+// action is not supported by Amazon S3 on Outposts. For more information about
+// Amazon S3 Select, see Selecting Content from Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
+// and SELECT Command
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html)
+// in the Amazon S3 User Guide.
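+//
+// A request sketch (illustrative only: client, the bucket, and the key are
+// assumptions, with client built via s3.NewFromConfig):
+//
+//	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
+//		Bucket:         aws.String("example-bucket"),
+//		Key:            aws.String("data.csv"),
+//		Expression:     aws.String("SELECT * FROM S3Object s"),
+//		ExpressionType: types.ExpressionTypeSql,
+//		InputSerialization: &types.InputSerialization{
+//			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
+//		},
+//		OutputSerialization: &types.OutputSerialization{
+//			CSV: &types.CSVOutput{},
+//		},
+//	})
+//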
For more information about using SQL with Amazon S3 +// Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. Permissions You must have s3:GetObject permission +// for this operation. Amazon S3 Select does not support anonymous access. For more +// information about permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in +// the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to +// query objects that have the following format properties: +// +// * CSV, JSON, and +// Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is +// the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and +// JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only +// compression formats that Amazon S3 Select supports for CSV and JSON files. +// Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. +// Amazon S3 Select does not support whole-object compression for Parquet +// objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must +// use the headers that are documented in the GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 +// managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), +// server-side encryption is handled transparently, so you don't need to specify +// anything. For more information about server-side encryption, including SSE-S3 +// and SSE-KMS, see Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in +// the Amazon S3 User Guide. +// +// Working with the Response Body Given the response +// size is unknown, Amazon S3 Select streams the response as a series of messages +// and includes a Transfer-Encoding header with chunked as its value in the +// response. For more information, see Appendix: SelectObjectContent Response +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// * Range: +// Although you can specify a scan range for an Amazon S3 Select request (see +// SelectObjectContentRequest - ScanRange +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an object +// to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You +// cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. 
+// For more information about storage classes, see Storage Classes
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
+// in the Amazon S3 User Guide.
+//
+// Special Errors For a list of special errors for
+// this operation, see List of SELECT Object Content Error Codes
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList)
+// Related Resources
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// *
+// GetBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// *
+// PutBucketLifecycleConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) {
+ if params == nil {
+ params = &SelectObjectContentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*SelectObjectContentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Request to filter the contents of an Amazon S3 object based on a simple
+// Structured Query Language (SQL) statement. In the request, along with the SQL
+// expression, you must specify a data serialization format (JSON or CSV) of the
+// object. Amazon S3 uses this to parse object data into records. It returns only
+// records that match the specified SQL expression. You must also specify the data
+// serialization format for the response. For more information, see S3Select API
+// Documentation
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
+type SelectObjectContentInput struct {
+
+ // The S3 bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The expression that is used to query the object.
+ //
+ // This member is required.
+ Expression *string
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // This member is required.
+ ExpressionType types.ExpressionType
+
+ // Describes the format of the data in the object that is being queried.
+ //
+ // This member is required.
+ InputSerialization *types.InputSerialization
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Describes the format of the data that you want Amazon S3 to return in response.
+ //
+ // This member is required.
+ OutputSerialization *types.OutputSerialization
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies if periodic request progress information should be enabled.
+ RequestProgress *types.RequestProgress
+
+ // The server-side encryption (SSE) algorithm used to encrypt the object. This
+ // parameter is needed only when the object was created using a checksum algorithm.
+ // For more information, see Protecting data using SSE-C keys
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerAlgorithm *string
+
+ // The server-side encryption (SSE) customer managed key. This parameter is needed
+ // only when the object was created using a checksum algorithm.
For more
+ // information, see Protecting data using SSE-C keys
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerKey *string
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ // needed only when the object was created using a checksum algorithm. For more
+ // information, see Protecting data using SSE-C keys
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the byte range of the object to get the records from. A record is
+ // processed when its first byte is contained by the range. This parameter is
+ // optional, but when specified, it must not be empty. See RFC 2616, Section
+ // 14.35.1 about how to specify the start and end of the range. ScanRange may be
+ // used in the following ways:
+ //
+ // * <scanrange><start>50</start><end>100</end></scanrange> - process only the
+ // records starting between the bytes 50 and 100 (inclusive, counting from zero)
+ //
+ // * <scanrange><start>50</start></scanrange> - process only the
+ // records starting after the byte 50
+ //
+ // * <scanrange><end>50</end></scanrange> - process only the records within the
+ // last 50 bytes of the file.
+ ScanRange *types.ScanRange
+
+ noSmithyDocumentSerde
+}
+
+type SelectObjectContentOutput struct {
+ eventStream *SelectObjectContentEventStream
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+// GetStream returns the type to interact with the event stream.
+func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream {
+ return o.eventStream
+}
+
+func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "SelectObjectContent",
+ }
+}
+
+// getSelectObjectContentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getSelectObjectContentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*SelectObjectContentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getSelectObjectContentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation.
+//
+// For testing and mocking the event stream this type should be initialized via
+// the NewSelectObjectContentEventStream constructor function. Use the functional options
+// to pass in nested mock behavior.
+type SelectObjectContentEventStream struct {
+ // SelectObjectContentEventStreamReader is the EventStream reader for the
+ // SelectObjectContentEventStream events. This value is automatically set by the
+ // SDK when the API call is made. Use this member when unit testing your code with
+ // the SDK to mock out the EventStream Reader.
+ //
+ // Must not be nil.
+ Reader SelectObjectContentEventStreamReader
+
+ done chan struct{}
+ closeOnce sync.Once
+ err *smithysync.OnceErr
+}
+
+// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
+// This function should only be used for testing and mocking the SelectObjectContentEventStream
+// stream within your application.
+//
+// The Reader member must be set before reading events from the stream.
+func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+ es := &SelectObjectContentEventStream{
+ done: make(chan struct{}),
+ err: smithysync.NewOnceErr(),
+ }
+ for _, fn := range optFns {
+ fn(es)
+ }
+ return es
+}
+
+// Events returns a channel to read events from.
+func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream {
+ return es.Reader.Events()
+}
+
+// Close closes the stream. Close must be called when done using the stream API.
+// Not calling Close may result in resource leaks.
+//
+// Will close the underlying EventStream writer and reader, and no more events can be
+// sent or received.
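+//
+// A typical consumer drains the stream and then closes it (a sketch; error
+// handling elided, with out assumed to be the *SelectObjectContentOutput
+// returned by a SelectObjectContent call):
+//
+//	stream := out.GetStream()
+//	defer stream.Close()
+//	for event := range stream.Events() {
+//		if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+//			_ = rec.Value.Payload // raw query result bytes
+//		}
+//	}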
+func (es *SelectObjectContentEventStream) Close() error { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + close(es.done) + + es.Reader.Close() +} + +// Err returns any error that occurred while reading or writing EventStream Events +// from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +func (es *SelectObjectContentEventStream) waitStreamClose() { + type errorSet interface { + ErrorSet() <-chan struct{} + } + + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(errorSet); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go new file mode 100644 index 000000000000..9c70d4c26b9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -0,0 +1,496 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Uploads a part in a multipart upload. In this operation, you provide part data +// in your request. However, you have an option to specify your existing Amazon S3 +// object as a data source for the part you are uploading. To upload a part from an +// existing object, you use the UploadPartCopy +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. You must initiate a multipart upload (see CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon S3 +// returns an upload ID, a unique identifier, that you must include in your upload +// part request. Part numbers can be any number from 1 to 10,000, inclusive. A part +// number uniquely identifies a part and also defines its position within the +// object being created. If you upload a new part using the same part number that +// was used with a previous part, the previously uploaded part is overwritten. For +// information about maximum and minimum part sizes and other multipart upload +// specifications, see Multipart upload limits +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the +// Amazon S3 User Guide. To ensure that data is not corrupted when traversing the +// network, specify the Content-MD5 header in the upload part request. Amazon S3 +// checks the part data against the provided MD5 value. 
If they do not match,
+// Amazon S3 returns an error. If the upload request is signed with Signature
+// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a
+// checksum instead of Content-MD5. For more information, see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4)
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+// Note: After you initiate multipart upload and upload one or more parts, you must
+// either complete or abort multipart upload in order to stop getting charged for
+// storage of the uploaded parts. Only after you either complete or abort the
+// multipart upload does Amazon S3 free up the parts storage and stop charging you
+// for the parts storage. For more information on multipart uploads, go to Multipart Upload
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+// the Amazon S3 User Guide. For information on the permissions required to use
+// the multipart upload API, go to Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon S3 User Guide. You can optionally request server-side encryption where
+// Amazon S3 encrypts your data as it writes it to disks in its data centers and
+// decrypts it for you when you access it. You have the option of providing your
+// own encryption key, or you can use the Amazon Web Services managed encryption
+// keys. If you choose to provide your own encryption key, the request headers you
+// provide in the request must match the headers you used in the request to
+// initiate the upload by using CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide. Server-side encryption is supported by the S3
+// Multipart Upload actions. Unless you are using a customer-provided encryption
+// key, you don't need to specify the encryption parameters in each UploadPart
+// request. Instead, you only need to specify the server-side encryption parameters
+// in the initial Initiate Multipart request. For more information, see
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// If you requested server-side encryption using a customer-provided encryption key
+// in your initiate multipart upload request, you must provide identical encryption
+// information in each part upload using the following headers.
+//
+// *
+// x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// Special Errors
+//
+// * Code:
+// NoSuchUpload
+//
+// * Cause: The specified multipart upload does not exist. The upload
+// ID might be invalid, or the multipart upload might have been aborted or
+// completed.
+// +// * HTTP Status Code: 404 Not Found +// +// * SOAP Fault Code Prefix: +// Client +// +// Related Resources +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { + if params == nil { + params = &UploadPartInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadPartOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadPartInput struct { + + // The name of the bucket to which the multipart upload was initiated. When using + // this action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Part number of part being uploaded. This is a positive integer between 1 and + // 10,000. + // + // This member is required. + PartNumber int32 + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // This member is required. + UploadId *string + + // Object data. + Body io.Reader + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. 
This checksum algorithm must
+ // be the same for all parts and it must match the checksum value supplied in the
+ // CreateMultipartUpload request.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically.
+ ContentLength int64
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
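The checksum fields above are easiest to read alongside a call. The following editorial sketch (not part of the vendored file) uploads a single part with a CRC32 checksum; the bucket, key, and upload ID are hypothetical placeholders.

```go
// Illustrative sketch only: upload one part of a multipart upload with a
// CRC32 checksum. Bucket, key, and upload ID are hypothetical placeholders.
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	part := make([]byte, 5*1024*1024) // every part except the last must be at least 5 MiB
	out, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{
		Bucket:            aws.String("example-bucket"),    // hypothetical
		Key:               aws.String("example-object"),    // hypothetical
		UploadId:          aws.String("example-upload-id"), // returned by CreateMultipartUpload
		PartNumber:        1,
		Body:              bytes.NewReader(part),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32, // must match the CreateMultipartUpload setting
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("part 1: ETag=%s CRC32=%s", aws.ToString(out.ETag), aws.ToString(out.ChecksumCRC32))
}
```

The returned ETag (and checksum, if requested) must be retained per part for the later CompleteMultipartUpload call.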
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+type UploadPartOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
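As the SSE-C fields just documented require, a part upload must repeat the exact key material supplied to CreateMultipartUpload. A minimal editorial sketch, with placeholder key bytes rather than a real secret:

```go
// Illustrative sketch only: repeat the same customer-provided key (SSE-C) on
// each UploadPart that was supplied to CreateMultipartUpload.
package example

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func uploadPartSSEC(ctx context.Context, client *s3.Client, uploadID string, n int32, data, key []byte) error {
	sum := md5.Sum(key) // Amazon S3 checks this digest to detect key transmission errors
	_, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:               aws.String("example-bucket"), // hypothetical
		Key:                  aws.String("example-object"), // hypothetical
		UploadId:             aws.String(uploadID),
		PartNumber:           n,
		Body:                 bytes.NewReader(data),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	return err
}
```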
+ ServerSideEncryption types.ServerSideEncryption + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpUploadPartValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addUploadPartUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "UploadPart", + } +} + +// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*UploadPartInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getUploadPartRequestAlgorithmMember,
+ RequireChecksum: false,
+ EnableTrailingChecksum: true,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getUploadPartBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getUploadPartBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignUploadPart is used to generate a presigned HTTP request which contains a
+// presigned URL, signed headers, and the HTTP method used.
+func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns,
+ c.client.addOperationUploadPartMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addUploadPartPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
new file mode 100644
index 000000000000..c2c58ce7950a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -0,0 +1,437 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
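The PresignUploadPart helper defined above is commonly used to hand a single part upload to a client that holds no AWS credentials. An editorial sketch, assuming hypothetical bucket, key, and upload ID values:

```go
// Illustrative sketch only: presign an UploadPart request so a holder of the
// URL can PUT one part without AWS credentials. Names are placeholders.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg), func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute // illustrative expiry
	})
	req, err := presigner.PresignUploadPart(context.TODO(), &s3.UploadPartInput{
		Bucket:     aws.String("example-bucket"),    // hypothetical
		Key:        aws.String("example-object"),    // hypothetical
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s %s", req.Method, req.URL)
}
```

Note how addUploadPartPayloadAsUnsigned above removes payload signing for this path: the part body is not known at presign time, so the URL is issued with an unsigned payload.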
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Uploads a part by copying data from an existing object as the data source. You
+// specify the data source by adding the request header x-amz-copy-source in your
+// request and a byte range by adding the request header x-amz-copy-source-range in
+// your request. For information about maximum and minimum part sizes and other
+// multipart upload specifications, see Multipart upload limits
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the
+// Amazon S3 User Guide. Instead of using an existing object as part data, you
+// might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and
+// provide data in your request. You must initiate a multipart upload before you
+// can upload any part. In response to your initiate request, Amazon S3 returns a
+// unique identifier, the upload ID, that you must include in your upload part
+// request. For more information about using the UploadPartCopy operation, see the
+// following:
+//
+// * For conceptual information about multipart uploads, see Uploading
+// Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about permissions required to use the
+// multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about copying objects using a single
+// atomic action vs. a multipart upload, see Operations on Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about using server-side encryption with
+// customer-provided encryption keys with the UploadPartCopy operation, see
+// CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the
+// following additional considerations about the request headers
+// x-amz-copy-source-if-match, x-amz-copy-source-if-none-match,
+// x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both of the
+// x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are
+// present in the request as follows: x-amz-copy-source-if-match condition
+// evaluates to true, and; x-amz-copy-source-if-unmodified-since condition
+// evaluates to false; Amazon S3 returns 200 OK and copies the data.
+//
+// *
+// Consideration 2 - If both of the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request as
+// follows: x-amz-copy-source-if-none-match condition evaluates to false, and;
+// x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3
+// returns 412 Precondition Failed response code.
+//
+// Versioning If your bucket has
+// versioning enabled, you could have multiple versions of the same object.
By +// default, x-amz-copy-source identifies the current version of the object to copy. +// If the current version is a delete marker and you don't specify a versionId in +// the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does +// not exist. If you specify versionId in the x-amz-copy-source and the versionId +// is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not +// allowed to specify a delete marker as a version for the x-amz-copy-source. You +// can optionally specify a specific version of the source object to copy by adding +// the versionId subresource as shown in the following example: x-amz-copy-source: +// /bucket/object?versionId=version id Special Errors +// +// * Code: NoSuchUpload +// +// * +// Cause: The specified multipart upload does not exist. The upload ID might be +// invalid, or the multipart upload might have been aborted or completed. +// +// * HTTP +// Status Code: 404 Not Found +// +// * Code: InvalidRequest +// +// * Cause: The specified copy +// source is not supported as a byte-range copy source. +// +// * HTTP Status Code: 400 +// Bad Request +// +// Related Resources +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { + if params == nil { + params = &UploadPartCopyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadPartCopyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadPartCopyInput struct { + + // The bucket name. When using this action with an access point, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. 
+ Bucket *string + + // Specifies the source object for the copy operation. You specify the value in one + // of two formats, depending on whether you want to access the source object + // through an access point + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * + // For objects not accessed through an access point, specify the name of the source + // bucket and key of the source object, separated by a slash (/). For example, to + // copy the object reports/january.pdf from the bucket awsexamplebucket, use + // awsexamplebucket/reports/january.pdf. The value must be URL-encoded. + // + // * For + // objects accessed through access points, specify the Amazon Resource Name (ARN) + // of the object as accessed through the access point, in the format + // arn:aws:s3:::accesspoint//object/. For example, to copy the object + // reports/january.pdf through access point my-access-point owned by account + // 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using access + // points only when the source and destination buckets are in the same Amazon Web + // Services Region. Alternatively, for objects accessed through Amazon S3 on + // Outposts, specify the ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 in + // Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL-encoded. + // + // To copy a specific version of an object, append + // ?versionId= to the value (for example, + // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of the + // source object. + // + // This member is required. + CopySource *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + // + // This member is required. + PartNumber int32 + + // Upload ID identifying the multipart upload whose part is being copied. + // + // This member is required. + UploadId *string + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time + + // Copies the object if its entity tag (ETag) is different than the specified ETag. + CopySourceIfNoneMatch *string + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time + + // The range of bytes to copy from the source object. The range value must use the + // form bytes=first-last, where the first and last are the zero-based byte offsets + // to copy. For example, bytes=0-9 indicates that you want to copy the first 10 + // bytes of the source. You can copy a range only if the source object is greater + // than 5 MB. + CopySourceRange *string + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). 
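To make the CopySource and CopySourceRange formats just described concrete, here is an editorial sketch that copies the first 5 MiB of an existing object as part 1 of a multipart upload; all names and the upload ID are placeholders.

```go
// Illustrative sketch only: server-side copy of a byte range from an existing
// object into part 1 of a multipart upload. Names are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func copyFirstPart(ctx context.Context, client *s3.Client, uploadID string) (*s3.UploadPartCopyOutput, error) {
	return client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:          aws.String("example-destination-bucket"),
		Key:             aws.String("example-destination-object"),
		UploadId:        aws.String(uploadID),
		PartNumber:      1,
		CopySource:      aws.String("example-source-bucket/reports/january.pdf"), // URL-encoded "bucket/key"
		CopySourceRange: aws.String("bytes=0-5242879"),                           // zero-based, inclusive offsets
	})
}
```

The returned CopyPartResult carries the part's ETag, which a later CompleteMultipartUpload call needs.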
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account ID of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request fails with the HTTP status
+ // code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The account ID of the expected source bucket owner. If the source bucket is
+ // owned by a different account, the request fails with the HTTP status code 403
+ // Forbidden (access denied).
+ ExpectedSourceBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+type UploadPartCopyOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Container for all response elements.
+ CopyPartResult *types.CopyPartResult
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "UploadPartCopy",
+ }
+}
+
+// getUploadPartCopyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getUploadPartCopyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartCopyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartCopyBucketMember,
+ },
+ UsePathStyle:
options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
new file mode 100644
index 000000000000..78eeadd45a1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
@@ -0,0 +1,457 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strings"
+ "time"
+)
+
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see
+// Transforming objects with Object Lambda access points
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide. This operation supports metadata that can be
+// returned by GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), in
+// addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when it
+// internally invokes GetObject. When WriteGetObjectResponse is called by a
+// customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return. You can include any
+// number of metadata headers. When including a metadata header, it should be
+// prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
+// MyCustomValue. The primary use case for this is to forward GetObject metadata.
+// Amazon Web Services provides some prebuilt Lambda functions that you can use
+// with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in the
+// Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point. Example 1: PII Access Control - This Lambda function uses
+// Amazon Comprehend, a natural language processing (NLP) service using machine
+// learning to find insights and relationships in text. It automatically detects
+// personally identifiable information (PII) such as names, addresses, dates,
+// credit card numbers, and social security numbers from documents in your Amazon
+// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon
+// Comprehend, a natural language processing (NLP) service using machine learning
+// to find insights and relationships in text.
It automatically redacts personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is
+// equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information
+// on how to view and use these functions, see Using Amazon Web Services built
+// Lambda functions
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) in
+// the Amazon S3 User Guide.
+func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) {
+ if params == nil {
+ params = &WriteGetObjectResponseInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*WriteGetObjectResponseOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type WriteGetObjectResponseInput struct {
+
+ // Route prefix to the HTTP URL generated.
+ //
+ // This member is required.
+ RequestRoute *string
+
+ // A single use encrypted token that maps WriteGetObjectResponse to the end user
+ // GetObject request.
+ //
+ // This member is required.
+ RequestToken *string
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The object data.
+ Body io.Reader
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ // Lambda function. This may not match the checksum for the object stored in Amazon
+ // S3. Amazon S3 will perform validation of the checksum values only when the
+ // original GetObject request required checksum validation. For more information
+ // about checksums, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. Only one checksum header can be specified at a
+ // time. If you supply multiple checksum headers, this request will fail.
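A sketch of how the required RequestRoute and RequestToken members are typically used from an Object Lambda handler; extracting them from the invoking event (its getObjectContext fields) is assumed and not shown here.

```go
// Illustrative sketch only: return a transformed object from an S3 Object
// Lambda handler. route and token would come from the invoking event's
// getObjectContext (outputRoute/outputToken).
package example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func respond(ctx context.Context, client *s3.Client, route, token string, transformed []byte) error {
	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute:  aws.String(route),
		RequestToken:  aws.String(token),
		StatusCode:    200,
		ContentLength: int64(len(transformed)),
		Body:          bytes.NewReader(transformed),
	})
	return err
}
```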
+ ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the + // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the + // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object + // Lambda function. This may not match the checksum for the object stored in Amazon + // S3. Amazon S3 will perform validation of the checksum values only when the + // original GetObject request required checksum validation. For more information + // about checksums, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // The size of the content body in bytes. + ContentLength int64 + + // The portion of the object returned in the response. + ContentRange *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether an object stored in Amazon S3 is (true) or is not (false) a + // delete marker. + DeleteMarker bool + + // An opaque identifier assigned by a web server to a specific version of a + // resource found at a URL. + ETag *string + + // A string that uniquely identifies an error condition. Returned in the tag of + // the error XML response for a corresponding GetObject call. Cannot be used with a + // successful StatusCode header or when the transformed object is provided in the + // body. All error codes from S3 are sentence-cased. The regular expression (regex) + // value is "^[A-Z][a-zA-Z]+$". + ErrorCode *string + + // Contains a generic description of the error condition. Returned in the tag of + // the error XML response for a corresponding GetObject call. Cannot be used with a + // successful StatusCode header or when the transformed object is provided in body. + ErrorMessage *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // that provide the object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. 
+ Expires *time.Time
+
+ // The date and time that the object was last modified.
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Set to the number of metadata entries not returned in x-amz-meta headers. This
+ // can happen if you create metadata using an API like SOAP that supports more
+ // flexible metadata than the REST API. For example, using SOAP, you can create
+ // metadata whose values are not legal HTTP headers.
+ MissingMeta int32
+
+ // Indicates whether an object stored in Amazon S3 has an active legal hold.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
+ // more information about S3 Object Lock, see Object Lock
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when Object Lock is configured to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has.
+ PartsCount int32
+
+ // Indicates if the request involves a bucket that is either a source or
+ // destination in a replication rule. For more information about S3 Replication,
+ // see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Provides information about object restoration operation and expiration time of
+ // the restored object copy.
+ Restore *string
+
+ // Encryption algorithm used if server-side encryption with a customer-provided
+ // encryption key was specified for the object stored in Amazon S3.
+ SSECustomerAlgorithm *string
+
+ // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
+ // encrypt data stored in S3. For more information, see Protecting data using
+ // server-side encryption with customer-provided encryption keys (SSE-C)
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object stored in Amazon S3.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing the requested object in
+ // Amazon S3 (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The integer status code for an HTTP response of a corresponding GetObject
+ // request. Status Codes
+ //
+ // * 200 - OK
+ //
+ // * 206 - Partial Content
+ //
+ // * 304 - Not
+ // Modified
+ //
+ // * 400 - Bad Request
+ //
+ // * 401 - Unauthorized
+ //
+ // * 403 - Forbidden
+ //
+ // * 404 -
+ // Not Found
+ //
+ // * 405 - Method Not Allowed
+ //
+ // * 409 - Conflict
+ //
+ // * 411 - Length
+ // Required
+ //
+ // * 412 - Precondition Failed
+ //
+ // * 416 - Range Not Satisfiable
+ //
+ // * 500 -
+ // Internal Server Error
+ //
+ // * 503 - Service Unavailable
+ StatusCode int32
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects. For more
+ // information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
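For the error path described by the ErrorCode, ErrorMessage, and StatusCode members above, a handler responds without a transformed body. An editorial sketch:

```go
// Illustrative sketch only: signal a failure to the original GetObject caller.
// ErrorCode must be sentence-cased (regex ^[A-Z][a-zA-Z]+$) and cannot be
// combined with a transformed body or a success StatusCode.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func respondDenied(ctx context.Context, client *s3.Client, route, token string) error {
	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   403,
		ErrorCode:    aws.String("AccessDenied"),
		ErrorMessage: aws.String("caller is not allowed to read this object"),
	})
	return err
}
```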
+ StorageClass types.StorageClass + + // The number of tags, if any, on the object. + TagCount int32 + + // An ID used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type WriteGetObjectResponseOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { + return err + } + if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type endpointPrefix_opWriteGetObjectResponseMiddleware struct { +} + +func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { + return "EndpointHostPrefix" +} + +func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + input, ok := 
in.Parameters.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input type %T", in.Parameters) + } + + var prefix strings.Builder + if input.RequestRoute == nil { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")} + } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)} + } else { + prefix.WriteString(*input.RequestRoute) + } + prefix.WriteString(".") + req.URL.Host = prefix.String() + req.URL.Host + + return next.HandleSerialize(ctx, in) +} +func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, `OperationSerializer`, middleware.After) +} + +func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "WriteGetObjectResponse", + } +} + +func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: true, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go new file mode 100644 index 000000000000..995d909cf8c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -0,0 +1,21940 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
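The deserializers generated in this file translate REST-XML error responses (such as the NoSuchUpload case handled below) into typed errors, so call sites can branch with errors.As rather than matching strings. An editorial sketch:

```go
// Illustrative sketch only: branching on a modeled S3 error surfaced by the
// generated deserializers.
package example

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func isStaleUpload(ctx context.Context, client *s3.Client, input *s3.UploadPartInput) (bool, error) {
	_, err := client.UploadPart(ctx, input)
	var noSuchUpload *types.NoSuchUpload
	if errors.As(err, &noSuchUpload) {
		// The upload ID is invalid, or the multipart upload was already
		// aborted or completed; restart with CreateMultipartUpload.
		return true, nil
	}
	return false, err
}
```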
+ +package s3 + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strconv" + "strings" +) + +type awsRestxml_deserializeOpAbortMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata) + } + output := &AbortMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchUpload", errorCode): + return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return 
fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCompleteMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata) + } + output := &CompleteMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CompleteMultipartUploadOutput + if *v == nil { + sv = &CompleteMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case 
strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Location", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Location = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCopyObject struct { +} + +func (*awsRestxml_deserializeOpCopyObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata) + } + output := &CopyObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + 
errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ObjectNotInActiveTierError", errorCode): + return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CopySourceVersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CopyObjectOutput + if *v == nil { + sv = &CopyObjectOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CopyObjectResult", t.Name.Local): + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCreateBucket struct { +} + +func (*awsRestxml_deserializeOpCreateBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata) + } + output := &CreateBucketOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("BucketAlreadyExists", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody) + + case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("Location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Location = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCreateMultipartUpload struct { +} + +func 
(*awsRestxml_deserializeOpCreateMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata) + } + output := &CreateMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-abort-date"); 
len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateMultipartUploadOutput + if *v == nil { + sv = &CreateMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() 
+ if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteBucket struct { +} + +func (*awsRestxml_deserializeOpDeleteBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata) + } + output := &DeleteBucketOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata) + } + output := &DeleteBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketCors struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata) + } + output := &DeleteBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata) + } + output := &DeleteBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &DeleteBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata) + } + output := &DeleteBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + 
return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketLifecycle struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata) + } + output := &DeleteBucketLifecycleOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata) + } + output := &DeleteBucketMetricsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata) + } + output := &DeleteBucketOwnershipControlsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage 
:= errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata) + } + output := &DeleteBucketPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketReplication struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata) + } + output := &DeleteBucketReplicationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata) + } + output := &DeleteBucketTaggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } 
+ errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata) + } + output := &DeleteBucketWebsiteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteObject struct { +} + +func (*awsRestxml_deserializeOpDeleteObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata) + } + output := &DeleteObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeleteObjects struct { +} + +func (*awsRestxml_deserializeOpDeleteObjects) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata) + } + output := &DeleteObjectsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeleteObjectsOutput + if *v == nil { + sv = &DeleteObjectsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != 
nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Deleted", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata) + } + output := &DeleteObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); 
len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata) + } + output := &DeletePublicAccessBlockOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata) + } + output := &GetBucketAccelerateConfigurationOutput{} + out.Result = 
output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAccelerateConfigurationOutput + if *v == nil { + sv = &GetBucketAccelerateConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.BucketAccelerateStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAcl struct { +} + +func (*awsRestxml_deserializeOpGetBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, 
+) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata) + } + output := &GetBucketAclOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAclOutput + if *v == nil { + sv = &GetBucketAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != 
nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata) + } + output := &GetBucketAnalyticsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + 
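// Guard: a nil **v gives the decoder nowhere to store the decoded value.
+ 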
return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAnalyticsConfigurationOutput + if *v == nil { + sv = &GetBucketAnalyticsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketCors struct { +} + +func (*awsRestxml_deserializeOpGetBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata) + } + output := &GetBucketCorsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketCorsOutput + if *v == nil { + sv = &GetBucketCorsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CORSRule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata) + } + output := &GetBucketEncryptionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode 
:= "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketEncryptionOutput + if *v == nil { + sv = &GetBucketEncryptionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &GetBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = 
awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketIntelligentTieringConfigurationOutput + if *v == nil { + sv = &GetBucketIntelligentTieringConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, 
metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata) + } + output := &GetBucketInventoryConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketInventoryConfigurationOutput + if *v == nil { + sv = &GetBucketInventoryConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("InventoryConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata) + } + output := &GetBucketLifecycleConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLifecycleConfigurationOutput + if *v == nil { + sv = &GetBucketLifecycleConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + 
switch { + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLocation struct { +} + +func (*awsRestxml_deserializeOpGetBucketLocation) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) + } + output := &GetBucketLocationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLocationOutput + if *v == nil { + sv = &GetBucketLocationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LocationConstraint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LocationConstraint = types.BucketLocationConstraint(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLogging struct { +} + +func (*awsRestxml_deserializeOpGetBucketLogging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata) + } + output := &GetBucketLoggingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if 
reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLoggingOutput + if *v == nil { + sv = &GetBucketLoggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LoggingEnabled", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) + } + output := &GetBucketMetricsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + 
return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketMetricsConfigurationOutput + if *v == nil { + sv = &GetBucketMetricsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MetricsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + } + output := &GetBucketNotificationConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketNotificationConfigurationOutput + if *v == nil { + sv = &GetBucketNotificationConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("QueueConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TopicConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type 
awsRestxml_deserializeOpGetBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata) + } + output := &GetBucketOwnershipControlsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketOwnershipControlsOutput + if *v == nil { + sv = &GetBucketOwnershipControlsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + 
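+ // done is true once the wrapped element's closing tag has been consumed.
+ 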
break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("OwnershipControls", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata) + } + output := &GetBucketPolicyOutput{} + out.Result = output + + err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + var buf bytes.Buffer + if contentLength > 0 { + buf.Grow(int(contentLength)) + } else { + buf.Grow(512) + } + + _, err := buf.ReadFrom(body) + if err != nil { + return err + } + if buf.Len() > 0 { + v.Policy = ptr.String(buf.String()) + } + return nil +} + +type awsRestxml_deserializeOpGetBucketPolicyStatus struct { +} + +func 
(*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata) + } + output := &GetBucketPolicyStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketPolicyStatusOutput + if *v == nil { + sv = &GetBucketPolicyStatusOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case 
strings.EqualFold("PolicyStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketReplication struct { +} + +func (*awsRestxml_deserializeOpGetBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata) + } + output := &GetBucketReplicationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketReplicationOutput + if *v == nil { + sv = &GetBucketReplicationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicationConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketRequestPayment struct { +} + +func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata) + } + output := &GetBucketRequestPaymentOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := 
errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketRequestPaymentOutput + if *v == nil { + sv = &GetBucketRequestPaymentOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Payer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Payer = types.Payer(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketTagging struct { +} + +func (*awsRestxml_deserializeOpGetBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata) + } + output := &GetBucketTaggingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketTaggingOutput + if *v == nil { + sv = &GetBucketTaggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketVersioning struct { +} + +func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata) + } + output := &GetBucketVersioningOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketVersioningOutput + if *v == nil { + sv = &GetBucketVersioningOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MfaDelete", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.MFADelete = types.MFADeleteStatus(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.BucketVersioningStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata) + } + output := &GetBucketWebsiteOutput{} + out.Result = output + + var buff 
[1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketWebsiteOutput + if *v == nil { + sv = &GetBucketWebsiteOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IndexDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RoutingRules", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObject struct { +} + +func (*awsRestxml_deserializeOpGetObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata) + } + output := &GetObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidObjectState", errorCode): + return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody) + + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + 
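+	// The header bindings that follow all repeat one generated pattern: read
+	// the first value of the named response header, trim surrounding
+	// whitespace in place, and convert it into the corresponding output field
+	// (ptr.String for strings, strconv.ParseBool/ParseInt for scalars,
+	// smithytime parsers for date headers, and types.* conversions for
+	// modeled enums); a loop further below collects the x-amz-meta-* headers
+	// into the Metadata map.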
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = vv + } + + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + 
v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.TagCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetObjectAcl struct { +} + +func (*awsRestxml_deserializeOpGetObjectAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata) + } + output := &GetObjectAclOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAclOutput + if *v == nil { + sv = &GetObjectAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectAttributes struct { +} + +func (*awsRestxml_deserializeOpGetObjectAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAttributes(response, &metadata) + } + output := &GetObjectAttributesOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response 
with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectAttributesOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v 
**GetObjectAttributesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAttributesOutput + if *v == nil { + sv = &GetObjectAttributesOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Checksum", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksum(&sv.Checksum, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("ObjectParts", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGetObjectAttributesParts(&sv.ObjectParts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ObjectSize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSize = i64 + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLegalHold struct { +} + +func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata) + } + output := &GetObjectLegalHoldOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLegalHoldOutput + if *v == nil { + sv = &GetObjectLegalHoldOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LegalHold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLockConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata) + } + output := &GetObjectLockConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if 
err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLockConfigurationOutput + if *v == nil { + sv = &GetObjectLockConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectLockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectRetention struct { +} + +func (*awsRestxml_deserializeOpGetObjectRetention) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata) + } + output := &GetObjectRetentionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectRetentionOutput + if *v == nil { + sv = &GetObjectRetentionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Retention", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectTagging struct { +} + +func (*awsRestxml_deserializeOpGetObjectTagging) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata) + } + output := &GetObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = 
ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectTaggingOutput + if *v == nil { + sv = &GetObjectTaggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectTorrent struct { +} + +func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata) + } + output := &GetObjectTorrentOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ 
+ Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata) + } + output := &GetPublicAccessBlockOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetPublicAccessBlockOutput + if *v == nil { + sv = &GetPublicAccessBlockOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpHeadBucket struct { +} + +func (*awsRestxml_deserializeOpHeadBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata) + } + output := &HeadBucketOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + 
switch { + case strings.EqualFold("NotFound", errorCode): + return awsRestxml_deserializeErrorNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpHeadObject struct { +} + +func (*awsRestxml_deserializeOpHeadObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, &metadata) + } + output := &HeadObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ArchiveStatus = types.ArchiveStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if 
headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = vv + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + 
v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = 
ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata) + } + output := &ListBucketAnalyticsConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketAnalyticsConfigurationsOutput + if *v == nil { + sv = 
&ListBucketAnalyticsConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata) + } + output := &ListBucketIntelligentTieringConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func 
awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketIntelligentTieringConfigurationsOutput + if *v == nil { + sv = &ListBucketIntelligentTieringConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketInventoryConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata) + } + output := &ListBucketInventoryConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketInventoryConfigurationsOutput + if *v == nil { + sv = &ListBucketInventoryConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("InventoryConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketMetricsConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata) + } + output := &ListBucketMetricsConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketMetricsConfigurationsOutput + if *v == nil { + sv = &ListBucketMetricsConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("MetricsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBuckets struct { +} + +func (*awsRestxml_deserializeOpListBuckets) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata) + } + output := &ListBucketsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := 
io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketsOutput + if *v == nil { + sv = &ListBucketsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Buckets", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListMultipartUploads struct { +} + +func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) + } + output := &ListMultipartUploadsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListMultipartUploadsOutput + if *v == nil { + sv = &ListMultipartUploadsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + 
if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("MaxUploads", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxUploads = int32(i64) + } + + case strings.EqualFold("NextKeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextKeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("NextUploadIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextUploadIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("UploadIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Upload", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjects struct { +} + +func (*awsRestxml_deserializeOpListObjects) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, 
&metadata) + } + output := &ListObjectsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectsOutput + if *v == nil { + sv = &ListObjectsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Contents", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", 
t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjectsV2 struct { +} + +func (*awsRestxml_deserializeOpListObjectsV2) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata) + } + output := &ListObjectsV2Output{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectsV2Output + if *v == nil { + sv = &ListObjectsV2Output{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Contents", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { 
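+ // An empty element leaves KeyCount at its zero value.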
+ break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.KeyCount = int32(i64) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("StartAfter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StartAfter = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjectVersions struct { +} + +func (*awsRestxml_deserializeOpListObjectVersions) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata) + } + output := &ListObjectVersionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectVersionsOutput + if *v == nil { + sv = &ListObjectVersionsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("DeleteMarker", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextKeyMarker", t.Name.Local): + val, 
err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextKeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("NextVersionIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextVersionIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("VersionIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Version", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListParts struct { +} + +func (*awsRestxml_deserializeOpListParts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata) + } + output := &ListPartsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to 
copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListPartsOutput + if *v == nil { + sv = &ListPartsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) + } + + case strings.EqualFold("Initiator", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case 
strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = int32(i64) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata) + } + output := &PutBucketAccelerateConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := 
s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAcl struct { +} + +func (*awsRestxml_deserializeOpPutBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata) + } + output := &PutBucketAclOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + 
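// As with the other Put* operations here, a success response has no modeled payload: only the status code is checked and the body is discarded. + 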
out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata) + } + output := &PutBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketCors struct { +} + +func (*awsRestxml_deserializeOpPutBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata) + } + output := &PutBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + 
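// Generic fallbacks, used when the error response carries no code or message. + 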
errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata) + } + output := &PutBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, 
next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &PutBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata) + } + output := &PutBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if 
_, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata) + } + output := &PutBucketLifecycleConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsRestxml_deserializeOpPutBucketLogging struct { +} + +func (*awsRestxml_deserializeOpPutBucketLogging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata) + } + output := &PutBucketLoggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata) + } + output := &PutBucketMetricsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return 
out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata) + } + output := &PutBucketNotificationConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } 
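+ // Rewind the buffered body so code-specific error deserializers can re-read it.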
+ errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata) + } + output := &PutBucketOwnershipControlsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata) + } + output := &PutBucketPolicyOutput{} + out.Result = output + + if _, err = 
io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketReplication struct { +} + +func (*awsRestxml_deserializeOpPutBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata) + } + output := &PutBucketReplicationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = 
errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketRequestPayment struct { +} + +func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata) + } + output := &PutBucketRequestPaymentOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketTagging struct { +} + +func (*awsRestxml_deserializeOpPutBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, 
&metadata) + } + output := &PutBucketTaggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketVersioning struct { +} + +func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata) + } + output := &PutBucketVersioningOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + 
awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata) + } + output := &PutBucketWebsiteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutObject struct { +} + +func (*awsRestxml_deserializeOpPutObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsRestxml_deserializeOpErrorPutObject(response, &metadata) + } + output := &PutObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := 
response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectAcl struct { +} + +func (*awsRestxml_deserializeOpPutObjectAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata) + } + output := &PutObjectAclOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectLegalHold struct { +} + +func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata) + } + output := &PutObjectLegalHoldOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectLockConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata) + } + output := &PutObjectLockConfigurationOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectRetention struct { +} + +func (*awsRestxml_deserializeOpPutObjectRetention) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err 
!= nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata) + } + output := &PutObjectRetentionOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectTagging struct { +} + +func (*awsRestxml_deserializeOpPutObjectTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata) + } + output := &PutObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http 
bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutPublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata) + } + output := &PutPublicAccessBlockOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := 
errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpRestoreObject struct { +} + +func (*awsRestxml_deserializeOpRestoreObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata) + } + output := &RestoreObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode): + return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if 
headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RestoreOutputPath = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpSelectObjectContent struct { +} + +func (*awsRestxml_deserializeOpSelectObjectContent) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata) + } + output := &SelectObjectContentOutput{} + out.Result = output + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpUploadPart struct { +} + +func (*awsRestxml_deserializeOpUploadPart) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata) + } + output := &UploadPartOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + 
return out, metadata, err +} + +func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + 
headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpUploadPartCopy struct { +} + +func (*awsRestxml_deserializeOpUploadPartCopy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata) + } + output := &UploadPartCopyOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + 
Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CopySourceVersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *UploadPartCopyOutput + if *v == nil { + sv = &UploadPartCopyOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CopyPartResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpWriteGetObjectResponse struct { +} + +func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata) + } + output := &WriteGetObjectResponseOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader) + if eventType == nil { + return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader) + } + + switch { + case strings.EqualFold("Cont", eventType.String()): + vv := &types.SelectObjectContentEventStreamMemberCont{} + if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil { + return err + } + *v = vv + return nil + + case strings.EqualFold("End", eventType.String()): + vv := &types.SelectObjectContentEventStreamMemberEnd{} + if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil { + return err + } + *v = vv + return nil + + case strings.EqualFold("Progress", eventType.String()): + vv := &types.SelectObjectContentEventStreamMemberProgress{} + if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil { + return err + } + *v = vv + return nil + + case strings.EqualFold("Records", eventType.String()): + vv := &types.SelectObjectContentEventStreamMemberRecords{} + if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil { + return err + } + *v = vv + return nil + + case strings.EqualFold("Stats", eventType.String()): + vv := &types.SelectObjectContentEventStreamMemberStats{} + if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil { + return err + } + *v = vv + return nil + + default: + buffer := bytes.NewBuffer(nil) + eventstream.NewEncoder().Encode(buffer, *msg) + *v = &types.UnknownUnionMember{ + 
Tag: eventType.String(), + Value: buffer.Bytes(), + } + return nil + + } +} + +func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error { + exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader) + if exceptionType == nil { + return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader) + } + + switch { + default: + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(br, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + errorComponents, err := awsxml.GetErrorResponseComponents(br, true) + if err != nil { + return err + } + errorCode := "UnknownError" + errorMessage := errorCode + if ev := exceptionType.String(); len(ev) > 0 { + errorCode = ev + } else if ev := errorComponents.Code; len(ev) > 0 { + errorCode = ev + } + if ev := errorComponents.Message; len(ev) > 0 { + errorMessage = ev + } + return &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + + } +} + +func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + if msg.Payload != nil { + bsv := make([]byte, len(msg.Payload)) + copy(bsv, msg.Payload) + + v.Payload = bsv + } + return nil +} + +func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentStats(&v.Details, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func 
awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentEndEvent(&v, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ContinuationEvent + if *v == nil { + sv = &types.ContinuationEvent{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EndEvent + if *v == nil { + sv = &types.EndEvent{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Progress + if *v == nil { + sv = &types.Progress{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("BytesProcessed", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesProcessed = i64 + } + + case strings.EqualFold("BytesReturned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesReturned = i64 + } + + case strings.EqualFold("BytesScanned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesScanned = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Stats + if *v == nil { + sv = &types.Stats{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("BytesProcessed", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesProcessed = i64 + } + + case strings.EqualFold("BytesReturned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesReturned = i64 + } + + case strings.EqualFold("BytesScanned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesScanned = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BucketAlreadyExists{} + return output +} + +func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BucketAlreadyOwnedByYou{} + return output +} + +func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidObjectState{} + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchBucket{} + return output +} + +func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchKey{} + return output +} + +func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchUpload{} + return output +} + +func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NotFound{} + return output +} + +func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ObjectAlreadyInActiveTierError{} + return output +} + +func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ObjectNotInActiveTierError{} + return output +} + +func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AbortIncompleteMultipartUpload + if *v == nil { + sv = &types.AbortIncompleteMultipartUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DaysAfterInitiation", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.DaysAfterInitiation = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AccessControlTranslation + if *v == nil { + sv = &types.AccessControlTranslation{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Owner", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return 
err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Owner = types.OwnerOverride(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + 
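+ // reading this member element's text value failed; surface the decode error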
return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsAndOperator + if *v == nil { + sv = &types.AnalyticsAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsConfiguration + if *v == nil { + sv = &types.AnalyticsConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("StorageClassAnalysis", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.AnalyticsConfiguration + if *v == nil { + sv = 
make([]types.AnalyticsConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.AnalyticsConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.AnalyticsConfiguration + if *v == nil { + sv = make([]types.AnalyticsConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.AnalyticsConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsExportDestination + if *v == nil { + sv = &types.AnalyticsExportDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3BucketDestination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.AnalyticsFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.AnalyticsAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.AnalyticsFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.AnalyticsFilterMemberPrefix{Value: mv} + memberFound = true + + case 
strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.AnalyticsFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsS3BucketDestination + if *v == nil { + sv = &types.AnalyticsS3BucketDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("BucketAccountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BucketAccountId = ptr.String(xtv) + } + + case strings.EqualFold("Format", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Format = types.AnalyticsS3ExportFileFormat(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Bucket + if *v == nil { + sv = &types.Bucket{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CreationDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.CreationDate = ptr.Time(t) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.BucketAlreadyExists + if *v == nil { + sv = &types.BucketAlreadyExists{} + } else 
{ + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.BucketAlreadyOwnedByYou + if *v == nil { + sv = &types.BucketAlreadyOwnedByYou{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Bucket", t.Name.Local): + var col types.Bucket + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Bucket + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Checksum + if *v == nil { + sv = &types.Checksum{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ChecksumAlgorithm + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ChecksumAlgorithm + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CommonPrefix + if *v == nil { + sv = &types.CommonPrefix{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CommonPrefix + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CommonPrefix + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Condition + if *v == nil { + sv = &types.Condition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HttpErrorCodeReturnedEquals = ptr.String(xtv) + } + + case strings.EqualFold("KeyPrefixEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyPrefixEquals = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyObjectResult + if *v == nil { + sv = &types.CopyObjectResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyPartResult + if *v == nil { + sv = &types.CopyPartResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CORSRule + if *v == nil { + sv = &types.CORSRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AllowedHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedMethod", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if 
err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedOrigin", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ExposeHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("MaxAgeSeconds", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxAgeSeconds = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CORSRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CORSRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DefaultRetention + if *v == nil { + sv = &types.DefaultRetention{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case 
strings.EqualFold("Mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.ObjectLockRetentionMode(xtv) + } + + case strings.EqualFold("Years", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Years = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeletedObject + if *v == nil { + sv = &types.DeletedObject{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) + } + sv.DeleteMarker = xtv + } + + case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DeleteMarkerVersionId = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeletedObject + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + switch { + default: + var mv 
types.DeletedObject + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerEntry + if *v == nil { + sv = &types.DeleteMarkerEntry{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsLatest", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) + } + sv.IsLatest = xtv + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerReplication + if *v == nil { + sv = &types.DeleteMarkerReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.DeleteMarkerReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + 
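+ // each member element below is decoded into a DeleteMarkerEntry and appended to the list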
originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeleteMarkerEntry + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DeleteMarkerEntry + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Destination + if *v == nil { + sv = &types.Destination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlTranslation", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("EncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Metrics", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ReplicationTime", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EncryptionConfiguration + if *v == nil { + sv = &types.EncryptionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplicaKmsKeyID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Error + if *v == nil { + sv = &types.Error{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Code", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Code = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDocument + if *v == nil { + sv = &types.ErrorDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Error + if *v == nil { + sv = 
make([]types.Error, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Error + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) error { + var sv []types.Error + if *v == nil { + sv = make([]types.Error, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Error + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentEventBridgeConfiguration(v **types.EventBridgeConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EventBridgeConfiguration + if *v == nil { + sv = &types.EventBridgeConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Event + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.Event(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Event + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.Event(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExistingObjectReplication + if *v == nil { + sv 
= &types.ExistingObjectReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExistingObjectReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FilterRule + if *v == nil { + sv = &types.FilterRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = types.FilterRuleName(xtv) + } + + case strings.EqualFold("Value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + 
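+ // a member tag wraps a single FilterRule; any other tag is skipped below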
case strings.EqualFold("member", t.Name.Local): + var col types.FilterRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.FilterRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetObjectAttributesParts + if *v == nil { + sv = &types.GetObjectAttributesParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = int32(i64) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsListUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartsCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalPartsCount = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grant + if 
*v == nil { + sv = &types.Grant{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Grantee", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Permission", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Permission = types.Permission(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grantee + if *v == nil { + sv = &types.Grantee{} + } else { + sv = *v + } + + for _, attr := range decoder.StartEl.Attr { + name := attr.Name.Local + if len(attr.Name.Space) != 0 { + name = attr.Name.Space + `:` + attr.Name.Local + } + switch { + case strings.EqualFold("xsi:type", name): + val := []byte(attr.Value) + { + xtv := string(val) + sv.Type = types.Type(xtv) + } + + } + } + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("EmailAddress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EmailAddress = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("URI", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.URI = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Grant", t.Name.Local): + var col types.Grant + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + 
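+ // annotation (not generator output): the loop above matches "Grant"
+ // rather than the generic "member", mirroring the S3 ACL wire shape
+ // <AccessControlList><Grant>...</Grant></AccessControlList>.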
return nil +} + +func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Grant + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IndexDocument + if *v == nil { + sv = &types.IndexDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Suffix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Suffix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Initiator + if *v == nil { + sv = &types.Initiator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringAndOperator + if *v == nil { + sv = &types.IntelligentTieringAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do 
nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringConfiguration + if *v == nil { + sv = &types.IntelligentTieringConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.IntelligentTieringStatus(xtv) + } + + case strings.EqualFold("Tiering", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.IntelligentTieringConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.IntelligentTieringConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func 
awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringFilter + if *v == nil { + sv = &types.IntelligentTieringFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidObjectState + if *v == nil { + sv = &types.InvalidObjectState{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryConfiguration + if *v == nil { + sv = &types.InventoryConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil { + 
return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("IncludedObjectVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv) + } + + case strings.EqualFold("IsEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) + } + sv.IsEnabled = xtv + } + + case strings.EqualFold("OptionalFields", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Schedule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.InventoryConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryDestination + if *v == nil { + sv = &types.InventoryDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3BucketDestination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryEncryption + if *v == nil { + sv = &types.InventoryEncryption{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("SSE-KMS", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SSE-S3", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryFilter + if *v == nil { + sv = &types.InventoryFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("Field", t.Name.Local): + var col types.InventoryOptionalField + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.InventoryOptionalField(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder 
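+ // annotation (not generator output): entries in this list are matched
+ // by the element name "Field" instead of "member", and the outer
+ // decoder is restored after each entry so the next token is read at
+ // the list level.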
+ } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryOptionalField + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.InventoryOptionalField(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryS3BucketDestination + if *v == nil { + sv = &types.InventoryS3BucketDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccountId = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Encryption", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Format", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Format = types.InventoryFormat(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventorySchedule + if *v == nil { + sv = &types.InventorySchedule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Frequency", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Frequency = types.InventoryFrequency(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LambdaFunctionConfiguration + if *v == nil { + sv = &types.LambdaFunctionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("CloudFunction", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LambdaFunctionArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LambdaFunctionConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LambdaFunctionConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleExpiration + if *v == nil { + sv = &types.LifecycleExpiration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + 
originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) + } + sv.ExpiredObjectDeleteMarker = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRule + if *v == nil { + sv = &types.LifecycleRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Expiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExpirationStatus(xtv) + } + + case strings.EqualFold("Transition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRuleAndOperator + if *v == nil { + sv = &types.LifecycleRuleAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeGreaterThan = i64 + } + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeLessThan = i64 + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.LifecycleRuleFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.LifecycleRuleAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != 
nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LifecycleRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LifecycleRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LoggingEnabled + if *v == nil { + sv = &types.LoggingEnabled{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TargetBucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TargetBucket = ptr.String(xtv) + } + + case strings.EqualFold("TargetGrants", t.Name.Local): + 
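+ // annotation (not generator output): TargetGrants is presumably sent
+ // in wrapped form (<TargetGrants><Grant>...), so the wrapped list
+ // deserializer is used here rather than an *Unwrapped variant.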
nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TargetPrefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TargetPrefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Metrics + if *v == nil { + sv = &types.Metrics{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventThreshold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.MetricsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsAndOperator + if *v == nil { + sv = &types.MetricsAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessPointArn = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsConfiguration + if *v == nil { + sv = &types.MetricsConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder 
= smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfigurationList(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MetricsConfiguration + if *v == nil { + sv = make([]types.MetricsConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.MetricsConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.MetricsConfiguration + if *v == nil { + sv = make([]types.MetricsConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MetricsConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.MetricsFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.MetricsFilterMemberAccessPointArn{Value: mv} + memberFound = true + + case strings.EqualFold("And", t.Name.Local): + var mv types.MetricsAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMetricsAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.MetricsFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err 
!= nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.MetricsFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.MetricsFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MultipartUpload + if *v == nil { + sv = &types.MultipartUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) + } + + case strings.EqualFold("Initiated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Initiated = ptr.Time(t) + } + + case strings.EqualFold("Initiator", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUploadList(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MultipartUpload + if *v == nil { + sv = make([]types.MultipartUpload, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.MultipartUpload + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + var sv []types.MultipartUpload + if *v == nil { + sv = make([]types.MultipartUpload, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MultipartUpload + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.NoncurrentVersionExpiration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoncurrentVersionExpiration + if *v == nil { + sv = &types.NoncurrentVersionExpiration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NewerNoncurrentVersions = int32(i64) + } + + case strings.EqualFold("NoncurrentDays", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NoncurrentDays = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoncurrentVersionTransition + if *v == nil { + sv = &types.NoncurrentVersionTransition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NewerNoncurrentVersions = int32(i64) + } + + case strings.EqualFold("NoncurrentDays", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NoncurrentDays = int32(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.TransitionStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransitionList(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.NoncurrentVersionTransition + if *v == nil { + sv = make([]types.NoncurrentVersionTransition, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.NoncurrentVersionTransition + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + var sv []types.NoncurrentVersionTransition + if *v == nil { + sv = make([]types.NoncurrentVersionTransition, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.NoncurrentVersionTransition + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentNoSuchBucket(v **types.NoSuchBucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoSuchBucket + if *v == nil { + sv = &types.NoSuchBucket{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoSuchKey(v **types.NoSuchKey, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoSuchKey + if *v == nil { + sv = &types.NoSuchKey{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoSuchUpload(v **types.NoSuchUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of 
type %T", v) + } + var sv *types.NoSuchUpload + if *v == nil { + sv = &types.NoSuchUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotFound + if *v == nil { + sv = &types.NotFound{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotificationConfigurationFilter + if *v == nil { + sv = &types.NotificationConfigurationFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3Key", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Object + if *v == nil { + sv = &types.Object{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil 
{ + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = i64 + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.ObjectStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectAlreadyInActiveTierError + if *v == nil { + sv = &types.ObjectAlreadyInActiveTierError{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Object + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Object + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockConfiguration + if *v == nil { + sv = &types.ObjectLockConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done 
{ + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectLockEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv) + } + + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockLegalHold + if *v == nil { + sv = &types.ObjectLockLegalHold{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ObjectLockLegalHoldStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockRetention + if *v == nil { + sv = &types.ObjectLockRetention{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.ObjectLockRetentionMode(xtv) + } + + case strings.EqualFold("RetainUntilDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.RetainUntilDate = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockRule + if *v == nil { + sv = &types.ObjectLockRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DefaultRetention", t.Name.Local): + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectNotInActiveTierError + if *v == nil { + sv = &types.ObjectNotInActiveTierError{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectPart + if *v == nil { + sv = &types.ObjectPart{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("PartNumber", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PartNumber = int32(i64) + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectVersion + if *v == nil { + sv = &types.ObjectVersion{} + } else { + sv = *v + } + + for { + t, done, err := 
decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("IsLatest", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) + } + sv.IsLatest = xtv + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = i64 + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.ObjectVersionStorageClass(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ObjectVersion + if *v == nil { + sv = make([]types.ObjectVersion, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ObjectVersion + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { + var sv []types.ObjectVersion + if *v == nil { + sv = make([]types.ObjectVersion, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ObjectVersion + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Owner + if *v == nil { + sv = &types.Owner{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.OwnershipControls + if *v == nil { + sv = &types.OwnershipControls{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.OwnershipControlsRule + if *v == nil { + sv = &types.OwnershipControlsRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectOwnership", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ObjectOwnership = types.ObjectOwnership(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + 
return nil +} + +func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.OwnershipControlsRule + if *v == nil { + sv = make([]types.OwnershipControlsRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.OwnershipControlsRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { + var sv []types.OwnershipControlsRule + if *v == nil { + sv = make([]types.OwnershipControlsRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.OwnershipControlsRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Part + if *v == nil { + sv = &types.Part{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("PartNumber", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if 
val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PartNumber = int32(i64) + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Part + if *v == nil { + sv = make([]types.Part, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Part + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error { + var sv []types.Part + if *v == nil { + sv = make([]types.Part, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Part + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentPartsList(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ObjectPart + if *v == nil { + sv = make([]types.ObjectPart, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ObjectPart + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentPartsListUnwrapped(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { + var sv []types.ObjectPart + if *v == nil { + sv = make([]types.ObjectPart, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ObjectPart + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) 
error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PolicyStatus + if *v == nil { + sv = &types.PolicyStatus{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsPublic", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val) + } + sv.IsPublic = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PublicAccessBlockConfiguration + if *v == nil { + sv = &types.PublicAccessBlockConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("BlockPublicAcls", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) + } + sv.BlockPublicAcls = xtv + } + + case strings.EqualFold("BlockPublicPolicy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) + } + sv.BlockPublicPolicy = xtv + } + + case strings.EqualFold("IgnorePublicAcls", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) + } + sv.IgnorePublicAcls = xtv + } + + case strings.EqualFold("RestrictPublicBuckets", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) + } + sv.RestrictPublicBuckets = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.QueueConfiguration + if *v == nil { + sv = &types.QueueConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) 
+ switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Queue", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.QueueArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.QueueConfiguration + if *v == nil { + sv = make([]types.QueueConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.QueueConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.QueueConfiguration + if *v == nil { + sv = make([]types.QueueConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.QueueConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Redirect + if *v == nil { + sv = &types.Redirect{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HostName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HostName = ptr.String(xtv) + } + + case strings.EqualFold("HttpRedirectCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HttpRedirectCode = ptr.String(xtv) + } + + 
case strings.EqualFold("Protocol", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Protocol = types.Protocol(xtv) + } + + case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplaceKeyPrefixWith = ptr.String(xtv) + } + + case strings.EqualFold("ReplaceKeyWith", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplaceKeyWith = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RedirectAllRequestsTo + if *v == nil { + sv = &types.RedirectAllRequestsTo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HostName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HostName = ptr.String(xtv) + } + + case strings.EqualFold("Protocol", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Protocol = types.Protocol(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicaModifications + if *v == nil { + sv = &types.ReplicaModifications{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ReplicaModificationsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationConfiguration + if *v == nil { + sv = &types.ReplicationConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Role", t.Name.Local): 
+ val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Role = ptr.String(xtv) + } + + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationRule + if *v == nil { + sv = &types.ReplicationRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DeleteMarkerReplication", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ExistingObjectReplication", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Priority", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Priority = int32(i64) + } + + case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ReplicationRuleStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v 
**types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationRuleAndOperator + if *v == nil { + sv = &types.ReplicationRuleAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.ReplicationRuleFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.ReplicationRuleAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.ReplicationRuleFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.ReplicationRuleFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ReplicationRule + if *v == nil { + sv = make([]types.ReplicationRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ReplicationRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { + return 
err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { + var sv []types.ReplicationRule + if *v == nil { + sv = make([]types.ReplicationRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ReplicationRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationTime + if *v == nil { + sv = &types.ReplicationTime{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ReplicationTimeStatus(xtv) + } + + case strings.EqualFold("Time", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationTimeValue + if *v == nil { + sv = &types.ReplicationTimeValue{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Minutes", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Minutes = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RoutingRule + if *v == nil { + sv = &types.RoutingRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Condition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + 
if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Redirect", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.RoutingRule + if *v == nil { + sv = make([]types.RoutingRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("RoutingRule", t.Name.Local): + var col types.RoutingRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { + var sv []types.RoutingRule + if *v == nil { + sv = make([]types.RoutingRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.RoutingRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.S3KeyFilter + if *v == nil { + sv = &types.S3KeyFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("FilterRule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ServerSideEncryptionByDefault + if *v == nil { + sv = &types.ServerSideEncryptionByDefault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("KMSMasterKeyID", t.Name.Local): + val, 
err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KMSMasterKeyID = ptr.String(xtv) + } + + case strings.EqualFold("SSEAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SSEAlgorithm = types.ServerSideEncryption(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ServerSideEncryptionConfiguration + if *v == nil { + sv = &types.ServerSideEncryptionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ServerSideEncryptionRule + if *v == nil { + sv = &types.ServerSideEncryptionRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("BucketKeyEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) + } + sv.BucketKeyEnabled = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ServerSideEncryptionRule + if *v == nil { + sv = make([]types.ServerSideEncryptionRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col 
types.ServerSideEncryptionRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + var sv []types.ServerSideEncryptionRule + if *v == nil { + sv = make([]types.ServerSideEncryptionRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ServerSideEncryptionRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SourceSelectionCriteria + if *v == nil { + sv = &types.SourceSelectionCriteria{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicaModifications", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SSEKMS + if *v == nil { + sv = &types.SSEKMS{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("KeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SseKmsEncryptedObjects + if *v == nil { + sv = &types.SseKmsEncryptedObjects{} + } else { + sv = *v + } + + for { + t, done, err := 
decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.SseKmsEncryptedObjectsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SSES3 + if *v == nil { + sv = &types.SSES3{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.StorageClassAnalysis + if *v == nil { + sv = &types.StorageClassAnalysis{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DataExport", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.StorageClassAnalysisDataExport + if *v == nil { + sv = &types.StorageClassAnalysisDataExport{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("OutputSchemaVersion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Tag + if *v == nil { + sv = make([]types.Tag, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Tag", t.Name.Local): + var col types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { + var sv []types.Tag + if *v == nil { + sv = make([]types.Tag, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Tag + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TargetGrant + if *v == nil { + sv = &types.TargetGrant{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Grantee", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Permission", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Permission = types.BucketLogsPermission(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + 
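The Tag/TagSet pair above shows the wrapped-list pattern these generated deserializers walk: a `<TagSet>` element whose children are repeated `<Tag>` entries, with an `Unwrapped` variant for the flattened case where the member element repeats without a wrapper. As an aside, the wrapped shape can be sketched with nothing but the standard library; the struct tags and sample body below are illustrative, not part of the generated code:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// tagging mirrors the wrapped-list XML shape handled by
// awsRestxml_deserializeDocumentTagSet: <TagSet> wrapping <Tag> members.
type tagging struct {
	TagSet []struct {
		Key   string `xml:"Key"`
		Value string `xml:"Value"`
	} `xml:"TagSet>Tag"`
}

func main() {
	const body = `<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>`
	var t tagging
	if err := xml.Unmarshal([]byte(body), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.TagSet[0].Key, t.TagSet[0].Value) // env prod
}
```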
+func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TargetGrant + if *v == nil { + sv = make([]types.TargetGrant, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Grant", t.Name.Local): + var col types.TargetGrant + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { + var sv []types.TargetGrant + if *v == nil { + sv = make([]types.TargetGrant, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TargetGrant + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Tiering + if *v == nil { + sv = &types.Tiering{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Tiering + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Tiering + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TopicConfiguration + if *v == nil { + sv = &types.TopicConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Topic", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TopicArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.TopicConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TopicConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := 
awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Transition + if *v == nil { + sv = &types.Transition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.TransitionStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Transition + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Transition + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go new file mode 100644 index 000000000000..ce3203be5a95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go @@ -0,0 +1,7 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package s3 provides the API client, operations, and parameter types for Amazon +// Simple Storage Service. 
+// +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go new file mode 100644 index 000000000000..8df6368cd2f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -0,0 +1,208 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint uses the +// client region as its signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
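As an editorial aside: the function documented above is the usual hook for pointing the client at a custom or S3-compatible endpoint. A minimal usage sketch, assuming a hypothetical local endpoint at http://localhost:9000 (the URL, region, and the choice to mark the hostname immutable are all illustrative):

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Resolve every request to one fixed URL. SigningRegion is left empty, so
	// the resolver falls back to the client region, as described above.
	resolver := s3.EndpointResolverFromURL("http://localhost:9000", func(e *aws.Endpoint) {
		e.HostnameImmutable = true // assumption: the endpoint host should not be mutated per-bucket
	})
	client := s3.New(s3.Options{Region: "us-east-1", EndpointResolver: resolver})
	_ = client
}
```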
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "s3" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + + if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + if options.UseDualstack { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled + } else { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go new file mode 100644 index 000000000000..d6cdb533727c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithysync "github.com/aws/smithy-go/sync" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "sync" +) + +// SelectObjectContentEventStreamReader provides the interface for reading events +// from a stream. +// +// The reader's Close method must allow multiple concurrent calls.
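Before the concrete reader implementation below, a consumer-side sketch may help orient: callers receive this reader indirectly through the operation output's event stream. The helper below is hypothetical and abbreviated; `out` would come from a real `client.SelectObjectContent(...)` call:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// drainSelectStream is an illustrative consumer; out would come from a real
// client.SelectObjectContent(...) call.
func drainSelectStream(out *s3.SelectObjectContentOutput) {
	stream := out.GetStream()
	defer stream.Close() // Close tolerates multiple calls, per the contract above

	for event := range stream.Events() {
		switch v := event.(type) {
		case *types.SelectObjectContentEventStreamMemberRecords:
			fmt.Print(string(v.Value.Payload)) // scanned record bytes
		case *types.SelectObjectContentEventStreamMemberEnd:
			// clean end-of-stream marker from the service
		}
	}
	if err := stream.Err(); err != nil {
		log.Println("stream error:", err)
	}
}

func main() {} // wiring to a live client omitted
```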
+type SelectObjectContentEventStreamReader interface { + Events() <-chan types.SelectObjectContentEventStream + Close() error + Err() error +} + +type selectObjectContentEventStreamReader struct { + stream chan types.SelectObjectContentEventStream + decoder *eventstream.Decoder + eventStream io.ReadCloser + err *smithysync.OnceErr + payloadBuf []byte + done chan struct{} + closeOnce sync.Once +} + +func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader { + w := &selectObjectContentEventStreamReader{ + stream: make(chan types.SelectObjectContentEventStream), + decoder: decoder, + eventStream: readCloser, + err: smithysync.NewOnceErr(), + done: make(chan struct{}), + payloadBuf: make([]byte, 10*1024), + } + + go w.readEventStream() + + return w +} + +func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream { + return r.stream +} + +func (r *selectObjectContentEventStreamReader) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + r.payloadBuf = r.payloadBuf[0:0] + decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf) + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + return + default: + r.err.SetError(err) + return + } + } + + event, err := r.deserializeEventMessage(&decodedMessage) + if err != nil { + r.err.SetError(err) + return + } + + select { + case r.stream <- event: + case <-r.done: + return + } + + } +} + +func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) { + messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader) + if messageType == nil { + return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader) + } + + switch messageType.String() { + case eventstreamapi.EventMessageType: + var v types.SelectObjectContentEventStream + if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil { + return nil, err + } + return v, nil + + case eventstreamapi.ExceptionMessageType: + return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg) + + case eventstreamapi.ErrorMessageType: + errorCode := "UnknownError" + errorMessage := errorCode + if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil { + errorCode = header.String() + } + if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil { + errorMessage = header.String() + } + return nil, &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + + default: + mc := msg.Clone() + return nil, &UnknownEventMessageError{ + Type: messageType.String(), + Message: &mc, + } + + } +} + +func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *selectObjectContentEventStreamReader) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *selectObjectContentEventStreamReader) safeClose() { + close(r.done) + r.eventStream.Close() + +} + +func (r *selectObjectContentEventStreamReader) Err() error { + return r.err.Err() +} + +func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} { + return r.done +} + +type awsRestxml_deserializeOpEventStreamSelectObjectContent struct { + LogEventStreamWrites bool + LogEventStreamReads bool +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string { + return 
"OperationEventStreamDeserializer" +} + +func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + defer func() { + if err == nil { + return + } + m.closeResponseBody(out) + }() + + logger := middleware.GetLogger(ctx) + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + _ = request + + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + _ = deserializeOutput + + output, ok := out.Result.(*SelectObjectContentOutput) + if out.Result != nil && !ok { + return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result) + } else if out.Result == nil { + output = &SelectObjectContentOutput{} + out.Result = output + } + + eventReader := newSelectObjectContentEventStreamReader( + deserializeOutput.Body, + eventstream.NewDecoder(func(options *eventstream.DecoderOptions) { + options.Logger = logger + options.LogMessages = m.LogEventStreamReads + + }), + ) + defer func() { + if err == nil { + return + } + _ = eventReader.Close() + }() + + output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) { + stream.Reader = eventReader + }) + + go output.eventStream.waitStreamClose() + + return out, metadata, nil +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) { + if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil { + _, _ = io.Copy(ioutil.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error { + if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{ + LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(), + LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(), + }, "OperationDeserializer", middleware.Before); err != nil { + return err + } + return nil + +} + +// UnknownEventMessageError provides an error when a message is received from the stream, +// but the reader is unable to determine what kind of message it is. +type UnknownEventMessageError struct { + Type string + Message *eventstream.Message +} + +// Error retruns the error message string. 
+func (e *UnknownEventMessageError) Error() string { + return "unknown event stream message type, " + e.Type +} + +func setSafeEventStreamClientLogMode(o *Options, operation string) { + switch operation { + case "SelectObjectContent": + toggleEventStreamClientLogMode(o, false, true) + return + + default: + return + + } +} +func toggleEventStreamClientLogMode(o *Options, request, response bool) { + mode := o.ClientLogMode + + if request && mode.IsRequestWithBody() { + mode.ClearRequestWithBody() + mode |= aws.LogRequest + } + + if response && mode.IsResponseWithBody() { + mode.ClearResponseWithBody() + mode |= aws.LogResponse + } + + o.ClientLogMode = mode + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json new file mode 100644 index 000000000000..8643d42ff330 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -0,0 +1,128 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", + "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AbortMultipartUpload.go", + "api_op_CompleteMultipartUpload.go", + "api_op_CopyObject.go", + "api_op_CreateBucket.go", + "api_op_CreateMultipartUpload.go", + "api_op_DeleteBucket.go", + "api_op_DeleteBucketAnalyticsConfiguration.go", + "api_op_DeleteBucketCors.go", + "api_op_DeleteBucketEncryption.go", + "api_op_DeleteBucketIntelligentTieringConfiguration.go", + "api_op_DeleteBucketInventoryConfiguration.go", + "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetricsConfiguration.go", + "api_op_DeleteBucketOwnershipControls.go", + "api_op_DeleteBucketPolicy.go", + "api_op_DeleteBucketReplication.go", + "api_op_DeleteBucketTagging.go", + "api_op_DeleteBucketWebsite.go", + "api_op_DeleteObject.go", + "api_op_DeleteObjectTagging.go", + "api_op_DeleteObjects.go", + "api_op_DeletePublicAccessBlock.go", + "api_op_GetBucketAccelerateConfiguration.go", + "api_op_GetBucketAcl.go", + "api_op_GetBucketAnalyticsConfiguration.go", + "api_op_GetBucketCors.go", + "api_op_GetBucketEncryption.go", + "api_op_GetBucketIntelligentTieringConfiguration.go", + "api_op_GetBucketInventoryConfiguration.go", + "api_op_GetBucketLifecycleConfiguration.go", + "api_op_GetBucketLocation.go", + "api_op_GetBucketLogging.go", + "api_op_GetBucketMetricsConfiguration.go", + "api_op_GetBucketNotificationConfiguration.go", + "api_op_GetBucketOwnershipControls.go", + "api_op_GetBucketPolicy.go", + "api_op_GetBucketPolicyStatus.go", + "api_op_GetBucketReplication.go", + "api_op_GetBucketRequestPayment.go", + "api_op_GetBucketTagging.go", + "api_op_GetBucketVersioning.go", + "api_op_GetBucketWebsite.go", + "api_op_GetObject.go", + "api_op_GetObjectAcl.go", + "api_op_GetObjectAttributes.go", + "api_op_GetObjectLegalHold.go", + 
"api_op_GetObjectLockConfiguration.go", + "api_op_GetObjectRetention.go", + "api_op_GetObjectTagging.go", + "api_op_GetObjectTorrent.go", + "api_op_GetPublicAccessBlock.go", + "api_op_HeadBucket.go", + "api_op_HeadObject.go", + "api_op_ListBucketAnalyticsConfigurations.go", + "api_op_ListBucketIntelligentTieringConfigurations.go", + "api_op_ListBucketInventoryConfigurations.go", + "api_op_ListBucketMetricsConfigurations.go", + "api_op_ListBuckets.go", + "api_op_ListMultipartUploads.go", + "api_op_ListObjectVersions.go", + "api_op_ListObjects.go", + "api_op_ListObjectsV2.go", + "api_op_ListParts.go", + "api_op_PutBucketAccelerateConfiguration.go", + "api_op_PutBucketAcl.go", + "api_op_PutBucketAnalyticsConfiguration.go", + "api_op_PutBucketCors.go", + "api_op_PutBucketEncryption.go", + "api_op_PutBucketIntelligentTieringConfiguration.go", + "api_op_PutBucketInventoryConfiguration.go", + "api_op_PutBucketLifecycleConfiguration.go", + "api_op_PutBucketLogging.go", + "api_op_PutBucketMetricsConfiguration.go", + "api_op_PutBucketNotificationConfiguration.go", + "api_op_PutBucketOwnershipControls.go", + "api_op_PutBucketPolicy.go", + "api_op_PutBucketReplication.go", + "api_op_PutBucketRequestPayment.go", + "api_op_PutBucketTagging.go", + "api_op_PutBucketVersioning.go", + "api_op_PutBucketWebsite.go", + "api_op_PutObject.go", + "api_op_PutObjectAcl.go", + "api_op_PutObjectLegalHold.go", + "api_op_PutObjectLockConfiguration.go", + "api_op_PutObjectRetention.go", + "api_op_PutObjectTagging.go", + "api_op_PutPublicAccessBlock.go", + "api_op_RestoreObject.go", + "api_op_SelectObjectContent.go", + "api_op_UploadPart.go", + "api_op_UploadPartCopy.go", + "api_op_WriteGetObjectResponse.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "eventstream.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "types/types_exported_test.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/s3", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go new file mode 100644 index 000000000000..1d77f7544aba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package s3 + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.26.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go new file mode 100644 index 000000000000..97b5771bb1f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go @@ -0,0 +1,106 @@ +package arn + +import ( + "fmt" + "strings" + + awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" +) + +const ( + s3Namespace = "s3" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + +// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource. 
+func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + + switch resParts[0] { + case "accesspoint": + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} + } + case "outpost": + if a.Service != s3OutpostsNamespace { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3OutpostsNamespace)} + } + return parseOutpostAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + // Do not allow region-less outpost access-point arns.
+ if len(a.Region) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + if len(a.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go new file mode 100644 index 000000000000..4629e494f27a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go @@ -0,0 +1,87 @@ +/* +Package customizations provides customizations for the Amazon S3 API client. + +This package provides support for the following S3 customizations + + ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type + + UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options + + RemoveBucket Middleware: removes a serialized bucket name from request url path + + processResponseWith200Error Middleware: deserializes a response error with a 200 status code + + +Virtual Host style url addressing + +Since serializers serialize by default as a path style url, we use a customization +to modify the endpoint url when the `UsePathStyle` option on the S3Client is unset or +false. This flag will be ignored if the `UseAccelerate` option is set to true. + +If UseAccelerate is not enabled, and the bucket name is not a valid hostname +label, the SDK will fall back to forcing the request to be made as if +UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint is enabled. + +https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description + + +Transfer acceleration + +By default S3 Transfer acceleration support is disabled. By enabling the `UseAccelerate` +option on the S3Client, one can enable s3 transfer acceleration support. Transfer +acceleration only works with Virtual Host style addressing, and thus the `UsePathStyle` +option, if set, is ignored. Transfer acceleration is not supported for the S3 operations +DeleteBucket, ListBuckets, and CreateBucket. + + +Dualstack support + +By default dualstack support for the s3 client is disabled. By enabling the `UseDualstack` +option on the s3 client, you can enable dualstack endpoint support. + + +Endpoint customizations + + +The customizations that look up and process an ARN need to happen before request serialization.
+The UpdateEndpoint middleware, which mutates the resolved endpoint based on +Options such as UseDualstack and UseAccelerate, is executed after +request serialization. The Remove-Bucket middleware is executed after +a request is serialized, and removes the serialized bucket name from the request path. + Middleware layering: + + + Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step + + Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware + + +Customization options: + UseARNRegion (Disabled by Default) + + UsePathStyle (Disabled by Default) + + UseAccelerate (Disabled by Default) + + UseDualstack (Disabled by Default) + + +Handle Error response with 200 status code + +S3 operations: CopyObject, CompleteMultipartUpload, UploadPartCopy can return an +error response with a 2xx status code. The processResponseWith200Error middleware +customization enables the SDK to check for an error within the response body prior to +deserialization. + +The check for a 2xx response containing an error needs to be performed before +response deserialization. Since the deserialize step runs in reverse order relative +to the other stack steps, it is easier to consider that "after" means +"before". + Middleware layering: + + HTTP Response -> handle 200 error customization -> deserialize + +*/ +package customizations diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go new file mode 100644 index 000000000000..2b11b1fa278a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go @@ -0,0 +1,74 @@ +package customizations + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// HandleResponseErrorWith200Status checks for an S3 200 error response. +// If an s3 200 error is found, the response status code is temporarily modified to +// a 5xx response status code. +func HandleResponseErrorWith200Status(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After) +} + +// middleware to process a raw response and look for an error response with a 200 status code +type processResponseFor200ErrorMiddleware struct{} + +// ID returns the middleware ID. +func (*processResponseFor200ErrorMiddleware) ID() string { + return "S3:ProcessResponseFor200Error" +} + +func (m *processResponseFor200ErrorMiddleware) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + // check if response status code is 2xx.
+ if response.StatusCode < 200 || response.StatusCode >= 300 { + return + } + + var readBuff bytes.Buffer + body := io.TeeReader(response.Body, &readBuff) + + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("received empty response payload"), + } + } + + // rewind response body + response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body)) + + // if the start tag is "Error", the response is considered an error response. + if strings.EqualFold(t.Name.Local, "Error") { + // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/ + // 200 error responses are similar to 5xx errors. + response.StatusCode = 500 + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go new file mode 100644 index 000000000000..87f7a22327d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go @@ -0,0 +1,22 @@ +package customizations + +import ( + "github.com/aws/smithy-go/transport/http" + "strings" +) + +func updateS3HostForS3AccessPoint(req *http.Request) { + updateHostPrefix(req, "s3", s3AccessPoint) +} + +func updateS3HostForS3ObjectLambda(req *http.Request) { + updateHostPrefix(req, "s3", s3ObjectLambda) +} + +func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + // For example, if oldEndpointPrefix is "s3", it is replaced with newEndpointPrefix. + req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go new file mode 100644 index 000000000000..f4bbb4b6de1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go @@ -0,0 +1,49 @@ +package customizations + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddExpiresOnPresignedURL represents a build middleware used to assign +// an expiration on a presigned URL. +type AddExpiresOnPresignedURL struct { + + // Expires is the time.Duration within which the presigned URL should expire. + // This should be the duration in seconds for which the presigned URL should be considered valid. + // By default the S3 presigned url expires in 15 minutes, i.e. 900 seconds.
+ Expires time.Duration +} + +// ID representing the middleware +func (*AddExpiresOnPresignedURL) ID() string { + return "S3:AddExpiresOnPresignedURL" +} + +// HandleBuild handles the build step middleware behavior +func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // if expiration is unset, apply the default + if m.Expires == 0 { + // default to 15 * time.Minute + m.Expires = 15 * time.Minute + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + // set the S3 X-Amz-Expires query parameter + query := req.URL.Query() + query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10)) + req.URL.RawQuery = query.Encode() + + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go new file mode 100644 index 000000000000..a232e622b4f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go @@ -0,0 +1,564 @@ +package customizations + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" + s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn" + "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" +) + +const ( + s3AccessPoint = "s3-accesspoint" + s3ObjectLambda = "s3-object-lambda" +) + +// processARNResource is used to process an ARN resource. +type processARNResource struct { + + // UseARNRegion indicates if the region parsed from an ARN should be used. + UseARNRegion bool + + // UseAccelerate indicates if s3 transfer acceleration is enabled + UseAccelerate bool + + // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver + EndpointResolver EndpointResolver + + // EndpointResolverOptions used by the endpoint resolver + EndpointResolverOptions EndpointResolverOptions + + // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled + DisableMultiRegionAccessPoints bool +} + +// ID returns the middleware ID.
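Two of the pieces defined in this package surface directly in application code: the customization options described in doc.go are plain client options, and the expiry middleware above is driven by the presigner's expires setting. A hedged sketch of both at the call site (the bucket, key, and 10-minute expiry are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Customization options from the package docs; all are disabled by default.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // keep the bucket in the URL path
		o.UseARNRegion = true // allow an ARN's region to differ from cfg.Region
	})

	// Presign a GET; WithPresignExpires sets the expiry that is serialized
	// as the X-Amz-Expires query parameter.
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignGetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // illustrative
		Key:    aws.String("example-key"),    // illustrative
	}, s3.WithPresignExpires(10*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.URL)
}
```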
+func (*processARNResource) ID() string { return "S3:ProcessARNResource" } + +func (m *processARNResource) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + // check if arn was provided, if not skip this middleware + arnValue, ok := s3shared.GetARNResourceFromContext(ctx) + if !ok { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // parse arn into an endpoint arn wrt to service + resource, err := s3arn.ParseEndpointARN(arnValue) + if err != nil { + return out, metadata, err + } + + // build a resource request struct + resourceRequest := s3shared.ResourceRequest{ + Resource: resource, + UseARNRegion: m.UseARNRegion, + UseFIPS: m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled, + RequestRegion: awsmiddleware.GetRegion(ctx), + SigningRegion: awsmiddleware.GetSigningRegion(ctx), + PartitionID: awsmiddleware.GetPartitionID(ctx), + } + + // switch to correct endpoint updater + switch tv := resource.(type) { + case arn.AccessPointARN: + // multi-region arns do not need to validate for cross partition request + if len(tv.Region) != 0 { + // validate resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + } + + // Special handling for region-less ap-arns. + if len(tv.Region) == 0 { + // check if multi-region arn support is disabled + if m.DisableMultiRegionAccessPoints { + return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled") + } + + // Do not allow dual-stack configuration with multi-region arns. + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // fetch arn region to resolve request + resolveRegion := tv.Region + // check if request region is FIPS + if resourceRequest.UseFIPS && len(resolveRegion) == 0 { + // Do not allow Fips support within multi-region arns. 
+ return out, metadata, s3shared.NewClientConfiguredForFIPSError( + tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + var requestBuilder func(context.Context, accesspointOptions) (context.Context, error) + if len(resolveRegion) == 0 { + requestBuilder = buildMultiRegionAccessPointsRequest + } else { + requestBuilder = buildAccessPointRequest + } + + // build request as per accesspoint builder + ctx, err = requestBuilder(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + case arn.S3ObjectLambdaAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dualstack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // fetch arn region to resolve request + resolveRegion := tv.Region + + // build access point request + ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv.AccessPointARN, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + // process outpost accesspoint ARN + case arn.OutpostAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dual stack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if request region is FIPS + if resourceRequest.UseFIPS { + return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID, + resourceRequest.RequestRegion, nil) + } + + // build outpost access point request + ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{ + processARNResource: *m, + resource: tv, + request: req, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + default: + return out, metadata, s3shared.NewInvalidARNError(resource, nil) + } + + return next.HandleSerialize(ctx, in) +} + +// validate if s3 resource and request region config is compatible. 
+func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error {
+    // check if resourceRequest leads to a cross-partition error
+    v, err := resourceRequest.IsCrossPartition()
+    if err != nil {
+        return err
+    }
+    if v {
+        // if cross partition
+        return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource,
+            resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+    }
+
+    // check if resourceRequest leads to a cross-region error
+    if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() {
+        // the request is cross-region, but UseARNRegion is not enabled
+        return s3shared.NewClientRegionMismatchError(resourceRequest.Resource,
+            resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+    }
+
+    return nil
+}
+
+// === Accesspoint ==========
+
+type accesspointOptions struct {
+    processARNResource
+    request       *http.Request
+    resource      arn.AccessPointARN
+    resolveRegion string
+    partitionID   string
+    requestRegion string
+}
+
+func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+    tv := options.resource
+    req := options.request
+    resolveRegion := options.resolveRegion
+
+    resolveService := tv.Service
+
+    ero := options.EndpointResolverOptions
+    ero.Logger = middleware.GetLogger(ctx)
+    ero.ResolvedRegion = "" // clear the endpoint options' resolved region so that we resolve using the passed-in region
+
+    // resolve endpoint
+    endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+    if err != nil {
+        return ctx, s3shared.NewFailedToResolveEndpointError(
+            tv,
+            options.partitionID,
+            options.requestRegion,
+            err,
+        )
+    }
+
+    // assign the resolved endpoint url to the request url
+    req.URL, err = url.Parse(endpoint.URL)
+    if err != nil {
+        return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+    }
+
+    if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+        ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+    } else {
+        // Must sign with the service name carried by the resource ARN
+        ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+    }
+
+    if len(endpoint.SigningRegion) != 0 {
+        ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+    } else {
+        ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+    }
+
+    // update serviceID to "s3-accesspoint"
+    ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint)
+
+    // disable host prefix behavior
+    ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+    // remove the serialized arn in place of /{Bucket}
+    ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+    // skip arn processing if the arn region resolves to an immutable endpoint
+    if endpoint.HostnameImmutable {
+        return ctx, nil
+    }
+
+    updateS3HostForS3AccessPoint(req)
+
+    ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+    if err != nil {
+        return ctx, err
+    }
+
+    return ctx, nil
+}
+
+func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+    tv := options.resource
+    req := options.request
+    resolveRegion := options.resolveRegion
+
+    resolveService := tv.Service
+
+    ero := options.EndpointResolverOptions
+    ero.Logger = middleware.GetLogger(ctx)
+    ero.ResolvedRegion = "" // clear the endpoint options' resolved region so we resolve using the passed-in region
+
+    // resolve endpoint
+    endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+    if err != nil {
+        return ctx, s3shared.NewFailedToResolveEndpointError(
+            tv,
+            options.partitionID,
+            options.requestRegion,
+            err,
+        )
+    }
+
+    // assign the resolved endpoint url to the request url
+    req.URL, err = url.Parse(endpoint.URL)
+    if err != nil {
+        return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+    }
+
+    if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+        ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+    } else {
+        // Must sign with s3-object-lambda
+        ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+    }
+
+    if len(endpoint.SigningRegion) != 0 {
+        ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+    } else {
+        ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+    }
+
+    // update serviceID to "s3-object-lambda"
+    ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda)
+
+    // disable host prefix behavior
+    ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+    // remove the serialized arn in place of /{Bucket}
+    ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+    // skip arn processing if the arn region resolves to an immutable endpoint
+    if endpoint.HostnameImmutable {
+        return ctx, nil
+    }
+
+    if endpoint.Source == aws.EndpointSourceServiceMetadata {
+        updateS3HostForS3ObjectLambda(req)
+    }
+
+    ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+    if err != nil {
+        return ctx, err
+    }
+
+    return ctx, nil
+}
+
+func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+    const s3GlobalLabel = "s3-global."
+    const accesspointLabel = "accesspoint."
+
+    tv := options.resource
+    req := options.request
+    resolveService := tv.Service
+    resolveRegion := options.requestRegion
+    arnPartition := tv.Partition
+
+    // resolve endpoint
+    ero := options.EndpointResolverOptions
+    ero.Logger = middleware.GetLogger(ctx)
+
+    endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+    if err != nil {
+        return ctx, s3shared.NewFailedToResolveEndpointError(
+            tv,
+            options.partitionID,
+            options.requestRegion,
+            err,
+        )
+    }
+
+    // set the signing region and version for MRAP
+    endpoint.SigningRegion = "*"
+    ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+    ctx = SetSignerVersion(ctx, v4a.Version)
+
+    if len(endpoint.SigningName) != 0 {
+        ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+    } else {
+        ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+    }
+
+    // skip arn processing if the arn region resolves to an immutable endpoint
+    if endpoint.HostnameImmutable {
+        return ctx, nil
+    }
+
+    // modify the endpoint host to use the s3-global host prefix
+    scheme := strings.SplitN(endpoint.URL, "://", 2)
+    dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero)
+    if err != nil {
+        return ctx, fmt.Errorf("error determining DNS suffix from ARN partition: %w", err)
+    }
+    // set the url as per the partition
+    endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix
+
+    // assign the resolved endpoint url to the request url
+    req.URL, err = url.Parse(endpoint.URL)
+    if err != nil {
+        return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+    }
+
+    // build the access point host prefix
+    accessPointHostPrefix := tv.AccessPointName + "." + accesspointLabel
+
+    // add the host prefix to the url
+    req.URL.Host = accessPointHostPrefix + req.URL.Host
+    if len(req.Host) > 0 {
+        req.Host = accessPointHostPrefix + req.Host
+    }
+
+    // validate the endpoint host
+    if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+        return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv)
+    }
+
+    // disable host prefix behavior
+    ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+    // remove the serialized arn in place of /{Bucket}
+    ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+    return ctx, nil
+}
+
+func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) {
+    // add the host prefix for the access point
+    accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "."
+    req.URL.Host = accessPointHostPrefix + req.URL.Host
+    if len(req.Host) > 0 {
+        req.Host = accessPointHostPrefix + req.Host
+    }
+
+    // validate the endpoint host
+    if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+        return ctx, s3shared.NewInvalidARNError(tv, err)
+    }
+
+    return ctx, nil
+}
+
+// ====== Outpost Accesspoint ========
+
+type outpostAccessPointOptions struct {
+    processARNResource
+    request       *http.Request
+    resource      arn.OutpostAccessPointARN
+    partitionID   string
+    requestRegion string
+}
+
+func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) {
+    tv := options.resource
+    req := options.request
+
+    resolveRegion := tv.Region
+    resolveService := tv.Service
+    endpointsID := resolveService
+    if strings.EqualFold(resolveService, "s3-outposts") {
+        // assign the endpoints ID as "s3"
+        endpointsID = "s3"
+    }
+
+    ero := options.EndpointResolverOptions
+    ero.Logger = middleware.GetLogger(ctx)
+    ero.ResolvedRegion = ""
+
+    // resolve the regional endpoint for the resolved region.
+    endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+    if err != nil {
+        return ctx, s3shared.NewFailedToResolveEndpointError(
+            tv,
+            options.partitionID,
+            options.requestRegion,
+            err,
+        )
+    }
+
+    // assign the resolved endpoint url to the request url
+    req.URL, err = url.Parse(endpoint.URL)
+    if err != nil {
+        return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+    }
+
+    // assign the resolved service from the arn as the signing name
+    if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+        ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+    } else {
+        ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+    }
+
+    if len(endpoint.SigningRegion) != 0 {
+        // redirect the signer to use the resolved endpoint signing name and region
+        ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+    } else {
+        ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+    }
+
+    // update serviceID to the resolved service id
+    ctx = awsmiddleware.SetServiceID(ctx, resolveService)
+
+    // disable host prefix behavior
+    ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+    // remove the serialized arn in place of /{Bucket}
+    ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+    // skip further customizations if the arn region resolves to an immutable endpoint
+    if endpoint.HostnameImmutable {
+        return ctx, nil
+    }
+
+    updateHostPrefix(req, endpointsID, resolveService)
+
+    // add the host prefix for s3-outposts
+    outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "."
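+    // For example (hypothetical values), access point "myap" on outpost
+    // "op-01234567890123456" under account 123456789012 yields the prefix
+    // "myap-123456789012.op-01234567890123456.", which is prepended to the
+    // resolved s3-outposts host below.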
+    req.URL.Host = outpostAPHostPrefix + req.URL.Host
+    if len(req.Host) > 0 {
+        req.Host = outpostAPHostPrefix + req.Host
+    }
+
+    // validate the endpoint host
+    if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+        return ctx, s3shared.NewInvalidARNError(tv, err)
+    }
+
+    return ctx, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
new file mode 100644
index 000000000000..2e030f29c996
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
@@ -0,0 +1,58 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/smithy-go/middleware"
+    "github.com/aws/smithy-go/transport/http"
+)
+
+// removeBucketFromPathMiddleware needs to be executed after the serialize step is performed
+type removeBucketFromPathMiddleware struct {
+}
+
+func (m *removeBucketFromPathMiddleware) ID() string {
+    return "S3:RemoveBucketFromPathMiddleware"
+}
+
+func (m *removeBucketFromPathMiddleware) HandleSerialize(
+    ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+    // check if a bucket removal from the HTTP path is required
+    bucket, ok := getRemoveBucketFromPath(ctx)
+    if !ok {
+        return next.HandleSerialize(ctx, in)
+    }
+
+    req, ok := in.Request.(*http.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown request type %T", in.Request)
+    }
+
+    removeBucketFromPath(req.URL, bucket)
+    return next.HandleSerialize(ctx, in)
+}
+
+type removeBucketKey struct {
+    bucket string
+}
+
+// setBucketToRemoveOnContext sets the bucket name to be removed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context {
+    return middleware.WithStackValue(ctx, removeBucketKey{}, bucket)
+}
+
+// getRemoveBucketFromPath returns the bucket name to remove from the path.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
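+//
+// A minimal sketch of the intended round trip within a middleware stack
+// (the ARN value is hypothetical):
+//
+//	ctx = setBucketToRemoveOnContext(ctx, "arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint")
+//	bucket, ok := getRemoveBucketFromPath(ctx) // ok == true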
+func getRemoveBucketFromPath(ctx context.Context) (string, bool) {
+    v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string)
+    return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go
new file mode 100644
index 000000000000..325b2d369a11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go
@@ -0,0 +1,84 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+    "github.com/aws/aws-sdk-go-v2/aws"
+    awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+    "github.com/aws/smithy-go/middleware"
+    "github.com/aws/smithy-go/transport/http"
+    "net/url"
+)
+
+type s3ObjectLambdaEndpoint struct {
+    // whether the operation should use the s3-object-lambda endpoint
+    UseEndpoint bool
+
+    // use transfer acceleration
+    UseAccelerate bool
+
+    EndpointResolver        EndpointResolver
+    EndpointResolverOptions EndpointResolverOptions
+}
+
+func (t *s3ObjectLambdaEndpoint) ID() string {
+    return "S3:ObjectLambdaEndpoint"
+}
+
+func (t *s3ObjectLambdaEndpoint) HandleSerialize(
+    ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+    if !t.UseEndpoint {
+        return next.HandleSerialize(ctx, in)
+    }
+
+    req, ok := in.Request.(*http.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
+    }
+
+    if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+        return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation")
+    }
+
+    if t.UseAccelerate {
+        return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation")
+    }
+
+    region := awsmiddleware.GetRegion(ctx)
+
+    ero := t.EndpointResolverOptions
+
+    endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero)
+    if err != nil {
+        return out, metadata, err
+    }
+
+    // Set the ServiceID and SigningName
+    ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda)
+
+    if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom {
+        ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+    } else {
+        ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda)
+    }
+
+    req.URL, err = url.Parse(endpoint.URL)
+    if err != nil {
+        return out, metadata, err
+    }
+
+    if len(endpoint.SigningRegion) > 0 {
+        ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+    } else {
+        ctx = awsmiddleware.SetSigningRegion(ctx, region)
+    }
+
+    if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable {
+        updateS3HostForS3ObjectLambda(req)
+    }
+
+    return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go
new file mode 100644
index 000000000000..6689acb8e9e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go
@@ -0,0 +1,213 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+    "github.com/aws/aws-sdk-go-v2/internal/v4a"
+    "github.com/aws/smithy-go/middleware"
+)
+
+type signerVersionKey struct{}
+
+// GetSignerVersion retrieves the signer version to use for signing.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSignerVersion(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string)
+    return v
+}
+
+// SetSignerVersion sets the signer version to be used for signing the request.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSignerVersion(ctx context.Context, version string) context.Context {
+    return middleware.WithStackValue(ctx, signerVersionKey{}, version)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
+type SignHTTPRequestMiddlewareOptions struct {
+
+    // credentials provider
+    CredentialsProvider aws.CredentialsProvider
+
+    // log signing
+    LogSigning bool
+
+    // v4 signer
+    V4Signer v4.HTTPSigner
+
+    // v4a signer
+    V4aSigner v4a.HTTPSigner
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given signers for signing requests
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+    return &SignHTTPRequestMiddleware{
+        credentialsProvider: options.CredentialsProvider,
+        v4Signer:            options.V4Signer,
+        v4aSigner:           options.V4aSigner,
+        logSigning:          options.LogSigning,
+    }
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation that selects the HTTP signing method
+type SignHTTPRequestMiddleware struct {
+
+    // credentials provider
+    credentialsProvider aws.CredentialsProvider
+
+    // log signing
+    logSigning bool
+
+    // v4 signer
+    v4Signer v4.HTTPSigner
+
+    // v4a signer
+    v4aSigner v4a.HTTPSigner
+}
+
+// ID is the SignHTTPRequestMiddleware identifier
+func (s *SignHTTPRequestMiddleware) ID() string {
+    return "Signing"
+}
+
+// HandleFinalize will take the provided input and sign the request using the SigV4
+// or SigV4a authentication scheme, depending on the signer version set in the context
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+    out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+    // fetch the signer type from the context
+    signerVersion := GetSignerVersion(ctx)
+
+    switch signerVersion {
+    case v4a.Version:
+        v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider)
+        if !ok {
+            return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer")
+        }
+
+        mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{
+            Credentials: v4aCredentialProvider,
+            Signer:      s.v4aSigner,
+            LogSigning:  s.logSigning,
+        })
+        return mw.HandleFinalize(ctx, in, next)
+
+    default:
+        mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+            CredentialsProvider: s.credentialsProvider,
+            Signer:              s.v4Signer,
+            LogSigning:          s.logSigning,
+        })
+        return mw.HandleFinalize(ctx, in, next)
+    }
+}
+
+// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise the middleware will be added at the tail of the
+// finalize step.
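+//
+// A sketch of typical wiring during client construction (the option values
+// shown are placeholders, not a prescribed configuration):
+//
+//	mw := NewSignHTTPRequestMiddleware(SignHTTPRequestMiddlewareOptions{
+//		CredentialsProvider: credsProvider,
+//		V4Signer:            sigv4Signer,
+//		V4aSigner:           sigv4aSigner,
+//	})
+//	if err := RegisterSigningMiddleware(stack, mw); err != nil {
+//		return err
+//	}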
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+    const signedID = "Signing"
+    _, present := stack.Finalize.Get(signedID)
+    if present {
+        _, err = stack.Finalize.Swap(signedID, signingMiddleware)
+    } else {
+        err = stack.Finalize.Add(signingMiddleware, middleware.After)
+    }
+    return err
+}
+
+// PresignHTTPRequestMiddlewareOptions is the configuration options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+    CredentialsProvider aws.CredentialsProvider
+    V4Presigner         v4.HTTPPresigner
+    V4aPresigner        v4a.HTTPPresigner
+    LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// It will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+
+    // credentials provider
+    credentialsProvider aws.CredentialsProvider
+
+    // sigV4 presigner
+    v4Signer v4.HTTPPresigner
+
+    // sigV4a presigner
+    v4aSigner v4a.HTTPPresigner
+
+    // log signing
+    logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given presigners for presigning requests
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+    return &PresignHTTPRequestMiddleware{
+        credentialsProvider: options.CredentialsProvider,
+        v4Signer:            options.V4Presigner,
+        v4aSigner:           options.V4aPresigner,
+        logSigning:          options.LogSigning,
+    }
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4 or SigV4a presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request, the middleware short
+// circuits the stack and does not forward the request to the next Finalize handler.
+func (p *PresignHTTPRequestMiddleware) HandleFinalize(
+    ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+    out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+    // fetch the signer type from the context
+    signerVersion := GetSignerVersion(ctx)
+
+    switch signerVersion {
+    case v4a.Version:
+        v4aCredentialProvider, ok := p.credentialsProvider.(v4a.CredentialsProvider)
+        if !ok {
+            return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer")
+        }
+
+        mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{
+            CredentialsProvider: v4aCredentialProvider,
+            Presigner:           p.v4aSigner,
+            LogSigning:          p.logSigning,
+        })
+        return mw.HandleFinalize(ctx, in, next)
+
+    default:
+        mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+            CredentialsProvider: p.credentialsProvider,
+            Presigner:           p.v4Signer,
+            LogSigning:          p.logSigning,
+        })
+        return mw.HandleFinalize(ctx, in, next)
+    }
+}
+
+// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware
+// is already present, the provided middleware will be swapped in. Otherwise the middleware will be added at the tail
+// of the finalize step.
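+//
+// Note that which presigner runs is decided per request: middleware such as
+// the multi-region access point handling in this package calls
+// SetSignerVersion(ctx, v4a.Version), and HandleFinalize then dispatches to
+// the SigV4a presigner; otherwise the SigV4 presigner is used.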
+func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) {
+    const signedID = "PresignHTTPRequest"
+    _, present := stack.Finalize.Get(signedID)
+    if present {
+        _, err = stack.Finalize.Swap(signedID, signingMiddleware)
+    } else {
+        err = stack.Finalize.Add(signingMiddleware, middleware.After)
+    }
+    return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
new file mode 100644
index 000000000000..e5f95254aace
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
@@ -0,0 +1,306 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+    "log"
+    "net/url"
+    "strings"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+    "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+    internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+    "github.com/aws/smithy-go/encoding/httpbinding"
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// EndpointResolver is the interface for resolving service endpoints.
+type EndpointResolver interface {
+    ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// UpdateEndpointParameterAccessor represents accessor functions used by the middleware
+type UpdateEndpointParameterAccessor struct {
+    // GetBucketFromInput is a function pointer that fetches the bucket name from
+    // the provided input. It takes an operation input value and returns a pointer
+    // to the bucket name, along with a bool that is false when the input has no
+    // bucket member.
+    GetBucketFromInput func(interface{}) (*string, bool)
+}
+
+// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup.
+type UpdateEndpointOptions struct {
+    // Accessor is the set of parameter accessors used by the middleware
+    Accessor UpdateEndpointParameterAccessor
+
+    // use path style
+    UsePathStyle bool
+
+    // use transfer acceleration
+    UseAccelerate bool
+
+    // indicates if an operation supports s3 transfer acceleration.
+    SupportsAccelerate bool
+
+    // use ARN region
+    UseARNRegion bool
+
+    // Indicates that the operation should target the s3-object-lambda endpoint.
+    // Used to direct operations that do not route based on an input ARN.
+    TargetS3ObjectLambda bool
+
+    // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+    EndpointResolver EndpointResolver
+
+    // EndpointResolverOptions used by the endpoint resolver
+    EndpointResolverOptions EndpointResolverOptions
+
+    // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled
+    DisableMultiRegionAccessPoints bool
+}
+
+// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions.
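+//
+// A hypothetical call site, wiring the middleware for an operation whose
+// input carries a Bucket member (the accessor and resolver names are
+// illustrative placeholders):
+//
+//	err := customizations.UpdateEndpoint(stack, customizations.UpdateEndpointOptions{
+//		Accessor: customizations.UpdateEndpointParameterAccessor{
+//			GetBucketFromInput: getBucketFromInput,
+//		},
+//		UsePathStyle:     options.UsePathStyle,
+//		EndpointResolver: resolver,
++//	})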
+func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) {
+    const serializerID = "OperationSerializer"
+
+    // initial arn lookup middleware
+    err = stack.Initialize.Add(&s3shared.ARNLookup{
+        GetARNValue: options.Accessor.GetBucketFromInput,
+    }, middleware.Before)
+    if err != nil {
+        return err
+    }
+
+    // process arn
+    err = stack.Serialize.Insert(&processARNResource{
+        UseARNRegion:                   options.UseARNRegion,
+        UseAccelerate:                  options.UseAccelerate,
+        EndpointResolver:               options.EndpointResolver,
+        EndpointResolverOptions:        options.EndpointResolverOptions,
+        DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+    }, serializerID, middleware.Before)
+    if err != nil {
+        return err
+    }
+
+    // process whether the operation requires the s3-object-lambda endpoint.
+    // Occurs before the operation serializer so that hostPrefix mutations
+    // can be handled correctly.
+    err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{
+        UseEndpoint:             options.TargetS3ObjectLambda,
+        UseAccelerate:           options.UseAccelerate,
+        EndpointResolver:        options.EndpointResolver,
+        EndpointResolverOptions: options.EndpointResolverOptions,
+    }, serializerID, middleware.Before)
+    if err != nil {
+        return err
+    }
+
+    // remove bucket arn middleware
+    err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After)
+    if err != nil {
+        return err
+    }
+
+    // update endpoint to use options for path style and accelerate
+    err = stack.Serialize.Insert(&updateEndpoint{
+        usePathStyle:       options.UsePathStyle,
+        getBucketFromInput: options.Accessor.GetBucketFromInput,
+        useAccelerate:      options.UseAccelerate,
+        supportsAccelerate: options.SupportsAccelerate,
+    }, serializerID, middleware.After)
+    if err != nil {
+        return err
+    }
+
+    return err
+}
+
+type updateEndpoint struct {
+    // path style options
+    usePathStyle       bool
+    getBucketFromInput func(interface{}) (*string, bool)
+
+    // accelerate options
+    useAccelerate      bool
+    supportsAccelerate bool
+}
+
+// ID returns the middleware ID.
+func (*updateEndpoint) ID() string {
+    return "S3:UpdateEndpoint"
+}
+
+func (u *updateEndpoint) HandleSerialize(
+    ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+    out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+    // if an arn was processed, skip this middleware
+    if _, ok := s3shared.GetARNResourceFromContext(ctx); ok {
+        return next.HandleSerialize(ctx, in)
+    }
+
+    // skip this customization if the host name is set as immutable
+    if smithyhttp.GetHostnameImmutable(ctx) {
+        return next.HandleSerialize(ctx, in)
+    }
+
+    req, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown request type %T", in.Request)
+    }
+
+    // check if accelerate is supported
+    if u.useAccelerate && !u.supportsAccelerate {
+        // accelerate is not supported, thus it will be ignored
+        log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.")
+        u.useAccelerate = false
+    }
+
+    // transfer acceleration is not supported with path style urls
+    if u.useAccelerate && u.usePathStyle {
+        log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.")
+        u.usePathStyle = false
+    }
+
+    if u.getBucketFromInput != nil {
+        // the customizations below only apply if a bucket name is provided
+        bucket, ok := u.getBucketFromInput(in.Parameters)
+        if ok && bucket != nil {
+            region := awsmiddleware.GetRegion(ctx)
+            if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil {
+                return out, metadata, err
+            }
+        }
+    }
+
+    return next.HandleSerialize(ctx, in)
+}
+
+func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error {
+    // do nothing if path style is enforced
+    if u.usePathStyle {
+        return nil
+    }
+
+    if !hostCompatibleBucketName(req.URL, bucket) {
+        // The bucket name must be a valid hostname component to be moved into
+        // the host for accelerate operations. For non-accelerate operations the
+        // bucket name can stay in the path if it is not a valid hostname component.
+        var err error
+        if u.useAccelerate {
+            err = fmt.Errorf("bucket name %s is not compatible with S3", bucket)
+        }
+
+        // No-op if not using accelerate.
+        return err
+    }
+
+    // accelerate is only supported if path style is disabled
+    if u.useAccelerate {
+        parts := strings.Split(req.URL.Host, ".")
+        if len(parts) < 3 {
+            return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host)
+        }
+
+        if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+            parts[0] = "s3-accelerate"
+        }
+
+        for i := 1; i+1 < len(parts); i++ {
+            if strings.EqualFold(parts[i], region) {
+                parts = append(parts[:i], parts[i+1:]...)
+                break
+            }
+        }
+
+        // construct the url host
+        req.URL.Host = strings.Join(parts, ".")
+    }
+
+    // move the bucket to follow virtual host style
+    moveBucketNameToHost(req.URL, bucket)
+    return nil
+}
+
+// moveBucketNameToHost updates the endpoint to use virtual host styling
+func moveBucketNameToHost(u *url.URL, bucket string) {
+    u.Host = bucket + "." + u.Host
+    removeBucketFromPath(u, bucket)
+}
+
+// removeBucketFromPath removes the bucket from the url path
+func removeBucketFromPath(u *url.URL, bucket string) {
+    if strings.HasPrefix(u.Path, "/"+bucket) {
+        // modify the url path
+        u.Path = strings.Replace(u.Path, "/"+bucket, "", 1)
+
+        // modify the url raw path
+        u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1)
+    }
+
+    if u.Path == "" {
+        u.Path = "/"
+    }
+
+    if u.RawPath == "" {
+        u.RawPath = "/"
+    }
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if the bucket is not
+// DNS compatible or the EndpointResolver resolves an aws.Endpoint with
+// HostnameImmutable member set to true.
+//
+// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+    // The bucket might be DNS compatible, but dots in the hostname would fail
+    // certificate validation, so do not use host-style.
+    if u.Scheme == "https" && strings.Contains(bucket, ".") {
+        return false
+    }
+
+    // check if the bucket is DNS compatible
+    return dnsCompatibleBucketName(bucket)
+}
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+    if strings.Contains(bucket, "..") {
+        return false
+    }
+
+    // checks for the `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping
+    if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) {
+        return false
+    }
+
+    for _, c := range bucket[1:] {
+        if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) {
+            return false
+        }
+    }
+
+    // checks for `^(\d+\.){3}\d+$` IP addressing
+    v := strings.SplitN(bucket, ".", -1)
+    if len(v) == 4 {
+        for _, c := range bucket {
+            if !((c > 47 && c < 58) || c == 46) {
+                // a non-digit, non-dot character confirms this is not an IP address
+                return true
+            }
+        }
+        // this is an IP address
+        return false
+    }
+
+    return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
new file mode 100644
index 000000000000..cd09031f413c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -0,0 +1,827 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+    "fmt"
+    "github.com/aws/aws-sdk-go-v2/aws"
+    endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+    "github.com/aws/smithy-go/logging"
+    "regexp"
+    "strings"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+    // Logger is a logging implementation that log events should be sent to.
+    Logger logging.Logger
+
+    // LogDeprecated indicates that deprecated endpoints should be logged to the
+    // provided logger.
+    LogDeprecated bool
+
+    // ResolvedRegion is used to override the region to be resolved, rather than
+    // using the value passed to the ResolveEndpoint method. This value is used by the
+    // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+    // name. You must not set this value directly in your application.
+    ResolvedRegion string
+
+    // DisableHTTPS informs the resolver to return an endpoint that does not use the
+    // HTTPS scheme.
+    DisableHTTPS bool
+
+    // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver S3 endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: 
endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: 
"eu-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "s3-external-1", + }: endpoints.Endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: 
endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.c2s.ic.gov", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.DualStackVariant, + 
}: { + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, +} + +// GetDNSSuffix returns the dnsSuffix URL component for the given partition id +func GetDNSSuffix(id string, options Options) (string, error) { + variant := transformToSharedOptions(options).GetEndpointVariant() + switch { + case strings.EqualFold(id, "aws"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-cn"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "api.amazonwebservices.com.cn", nil + + case 0: + return "amazonaws.com.cn", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso"): + switch variant { + case endpoints.FIPSVariant: + return "c2s.ic.gov", nil + + case 0: + return "c2s.ic.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-b"): + switch variant { + case endpoints.FIPSVariant: + return "sc2s.sgov.gov", nil + + case 0: + return "sc2s.sgov.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-us-gov"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + default: + return "", fmt.Errorf("unknown partition") + + } +} + +// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and +// options. +func GetDNSSuffixFromRegion(region string, options Options) (string, error) { + switch { + case partitionRegexp.Aws.MatchString(region): + return GetDNSSuffix("aws", options) + + case partitionRegexp.AwsCn.MatchString(region): + return GetDNSSuffix("aws-cn", options) + + case partitionRegexp.AwsIso.MatchString(region): + return GetDNSSuffix("aws-iso", options) + + case partitionRegexp.AwsIsoB.MatchString(region): + return GetDNSSuffix("aws-iso-b", options) + + case partitionRegexp.AwsUsGov.MatchString(region): + return GetDNSSuffix("aws-us-gov", options) + + default: + return GetDNSSuffix("aws", options) + + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go new file mode 100644 index 000000000000..f398aa95c64a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -0,0 +1,12682 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyxml "github.com/aws/smithy-go/encoding/xml" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "strconv" + "strings" +) + +type awsRestxml_serializeOpAbortMultipartUpload struct { +} + +func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AbortMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=AbortMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpCompleteMultipartUpload struct { +} + +func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CompleteMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CompleteMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MultipartUpload != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CompleteMultipartUpload", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || 
len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpCopyObject struct { +} + +func (*awsRestxml_serializeOpCopyObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CopyObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CopyObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + 
locationName := "X-Amz-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.CopySource != nil && len(*v.CopySource) > 0 { + locationName := "X-Amz-Copy-Source" + encoder.SetHeader(locationName).String(*v.CopySource) + } + + if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) + } + + if v.CopySourceIfModifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) + } + + if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-None-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) + } + + if v.CopySourceIfUnmodifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) + } + + if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) + } + + if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) + } + + if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + locationName := "X-Amz-Source-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + 
locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.MetadataDirective) > 0 { + locationName := "X-Amz-Metadata-Directive" + encoder.SetHeader(locationName).String(string(v.MetadataDirective)) + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if len(v.TaggingDirective) > 0 { + locationName := "X-Amz-Tagging-Directive" + encoder.SetHeader(locationName).String(string(v.TaggingDirective)) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpCreateBucket struct { +} + +func (*awsRestxml_serializeOpCreateBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.CreateBucketConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CreateBucketConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.ObjectLockEnabledForBucket { + locationName 
:= "X-Amz-Bucket-Object-Lock-Enabled" + encoder.SetHeader(locationName).Boolean(v.ObjectLockEnabledForBucket) + } + + if len(v.ObjectOwnership) > 0 { + locationName := "X-Amz-Object-Ownership" + encoder.SetHeader(locationName).String(string(v.ObjectOwnership)) + } + + return nil +} + +type awsRestxml_serializeOpCreateMultipartUpload struct { +} + +func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + 
locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + 
locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucket struct { +} + +func (*awsRestxml_serializeOpDeleteBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketCors struct { +} + +func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + 
locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketEncryption struct { +} + +func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" 
+ restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + 
+ if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketLifecycle struct { +} + +func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketLifecycleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil 
{ + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketPolicy struct { +} + +func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketReplication struct { +} + +func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketTagging struct { +} + +func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketWebsite struct { +} + +func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { + return "OperationSerializer" +} + 
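+// HandleSerialize targets DELETE /{Bucket}?website. Like DeleteBucketPolicy,
+// DeleteBucketReplication, and DeleteBucketTagging above, the input binds only
+// the Bucket URI member and the optional X-Amz-Expected-Bucket-Owner header.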
+func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObject struct { +} + +func (*awsRestxml_serializeOpDeleteObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=DeleteObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObjects struct { +} + +func (*awsRestxml_serializeOpDeleteObjects) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?delete&x-id=DeleteObjects") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Delete != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Delete", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + 
return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAcl struct { +} + +func (*awsRestxml_serializeOpGetBucketAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return 
fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=GetBucketAnalyticsConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketCors struct { +} + +func (*awsRestxml_serializeOpGetBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*GetBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketEncryption struct { +} + +func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: 
fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + 
opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=GetBucketInventoryConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { 
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLocation struct { +} + +func (*awsRestxml_serializeOpGetBucketLocation) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLocationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?location") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLogging struct { +} + +func (*awsRestxml_serializeOpGetBucketLogging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLoggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=GetBucketMetricsConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } 
+ + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketPolicy struct { +} + +func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := 
"X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketPolicyStatus struct { +} + +func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketPolicyStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policyStatus") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketReplication struct { +} + +func (*awsRestxml_serializeOpGetBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketRequestPayment struct { +} + +func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketRequestPaymentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketTagging struct { +} + +func (*awsRestxml_serializeOpGetBucketTagging) ID() string { + 
return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketVersioning struct { +} + +func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketVersioningInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketWebsite struct { +} + +func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetObject struct { +} + +func (*awsRestxml_serializeOpGetObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=GetObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := "If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != 0 { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + 
encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectAcl struct { +} + +func (*awsRestxml_serializeOpGetObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := 
"X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectAttributes struct { +} + +func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectAttributesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?attributes") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MaxParts != 0 { + locationName := "X-Amz-Max-Parts" + encoder.SetHeader(locationName).Integer(v.MaxParts) + } + + if v.ObjectAttributes != nil { + locationName := "X-Amz-Object-Attributes" + for i := range v.ObjectAttributes { + if len(v.ObjectAttributes[i]) > 0 { + escaped := string(v.ObjectAttributes[i]) + if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.ObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } + } + + if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 { + locationName := "X-Amz-Part-Number-Marker" + encoder.SetHeader(locationName).String(*v.PartNumberMarker) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + 
encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectLegalHold struct { +} + +func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectLegalHoldInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type 
awsRestxml_serializeOpGetObjectLockConfiguration struct { +} + +func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectLockConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectRetention struct { +} + +func (*awsRestxml_serializeOpGetObjectRetention) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectRetentionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := 
awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectTagging struct { +} + +func (*awsRestxml_serializeOpGetObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + 
} + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectTorrent struct { +} + +func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectTorrentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?torrent") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetPublicAccessBlock) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadBucket struct { +} + +func (*awsRestxml_serializeOpHeadBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + 
return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadObject struct { +} + +func (*awsRestxml_serializeOpHeadObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := 
"If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != 0 { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=ListBucketAnalyticsConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := 
encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + return nil +} + +type awsRestxml_serializeOpListBucketInventoryConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=ListBucketInventoryConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketMetricsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=ListBucketMetricsConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v 
*ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBuckets struct { +} + +func (*awsRestxml_serializeOpListBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +type awsRestxml_serializeOpListMultipartUploads struct { +} + +func (*awsRestxml_serializeOpListMultipartUploads) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMultipartUploadsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?uploads") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := 
awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.KeyMarker != nil { + encoder.SetQuery("key-marker").String(*v.KeyMarker) + } + + if v.MaxUploads != 0 { + encoder.SetQuery("max-uploads").Integer(v.MaxUploads) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if v.UploadIdMarker != nil { + encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker) + } + + return nil +} + +type awsRestxml_serializeOpListObjects struct { +} + +func (*awsRestxml_serializeOpListObjects) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); 
err != nil { + return err + } + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Marker != nil { + encoder.SetQuery("marker").String(*v.Marker) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpListObjectsV2 struct { +} + +func (*awsRestxml_serializeOpListObjectsV2) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsV2Input) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?list-type=2") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.FetchOwner { + encoder.SetQuery("fetch-owner").Boolean(v.FetchOwner) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if 
len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.StartAfter != nil { + encoder.SetQuery("start-after").String(*v.StartAfter) + } + + return nil +} + +type awsRestxml_serializeOpListObjectVersions struct { +} + +func (*awsRestxml_serializeOpListObjectVersions) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectVersionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versions") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.KeyMarker != nil { + encoder.SetQuery("key-marker").String(*v.KeyMarker) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if v.VersionIdMarker != nil { + encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker) + } + + return nil +} + +type awsRestxml_serializeOpListParts struct { +} + +func (*awsRestxml_serializeOpListParts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport 
type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListPartsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=ListParts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MaxParts != 0 { + encoder.SetQuery("max-parts").Integer(v.MaxParts) + } + + if v.PartNumberMarker != nil { + encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccelerateConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccelerateConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccelerateConfiguration(input.AccelerateConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAcl struct { +} + +func (*awsRestxml_serializeOpPutBucketAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccessControlPolicy != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlPolicy", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + 
encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AnalyticsConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AnalyticsConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketCors struct { +} + +func (*awsRestxml_serializeOpPutBucketCors) ID() string { + return "OperationSerializer" 
+} + +func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.CORSConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CORSConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketEncryption struct { +} + +func (*awsRestxml_serializeOpPutBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ServerSideEncryptionConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ServerSideEncryptionConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.IntelligentTieringConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IntelligentTieringConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type 
%T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.InventoryConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InventoryConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", 
in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.LifecycleConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LifecycleConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketLogging struct { +} + +func (*awsRestxml_serializeOpPutBucketLogging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketLoggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.BucketLoggingStatus != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketLoggingStatus", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, 
opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MetricsConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetricsConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketNotificationConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if 
err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.NotificationConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NotificationConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.SkipDestinationValidation { + locationName := "X-Amz-Skip-Destination-Validation" + encoder.SetHeader(locationName).Boolean(v.SkipDestinationValidation) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + + if input.OwnershipControls != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OwnershipControls", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketPolicy struct { +} + +func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Policy != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("text/plain") + } + + payload := strings.NewReader(*input.Policy) + if request, 
err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ConfirmRemoveSelfBucketAccess { + locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" + encoder.SetHeader(locationName).Boolean(v.ConfirmRemoveSelfBucketAccess) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketReplication struct { +} + +func (*awsRestxml_serializeOpPutBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ReplicationConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicationConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Token != nil && len(*v.Token) > 0 { + locationName := "X-Amz-Bucket-Object-Lock-Token" + encoder.SetHeader(locationName).String(*v.Token) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketRequestPayment struct { +} + +func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketRequestPaymentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.RequestPaymentConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RequestPaymentConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := 
awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketTagging struct { +} + +func (*awsRestxml_serializeOpPutBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != 
nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketVersioning struct { +} + +func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketVersioningInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.VersioningConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "VersioningConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + 
} + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketWebsite struct { +} + +func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.WebsiteConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "WebsiteConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutObject struct { +} + +func (*awsRestxml_serializeOpPutObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=PutObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Body != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + 
encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := 
encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectAcl struct { +} + +func (*awsRestxml_serializeOpPutObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccessControlPolicy != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlPolicy", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } 
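+ // Key is bound to the greedy {Key+} label in the operation path above, not to a header or query parameter.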
+ if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLegalHold struct { +} + +func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.LegalHold != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LegalHold", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + 
encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLockConfiguration struct { +} + +func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ObjectLockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectLockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + 
return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.Token != nil && len(*v.Token) > 0 { + locationName := "X-Amz-Bucket-Object-Lock-Token" + encoder.SetHeader(locationName).String(*v.Token) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectRetention struct { +} + +func (*awsRestxml_serializeOpPutObjectRetention) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectRetentionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Retention != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Retention", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v 
*PutObjectRetentionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectTagging struct { +} + +func (*awsRestxml_serializeOpPutObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.PublicAccessBlockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot 
:= smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PublicAccessBlockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpRestoreObject struct { +} + +func (*awsRestxml_serializeOpRestoreObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RestoreObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?restore&x-id=RestoreObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.RestoreRequest != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + 
Local: "RestoreRequest", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpSelectObjectContent struct { +} + +func (*awsRestxml_serializeOpSelectObjectContent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SelectObjectContentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/xml") 
+ + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SelectObjectContentRequest", + }, + Attr: rootAttr, + } + root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + return nil +} + +func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error { + defer value.Close() + if v.Expression != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expression", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Expression) + } + if len(v.ExpressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ExpressionType)) + } + if v.InputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { + return err + } + } + if v.OutputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSerialization", + }, + Attr: 
rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { + return err + } + } + if v.RequestProgress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RequestProgress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil { + return err + } + } + if v.ScanRange != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ScanRange", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil { + return err + } + } + return nil +} + +type awsRestxml_serializeOpUploadPart struct { +} + +func (*awsRestxml_serializeOpUploadPart) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPart") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Body != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := 
"X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpUploadPartCopy struct { +} + +func (*awsRestxml_serializeOpUploadPartCopy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartCopyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPartCopy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.CopySource != nil && len(*v.CopySource) > 0 { + locationName := "X-Amz-Copy-Source" + encoder.SetHeader(locationName).String(*v.CopySource) + } + + if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) + } + + if v.CopySourceIfModifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) + } + + if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-None-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) + } + + if v.CopySourceIfUnmodifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) + } + + if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + locationName := "X-Amz-Copy-Source-Range" + encoder.SetHeader(locationName).String(*v.CopySourceRange) + } + + if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) + } + + if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) + } + + if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + locationName := "X-Amz-Source-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := 
"X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpWriteGetObjectResponse struct { +} + +func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*WriteGetObjectResponseInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Body != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 { + locationName := "X-Amz-Fwd-Header-Accept-Ranges" + encoder.SetHeader(locationName).String(*v.AcceptRanges) + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "X-Amz-Fwd-Header-Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := 
"X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentRange != nil && len(*v.ContentRange) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Range" + encoder.SetHeader(locationName).String(*v.ContentRange) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.DeleteMarker { + locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" + encoder.SetHeader(locationName).Boolean(v.DeleteMarker) + } + + if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { + locationName := "X-Amz-Fwd-Error-Code" + encoder.SetHeader(locationName).String(*v.ErrorCode) + } + + if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 { + locationName := "X-Amz-Fwd-Error-Message" + encoder.SetHeader(locationName).String(*v.ErrorMessage) + } + + if v.ETag != nil && len(*v.ETag) > 0 { + locationName := "X-Amz-Fwd-Header-Etag" + encoder.SetHeader(locationName).String(*v.ETag) + } + + if v.Expiration != nil && len(*v.Expiration) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" + encoder.SetHeader(locationName).String(*v.Expiration) + } + + if v.Expires != nil { + locationName := "X-Amz-Fwd-Header-Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.LastModified != nil { + locationName := "X-Amz-Fwd-Header-Last-Modified" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified)) + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if v.MissingMeta != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" + encoder.SetHeader(locationName).Integer(v.MissingMeta) + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if v.PartsCount != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" + encoder.SetHeader(locationName).Integer(v.PartsCount) + } + + if len(v.ReplicationStatus) > 0 { + locationName := 
"X-Amz-Fwd-Header-X-Amz-Replication-Status" + encoder.SetHeader(locationName).String(string(v.ReplicationStatus)) + } + + if len(v.RequestCharged) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged" + encoder.SetHeader(locationName).String(string(v.RequestCharged)) + } + + if v.RequestRoute != nil && len(*v.RequestRoute) > 0 { + locationName := "X-Amz-Request-Route" + encoder.SetHeader(locationName).String(*v.RequestRoute) + } + + if v.RequestToken != nil && len(*v.RequestToken) > 0 { + locationName := "X-Amz-Request-Token" + encoder.SetHeader(locationName).String(*v.RequestToken) + } + + if v.Restore != nil && len(*v.Restore) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Restore" + encoder.SetHeader(locationName).String(*v.Restore) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if v.StatusCode != 0 { + locationName := "X-Amz-Fwd-Status" + encoder.SetHeader(locationName).Integer(v.StatusCode) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.TagCount != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" + encoder.SetHeader(locationName).Integer(v.TagCount) + } + + if v.VersionId != nil && len(*v.VersionId) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" + encoder.SetHeader(locationName).String(*v.VersionId) + } + + return nil +} + +func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { + defer value.Close() + if v.DaysAfterInitiation != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DaysAfterInitiation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.DaysAfterInitiation) + } + return nil +} + +func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error { + defer value.Close() + if v.Grants != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlList", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil { + return err + } + } + if v.Owner != nil { + rootAttr := []smithyxml.Attr{} + root := 
smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Owner", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error { + defer value.Close() + if len(v.Owner) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Owner", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Owner)) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.StorageClassAnalysis != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClassAnalysis", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error { + defer value.Close() + if v.S3BucketDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3BucketDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := 
awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.AnalyticsFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.AnalyticsFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.AnalyticsFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error { + defer value.Close() + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.BucketAccountId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketAccountId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.BucketAccountId) + } + if len(v.Format) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Format", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Format)) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error { + defer value.Close() + if v.LoggingEnabled != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LoggingEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value 
smithyxml.Value) error { + defer value.Close() + if v.Parts != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Part", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error { + defer value.Close() + if v.ChecksumCRC32 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC32", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC32) + } + if v.ChecksumCRC32C != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC32C", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC32C) + } + if v.ChecksumSHA1 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumSHA1", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumSHA1) + } + if v.ChecksumSHA256 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumSHA256", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumSHA256) + } + if v.ETag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ETag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ETag) + } + if v.PartNumber != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PartNumber", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.PartNumber) + } + return nil +} + +func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error { + defer value.Close() + if v.HttpErrorCodeReturnedEquals != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HttpErrorCodeReturnedEquals", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HttpErrorCodeReturnedEquals) + } + if v.KeyPrefixEquals != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KeyPrefixEquals", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KeyPrefixEquals) + } + return nil +} + +func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.CORSRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CORSRule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error { + defer value.Close() + if 
v.AllowedHeaders != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedHeader", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil { + return err + } + } + if v.AllowedMethods != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedMethod", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil { + return err + } + } + if v.AllowedOrigins != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedOrigin", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil { + return err + } + } + if v.ExposeHeaders != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExposeHeader", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.MaxAgeSeconds != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MaxAgeSeconds", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.MaxAgeSeconds) + } + return nil +} + +func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.LocationConstraint) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LocationConstraint", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.LocationConstraint)) + } + return nil +} + +func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error { + defer value.Close() + if v.AllowQuotedRecordDelimiter { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowQuotedRecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.AllowQuotedRecordDelimiter) + } + if v.Comments != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Comments", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Comments) + } + if v.FieldDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FieldDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.FieldDelimiter) + } + if len(v.FileHeaderInfo) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: 
"FileHeaderInfo", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.FileHeaderInfo)) + } + if v.QuoteCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteCharacter) + } + if v.QuoteEscapeCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteEscapeCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteEscapeCharacter) + } + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error { + defer value.Close() + if v.FieldDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FieldDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.FieldDelimiter) + } + if v.QuoteCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteCharacter) + } + if v.QuoteEscapeCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteEscapeCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteEscapeCharacter) + } + if len(v.QuoteFields) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteFields", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.QuoteFields)) + } + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error { + defer value.Close() + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if len(v.Mode) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Mode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Mode)) + } + if v.Years != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Years", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Years) + } + return nil +} + +func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error { + defer value.Close() + if v.Objects != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Object", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil { + return err + } + } + if v.Quiet { + rootAttr := []smithyxml.Attr{} + root 
:= smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Quiet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.Quiet) + } + return nil +} + +func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error { + defer value.Close() + if v.AccessControlTranslation != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlTranslation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil { + return err + } + } + if v.Account != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Account", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Account) + } + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.EncryptionConfiguration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EncryptionConfiguration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { + return err + } + } + if v.Metrics != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Metrics", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil { + return err + } + } + if v.ReplicationTime != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicationTime", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil { + return err + } + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error { + defer value.Close() + if len(v.EncryptionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EncryptionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.EncryptionType)) + } + if v.KMSContext != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSContext", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KMSContext) + } + if v.KMSKeyId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSKeyId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + 
el.String(*v.KMSKeyId) + } + return nil +} + +func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.ReplicaKmsKeyID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicaKmsKeyID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplicaKmsKeyID) + } + return nil +} + +func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Key) + } + return nil +} + +func awsRestxml_serializeDocumentEventBridgeConfiguration(v *types.EventBridgeConfiguration, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error { + defer value.Close() + if len(v.Name) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Name)) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error { + defer value.Close() + if len(v.Tier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Tier)) + } + return nil +} + +func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error { + defer value.Close() + if v.Grantee != nil { + rootAttr := []smithyxml.Attr{} + rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) 
+ if len(v.Grantee.Type) > 0 { + var av string + av = string(v.Grantee.Type) + rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) + } + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grantee", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { + return err + } + } + if len(v.Permission) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Permission", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Permission)) + } + return nil +} + +func awsRestxml_serializeDocumentGrantee(v *types.Grantee, value smithyxml.Value) error { + defer value.Close() + if v.DisplayName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DisplayName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.DisplayName) + } + if v.EmailAddress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EmailAddress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.EmailAddress) + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.URI != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "URI", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.URI) + } + return nil +} + +func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grant", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error { + defer value.Close() + if v.Suffix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Suffix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Suffix) + } + return nil +} + +func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error { + defer value.Close() + if len(v.CompressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CompressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.CompressionType)) + } + if v.CSV != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CSV", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil { + return err + } + } + if v.JSON != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "JSON", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil { + return err + } + } + 
if v.Parquet != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Parquet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Tierings != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tiering", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error { + defer value.Close() + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := 
awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.IncludedObjectVersions) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IncludedObjectVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.IncludedObjectVersions)) + } + { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IsEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.IsEnabled) + } + if v.OptionalFields != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OptionalFields", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil { + return err + } + } + if v.Schedule != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Schedule", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error { + defer value.Close() + if v.S3BucketDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3BucketDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error { + defer value.Close() + if v.SSEKMS != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-KMS", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil { + return err + } + } + if v.SSES3 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-S3", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer 
value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Field", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error { + defer value.Close() + if v.AccountId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccountId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.AccountId) + } + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.Encryption != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Encryption", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil { + return err + } + } + if len(v.Format) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Format", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Format)) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error { + defer value.Close() + if len(v.Frequency) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Frequency", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Frequency)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error { + defer value.Close() + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error { + defer value.Close() + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := 
awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.LambdaFunctionArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CloudFunction", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.LambdaFunctionArn) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiration, value smithyxml.Value) error { + defer value.Close() + if v.Date != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Date", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.Date)) + } + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if v.ExpiredObjectDeleteMarker { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpiredObjectDeleteMarker", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.ExpiredObjectDeleteMarker) + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error { + defer value.Close() + if v.AbortIncompleteMultipartUpload != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AbortIncompleteMultipartUpload", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil { + return err + } + } + if v.Expiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.NoncurrentVersionExpiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionExpiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := 
awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil { + return err + } + } + if v.NoncurrentVersionTransitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionTransition", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Transitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Transition", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.ObjectSizeGreaterThan != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.LifecycleRuleFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberObjectSizeLessThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: 
customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.LifecycleRuleFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error { + defer value.Close() + if v.TargetBucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetBucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TargetBucket) + } + if v.TargetGrants != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetGrants", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil { + return err + } + } + if v.TargetPrefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetPrefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TargetPrefix) + } + return nil +} + +func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error { + defer value.Close() + if v.Name != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Name) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error { + defer value.Close() + if v.EventThreshold != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EventThreshold", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil { + return err + } + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error { + 
defer value.Close() + if v.AccessPointArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.AccessPointArn) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + return nil +} + +func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.MetricsFilterMemberAccessPointArn: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.MetricsFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.MetricsFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.MetricsFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error { + defer value.Close() + if v.NewerNoncurrentVersions != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NewerNoncurrentVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NewerNoncurrentVersions) + } + if v.NoncurrentDays != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: 
smithyxml.Name{ + Local: "NoncurrentDays", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NoncurrentDays) + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error { + defer value.Close() + if v.NewerNoncurrentVersions != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NewerNoncurrentVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NewerNoncurrentVersions) + } + if v.NoncurrentDays != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentDays", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NoncurrentDays) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.EventBridgeConfiguration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EventBridgeConfiguration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEventBridgeConfiguration(v.EventBridgeConfiguration, el); err != nil { + return err + } + } + if v.LambdaFunctionConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CloudFunctionConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil { + return err + } + } + if v.QueueConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QueueConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil { + return err + } + } + if v.TopicConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TopicConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil { + return err 
+ } + } + return nil +} + +func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Key) + } + if v.VersionId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "VersionId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.VersionId) + } + return nil +} + +func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.ObjectLockEnabled) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectLockEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ObjectLockEnabled)) + } + if v.Rule != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error { + defer value.Close() + if len(v.Mode) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Mode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Mode)) + } + if v.RetainUntilDate != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RetainUntilDate", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.RetainUntilDate)) + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error { + defer value.Close() + if v.DefaultRetention != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DefaultRetention", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error { + defer value.Close() + if v.S3 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if 
err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error { + defer value.Close() + if v.CSV != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CSV", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil { + return err + } + } + if v.JSON != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "JSON", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error { + defer value.Close() + if v.DisplayName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DisplayName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.DisplayName) + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error { + defer value.Close() + if len(v.ObjectOwnership) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectOwnership", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ObjectOwnership)) + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.BlockPublicAcls { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BlockPublicAcls", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BlockPublicAcls) + } + if v.BlockPublicPolicy { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BlockPublicPolicy", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BlockPublicPolicy) + } + if v.IgnorePublicAcls { + rootAttr := []smithyxml.Attr{} + root := 
smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IgnorePublicAcls", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.IgnorePublicAcls) + } + if v.RestrictPublicBuckets { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RestrictPublicBuckets", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.RestrictPublicBuckets) + } + return nil +} + +func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.QueueArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Queue", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QueueArn) + } + return nil +} + +func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error { + defer value.Close() + if v.HostName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HostName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HostName) + } + if v.HttpRedirectCode != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HttpRedirectCode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HttpRedirectCode) + } + if len(v.Protocol) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Protocol", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Protocol)) + } + if v.ReplaceKeyPrefixWith != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplaceKeyPrefixWith", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplaceKeyPrefixWith) + } + if v.ReplaceKeyWith != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplaceKeyWith", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplaceKeyWith) + } + return nil +} + +func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error { + defer value.Close() + if v.HostName != 
nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HostName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HostName) + } + if len(v.Protocol) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Protocol", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Protocol)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Role != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Role", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Role) + } + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error { + defer value.Close() + if v.DeleteMarkerReplication != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DeleteMarkerReplication", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil { + return err + } + } + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil { + return err + } + } + if v.ExistingObjectReplication != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExistingObjectReplication", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Priority != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: 
"Priority", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Priority) + } + if v.SourceSelectionCriteria != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SourceSelectionCriteria", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil { + return err + } + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.ReplicationRuleFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.ReplicationRuleFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.ReplicationRuleFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Time != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + 
Local: "Time", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error { + defer value.Close() + if v.Minutes != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Minutes", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Minutes) + } + return nil +} + +func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.Payer) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Payer", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Payer)) + } + return nil +} + +func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error { + defer value.Close() + if v.Enabled { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Enabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.Enabled) + } + return nil +} + +func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error { + defer value.Close() + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if v.Description != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Description", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Description) + } + if v.GlacierJobParameters != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "GlacierJobParameters", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil { + return err + } + } + if v.OutputLocation != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputLocation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil { + return err + } + } + if v.SelectParameters != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SelectParameters", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil { + return err + } + } + if len(v.Tier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Tier)) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + +func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error { + defer value.Close() + if v.Condition != nil { + rootAttr := 
[]smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Condition", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil { + return err + } + } + if v.Redirect != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Redirect", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RoutingRule", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error { + defer value.Close() + if v.FilterRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FilterRule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error { + defer value.Close() + if v.AccessControlList != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlList", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil { + return err + } + } + if v.BucketName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.BucketName) + } + if len(v.CannedACL) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CannedACL", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.CannedACL)) + } + if v.Encryption != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Encryption", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + if v.Tagging != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTagging(v.Tagging, 
el); err != nil { + return err + } + } + if v.UserMetadata != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "UserMetadata", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { + defer value.Close() + if v.End != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "End", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.End) + } + if v.Start != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Start", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.Start) + } + return nil +} + +func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error { + defer value.Close() + if v.Expression != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expression", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Expression) + } + if len(v.ExpressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ExpressionType)) + } + if v.InputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { + return err + } + } + if v.OutputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error { + defer value.Close() + if v.KMSMasterKeyID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSMasterKeyID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KMSMasterKeyID) + } + if len(v.SSEAlgorithm) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSEAlgorithm", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.SSEAlgorithm)) + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error { + defer value.Close() + if 
v.ApplyServerSideEncryptionByDefault != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ApplyServerSideEncryptionByDefault", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil { + return err + } + } + if v.BucketKeyEnabled { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketKeyEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BucketKeyEnabled) + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error { + defer value.Close() + if v.ReplicaModifications != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicaModifications", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil { + return err + } + } + if v.SseKmsEncryptedObjects != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SseKmsEncryptedObjects", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error { + defer value.Close() + if v.KeyId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KeyId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KeyId) + } + return nil +} + +func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error { + defer value.Close() + if v.DataExport != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DataExport", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error { + defer value.Close() + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + 
Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil { + return err + } + } + if len(v.OutputSchemaVersion) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSchemaVersion", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.OutputSchemaVersion)) + } + return nil +} + +func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Key) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error { + defer value.Close() + if v.TagSet != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TagSet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error { + defer value.Close() + if v.Grantee != nil { + rootAttr := []smithyxml.Attr{} + rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) + if len(v.Grantee.Type) > 0 { + var av string + av = string(v.Grantee.Type) + rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) + } + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grantee", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { + return err + } + } + if len(v.Permission) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Permission", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Permission)) + } + return nil +} + +func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grant", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil { + return err + } + } + return nil +} + +func 
awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error { + defer value.Close() + if len(v.AccessTier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessTier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.AccessTier)) + } + { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + return nil +} + +func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.TopicArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Topic", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TopicArn) + } + return nil +} + +func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error { + defer value.Close() + if v.Date != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Date", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.Date)) + } + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = 
value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetadataEntry", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.MFADelete) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MfaDelete", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.MFADelete)) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.ErrorDocument != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ErrorDocument", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil { + return err + } + } + if v.IndexDocument != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IndexDocument", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil { + return err + } + } + if v.RedirectAllRequestsTo != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RedirectAllRequestsTo", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil { + return err + } + } + if v.RoutingRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RoutingRules", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go new file mode 100644 index 000000000000..5b5254083eb8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -0,0 +1,1200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AnalyticsS3ExportFileFormat string + +// Enum values for AnalyticsS3ExportFileFormat +const ( + AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV" +) + +// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this +// can be expanded in the future, and so it is only as up to date as the client. 
+// The ordering of this slice is not guaranteed to be stable across updates. +func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { + return []AnalyticsS3ExportFileFormat{ + "CSV", + } +} + +type ArchiveStatus string + +// Enum values for ArchiveStatus +const ( + ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS" + ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS" +) + +// Values returns all known values for ArchiveStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ArchiveStatus) Values() []ArchiveStatus { + return []ArchiveStatus{ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS", + } +} + +type BucketAccelerateStatus string + +// Enum values for BucketAccelerateStatus +const ( + BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled" + BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended" +) + +// Values returns all known values for BucketAccelerateStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { + return []BucketAccelerateStatus{ + "Enabled", + "Suspended", + } +} + +type BucketCannedACL string + +// Enum values for BucketCannedACL +const ( + BucketCannedACLPrivate BucketCannedACL = "private" + BucketCannedACLPublicRead BucketCannedACL = "public-read" + BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write" + BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read" +) + +// Values returns all known values for BucketCannedACL. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (BucketCannedACL) Values() []BucketCannedACL { + return []BucketCannedACL{ + "private", + "public-read", + "public-read-write", + "authenticated-read", + } +} + +type BucketLocationConstraint string + +// Enum values for BucketLocationConstraint +const ( + BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1" + BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1" + BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1" + BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" + BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" + BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" + BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" + BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" + BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" + BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" + BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" + BucketLocationConstraintEu BucketLocationConstraint = "EU" + BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" + BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" + BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" + BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" + BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" + BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" + BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" + BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" + BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" + BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1" + BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1" + BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1" + BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" +) + +// Values returns all known values for BucketLocationConstraint. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketLocationConstraint) Values() []BucketLocationConstraint { + return []BucketLocationConstraint{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "EU", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", + } +} + +type BucketLogsPermission string + +// Enum values for BucketLogsPermission +const ( + BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL" + BucketLogsPermissionRead BucketLogsPermission = "READ" + BucketLogsPermissionWrite BucketLogsPermission = "WRITE" +) + +// Values returns all known values for BucketLogsPermission. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (BucketLogsPermission) Values() []BucketLogsPermission { + return []BucketLogsPermission{ + "FULL_CONTROL", + "READ", + "WRITE", + } +} + +type BucketVersioningStatus string + +// Enum values for BucketVersioningStatus +const ( + BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled" + BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended" +) + +// Values returns all known values for BucketVersioningStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketVersioningStatus) Values() []BucketVersioningStatus { + return []BucketVersioningStatus{ + "Enabled", + "Suspended", + } +} + +type ChecksumAlgorithm string + +// Enum values for ChecksumAlgorithm +const ( + ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" + ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" + ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" + ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" +) + +// Values returns all known values for ChecksumAlgorithm. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { + return []ChecksumAlgorithm{ + "CRC32", + "CRC32C", + "SHA1", + "SHA256", + } +} + +type ChecksumMode string + +// Enum values for ChecksumMode +const ( + ChecksumModeEnabled ChecksumMode = "ENABLED" +) + +// Values returns all known values for ChecksumMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ChecksumMode) Values() []ChecksumMode { + return []ChecksumMode{ + "ENABLED", + } +} + +type CompressionType string + +// Enum values for CompressionType +const ( + CompressionTypeNone CompressionType = "NONE" + CompressionTypeGzip CompressionType = "GZIP" + CompressionTypeBzip2 CompressionType = "BZIP2" +) + +// Values returns all known values for CompressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CompressionType) Values() []CompressionType { + return []CompressionType{ + "NONE", + "GZIP", + "BZIP2", + } +} + +type DeleteMarkerReplicationStatus string + +// Enum values for DeleteMarkerReplicationStatus +const ( + DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled" + DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled" +) + +// Values returns all known values for DeleteMarkerReplicationStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { + return []DeleteMarkerReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type EncodingType string + +// Enum values for EncodingType +const ( + EncodingTypeUrl EncodingType = "url" +) + +// Values returns all known values for EncodingType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (EncodingType) Values() []EncodingType { + return []EncodingType{ + "url", + } +} + +type Event string + +// Values returns all known values for Event. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Event) Values() []Event { + return []Event{ + "s3:ReducedRedundancyLostObject", + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold", + "s3:ObjectRestore:Delete", + "s3:LifecycleTransition", + "s3:IntelligentTiering", + "s3:ObjectAcl:Put", + "s3:LifecycleExpiration:*", + "s3:LifecycleExpiration:Delete", + "s3:LifecycleExpiration:DeleteMarkerCreated", + "s3:ObjectTagging:*", + "s3:ObjectTagging:Put", + "s3:ObjectTagging:Delete", + } +} + +type ExistingObjectReplicationStatus string + +// Enum values for ExistingObjectReplicationStatus +const ( + ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" + ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" +) + +// Values returns all known values for ExistingObjectReplicationStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { + return []ExistingObjectReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpirationStatus string + +// Enum values for ExpirationStatus +const ( + ExpirationStatusEnabled ExpirationStatus = "Enabled" + ExpirationStatusDisabled ExpirationStatus = "Disabled" +) + +// Values returns all known values for ExpirationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationStatus) Values() []ExpirationStatus { + return []ExpirationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpressionType string + +// Enum values for ExpressionType +const ( + ExpressionTypeSql ExpressionType = "SQL" +) + +// Values returns all known values for ExpressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpressionType) Values() []ExpressionType { + return []ExpressionType{ + "SQL", + } +} + +type FileHeaderInfo string + +// Enum values for FileHeaderInfo +const ( + FileHeaderInfoUse FileHeaderInfo = "USE" + FileHeaderInfoIgnore FileHeaderInfo = "IGNORE" + FileHeaderInfoNone FileHeaderInfo = "NONE" +) + +// Values returns all known values for FileHeaderInfo. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (FileHeaderInfo) Values() []FileHeaderInfo { + return []FileHeaderInfo{ + "USE", + "IGNORE", + "NONE", + } +} + +type FilterRuleName string + +// Enum values for FilterRuleName +const ( + FilterRuleNamePrefix FilterRuleName = "prefix" + FilterRuleNameSuffix FilterRuleName = "suffix" +) + +// Values returns all known values for FilterRuleName. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FilterRuleName) Values() []FilterRuleName { + return []FilterRuleName{ + "prefix", + "suffix", + } +} + +type IntelligentTieringAccessTier string + +// Enum values for IntelligentTieringAccessTier +const ( + IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS" + IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" +) + +// Values returns all known values for IntelligentTieringAccessTier. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { + return []IntelligentTieringAccessTier{ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS", + } +} + +type IntelligentTieringStatus string + +// Enum values for IntelligentTieringStatus +const ( + IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled" + IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" +) + +// Values returns all known values for IntelligentTieringStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { + return []IntelligentTieringStatus{ + "Enabled", + "Disabled", + } +} + +type InventoryFormat string + +// Enum values for InventoryFormat +const ( + InventoryFormatCsv InventoryFormat = "CSV" + InventoryFormatOrc InventoryFormat = "ORC" + InventoryFormatParquet InventoryFormat = "Parquet" +) + +// Values returns all known values for InventoryFormat. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryFormat) Values() []InventoryFormat { + return []InventoryFormat{ + "CSV", + "ORC", + "Parquet", + } +} + +type InventoryFrequency string + +// Enum values for InventoryFrequency +const ( + InventoryFrequencyDaily InventoryFrequency = "Daily" + InventoryFrequencyWeekly InventoryFrequency = "Weekly" +) + +// Values returns all known values for InventoryFrequency. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryFrequency) Values() []InventoryFrequency { + return []InventoryFrequency{ + "Daily", + "Weekly", + } +} + +type InventoryIncludedObjectVersions string + +// Enum values for InventoryIncludedObjectVersions +const ( + InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All" + InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current" +) + +// Values returns all known values for InventoryIncludedObjectVersions. 
Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { + return []InventoryIncludedObjectVersions{ + "All", + "Current", + } +} + +type InventoryOptionalField string + +// Enum values for InventoryOptionalField +const ( + InventoryOptionalFieldSize InventoryOptionalField = "Size" + InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate" + InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass" + InventoryOptionalFieldETag InventoryOptionalField = "ETag" + InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded" + InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus" + InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus" + InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate" + InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode" + InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus" + InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" + InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" + InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" +) + +// Values returns all known values for InventoryOptionalField. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryOptionalField) Values() []InventoryOptionalField { + return []InventoryOptionalField{ + "Size", + "LastModifiedDate", + "StorageClass", + "ETag", + "IsMultipartUploaded", + "ReplicationStatus", + "EncryptionStatus", + "ObjectLockRetainUntilDate", + "ObjectLockMode", + "ObjectLockLegalHoldStatus", + "IntelligentTieringAccessTier", + "BucketKeyStatus", + "ChecksumAlgorithm", + } +} + +type JSONType string + +// Enum values for JSONType +const ( + JSONTypeDocument JSONType = "DOCUMENT" + JSONTypeLines JSONType = "LINES" +) + +// Values returns all known values for JSONType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (JSONType) Values() []JSONType { + return []JSONType{ + "DOCUMENT", + "LINES", + } +} + +type MetadataDirective string + +// Enum values for MetadataDirective +const ( + MetadataDirectiveCopy MetadataDirective = "COPY" + MetadataDirectiveReplace MetadataDirective = "REPLACE" +) + +// Values returns all known values for MetadataDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetadataDirective) Values() []MetadataDirective { + return []MetadataDirective{ + "COPY", + "REPLACE", + } +} + +type MetricsStatus string + +// Enum values for MetricsStatus +const ( + MetricsStatusEnabled MetricsStatus = "Enabled" + MetricsStatusDisabled MetricsStatus = "Disabled" +) + +// Values returns all known values for MetricsStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetricsStatus) Values() []MetricsStatus { + return []MetricsStatus{ + "Enabled", + "Disabled", + } +} + +type MFADelete string + +// Enum values for MFADelete +const ( + MFADeleteEnabled MFADelete = "Enabled" + MFADeleteDisabled MFADelete = "Disabled" +) + +// Values returns all known values for MFADelete. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (MFADelete) Values() []MFADelete { + return []MFADelete{ + "Enabled", + "Disabled", + } +} + +type MFADeleteStatus string + +// Enum values for MFADeleteStatus +const ( + MFADeleteStatusEnabled MFADeleteStatus = "Enabled" + MFADeleteStatusDisabled MFADeleteStatus = "Disabled" +) + +// Values returns all known values for MFADeleteStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MFADeleteStatus) Values() []MFADeleteStatus { + return []MFADeleteStatus{ + "Enabled", + "Disabled", + } +} + +type ObjectAttributes string + +// Enum values for ObjectAttributes +const ( + ObjectAttributesEtag ObjectAttributes = "ETag" + ObjectAttributesChecksum ObjectAttributes = "Checksum" + ObjectAttributesObjectParts ObjectAttributes = "ObjectParts" + ObjectAttributesStorageClass ObjectAttributes = "StorageClass" + ObjectAttributesObjectSize ObjectAttributes = "ObjectSize" +) + +// Values returns all known values for ObjectAttributes. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectAttributes) Values() []ObjectAttributes { + return []ObjectAttributes{ + "ETag", + "Checksum", + "ObjectParts", + "StorageClass", + "ObjectSize", + } +} + +type ObjectCannedACL string + +// Enum values for ObjectCannedACL +const ( + ObjectCannedACLPrivate ObjectCannedACL = "private" + ObjectCannedACLPublicRead ObjectCannedACL = "public-read" + ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write" + ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read" + ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read" + ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read" + ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control" +) + +// Values returns all known values for ObjectCannedACL. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectCannedACL) Values() []ObjectCannedACL { + return []ObjectCannedACL{ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + } +} + +type ObjectLockEnabled string + +// Enum values for ObjectLockEnabled +const ( + ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" +) + +// Values returns all known values for ObjectLockEnabled. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ObjectLockEnabled) Values() []ObjectLockEnabled { + return []ObjectLockEnabled{ + "Enabled", + } +} + +type ObjectLockLegalHoldStatus string + +// Enum values for ObjectLockLegalHoldStatus +const ( + ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON" + ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF" +) + +// Values returns all known values for ObjectLockLegalHoldStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { + return []ObjectLockLegalHoldStatus{ + "ON", + "OFF", + } +} + +type ObjectLockMode string + +// Enum values for ObjectLockMode +const ( + ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE" + ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockMode) Values() []ObjectLockMode { + return []ObjectLockMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectLockRetentionMode string + +// Enum values for ObjectLockRetentionMode +const ( + ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE" + ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockRetentionMode. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { + return []ObjectLockRetentionMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectOwnership string + +// Enum values for ObjectOwnership +const ( + ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred" + ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter" + ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced" +) + +// Values returns all known values for ObjectOwnership. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectOwnership) Values() []ObjectOwnership { + return []ObjectOwnership{ + "BucketOwnerPreferred", + "ObjectWriter", + "BucketOwnerEnforced", + } +} + +type ObjectStorageClass string + +// Enum values for ObjectStorageClass +const ( + ObjectStorageClassStandard ObjectStorageClass = "STANDARD" + ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" + ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" + ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" + ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" + ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" + ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" + ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" + ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" +) + +// Values returns all known values for ObjectStorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
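+//
+// A hedged sketch of acting on these values when inspecting listed objects;
+// the helper and its policy are illustrative, not part of the generated file:
+//
+//	// needsRestore reports whether an object in this storage class must be
+//	// restored before its data can be read with GetObject.
+//	func needsRestore(c ObjectStorageClass) bool {
+//		switch c {
+//		case ObjectStorageClassGlacier, ObjectStorageClassDeepArchive:
+//			return true
+//		default:
+//			return false
+//		}
+//	}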
+func (ObjectStorageClass) Values() []ObjectStorageClass { + return []ObjectStorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + } +} + +type ObjectVersionStorageClass string + +// Enum values for ObjectVersionStorageClass +const ( + ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD" +) + +// Values returns all known values for ObjectVersionStorageClass. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { + return []ObjectVersionStorageClass{ + "STANDARD", + } +} + +type OwnerOverride string + +// Enum values for OwnerOverride +const ( + OwnerOverrideDestination OwnerOverride = "Destination" +) + +// Values returns all known values for OwnerOverride. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (OwnerOverride) Values() []OwnerOverride { + return []OwnerOverride{ + "Destination", + } +} + +type Payer string + +// Enum values for Payer +const ( + PayerRequester Payer = "Requester" + PayerBucketOwner Payer = "BucketOwner" +) + +// Values returns all known values for Payer. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Payer) Values() []Payer { + return []Payer{ + "Requester", + "BucketOwner", + } +} + +type Permission string + +// Enum values for Permission +const ( + PermissionFullControl Permission = "FULL_CONTROL" + PermissionWrite Permission = "WRITE" + PermissionWriteAcp Permission = "WRITE_ACP" + PermissionRead Permission = "READ" + PermissionReadAcp Permission = "READ_ACP" +) + +// Values returns all known values for Permission. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (Permission) Values() []Permission { + return []Permission{ + "FULL_CONTROL", + "WRITE", + "WRITE_ACP", + "READ", + "READ_ACP", + } +} + +type Protocol string + +// Enum values for Protocol +const ( + ProtocolHttp Protocol = "http" + ProtocolHttps Protocol = "https" +) + +// Values returns all known values for Protocol. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Protocol) Values() []Protocol { + return []Protocol{ + "http", + "https", + } +} + +type QuoteFields string + +// Enum values for QuoteFields +const ( + QuoteFieldsAlways QuoteFields = "ALWAYS" + QuoteFieldsAsneeded QuoteFields = "ASNEEDED" +) + +// Values returns all known values for QuoteFields. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
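+//
+// For illustration, a CSVOutput (defined in this package) that quotes fields
+// only when necessary could be built as follows; the delimiter choice is an
+// assumption:
+//
+//	delim := "\n"
+//	out := CSVOutput{
+//		QuoteFields:     QuoteFieldsAsneeded,
+//		RecordDelimiter: &delim,
+//	}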
+func (QuoteFields) Values() []QuoteFields { + return []QuoteFields{ + "ALWAYS", + "ASNEEDED", + } +} + +type ReplicaModificationsStatus string + +// Enum values for ReplicaModificationsStatus +const ( + ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled" + ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled" +) + +// Values returns all known values for ReplicaModificationsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { + return []ReplicaModificationsStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationRuleStatus string + +// Enum values for ReplicationRuleStatus +const ( + ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled" + ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" +) + +// Values returns all known values for ReplicationRuleStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { + return []ReplicationRuleStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationStatus string + +// Enum values for ReplicationStatus +const ( + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusPending ReplicationStatus = "PENDING" + ReplicationStatusFailed ReplicationStatus = "FAILED" + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Values returns all known values for ReplicationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationStatus) Values() []ReplicationStatus { + return []ReplicationStatus{ + "COMPLETE", + "PENDING", + "FAILED", + "REPLICA", + } +} + +type ReplicationTimeStatus string + +// Enum values for ReplicationTimeStatus +const ( + ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" + ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" +) + +// Values returns all known values for ReplicationTimeStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { + return []ReplicationTimeStatus{ + "Enabled", + "Disabled", + } +} + +type RequestCharged string + +// Enum values for RequestCharged +const ( + RequestChargedRequester RequestCharged = "requester" +) + +// Values returns all known values for RequestCharged. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RequestCharged) Values() []RequestCharged { + return []RequestCharged{ + "requester", + } +} + +type RequestPayer string + +// Enum values for RequestPayer +const ( + RequestPayerRequester RequestPayer = "requester" +) + +// Values returns all known values for RequestPayer. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
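+//
+// A minimal requester-pays sketch, assuming the s3 package's GetObjectInput;
+// bucket and key are placeholders:
+//
+//	bucket, key := "example-bucket", "example-key"
+//	input := &s3.GetObjectInput{
+//		Bucket:       &bucket,
+//		Key:          &key,
+//		RequestPayer: types.RequestPayerRequester,
+//	}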
+func (RequestPayer) Values() []RequestPayer { + return []RequestPayer{ + "requester", + } +} + +type RestoreRequestType string + +// Enum values for RestoreRequestType +const ( + RestoreRequestTypeSelect RestoreRequestType = "SELECT" +) + +// Values returns all known values for RestoreRequestType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RestoreRequestType) Values() []RestoreRequestType { + return []RestoreRequestType{ + "SELECT", + } +} + +type ServerSideEncryption string + +// Enum values for ServerSideEncryption +const ( + ServerSideEncryptionAes256 ServerSideEncryption = "AES256" + ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" +) + +// Values returns all known values for ServerSideEncryption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ServerSideEncryption) Values() []ServerSideEncryption { + return []ServerSideEncryption{ + "AES256", + "aws:kms", + } +} + +type SseKmsEncryptedObjectsStatus string + +// Enum values for SseKmsEncryptedObjectsStatus +const ( + SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled" + SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" +) + +// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { + return []SseKmsEncryptedObjectsStatus{ + "Enabled", + "Disabled", + } +} + +type StorageClass string + +// Enum values for StorageClass +const ( + StorageClassStandard StorageClass = "STANDARD" + StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" + StorageClassStandardIa StorageClass = "STANDARD_IA" + StorageClassOnezoneIa StorageClass = "ONEZONE_IA" + StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" + StorageClassGlacier StorageClass = "GLACIER" + StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" + StorageClassOutposts StorageClass = "OUTPOSTS" + StorageClassGlacierIr StorageClass = "GLACIER_IR" +) + +// Values returns all known values for StorageClass. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (StorageClass) Values() []StorageClass { + return []StorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + } +} + +type StorageClassAnalysisSchemaVersion string + +// Enum values for StorageClassAnalysisSchemaVersion +const ( + StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" +) + +// Values returns all known values for StorageClassAnalysisSchemaVersion. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. 
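+//
+// A hedged caller-side sketch combining the encryption and storage-class
+// enums above, assuming the s3 package's PutObjectInput; the bucket, key, and
+// KMS alias are placeholders:
+//
+//	bucket, key := "example-bucket", "example-key"
+//	kmsKey := "alias/example"
+//	put := &s3.PutObjectInput{
+//		Bucket:               &bucket,
+//		Key:                  &key,
+//		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
+//		SSEKMSKeyId:          &kmsKey,
+//		StorageClass:         types.StorageClassStandardIa,
+//	}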
+func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { + return []StorageClassAnalysisSchemaVersion{ + "V_1", + } +} + +type TaggingDirective string + +// Enum values for TaggingDirective +const ( + TaggingDirectiveCopy TaggingDirective = "COPY" + TaggingDirectiveReplace TaggingDirective = "REPLACE" +) + +// Values returns all known values for TaggingDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TaggingDirective) Values() []TaggingDirective { + return []TaggingDirective{ + "COPY", + "REPLACE", + } +} + +type Tier string + +// Enum values for Tier +const ( + TierStandard Tier = "Standard" + TierBulk Tier = "Bulk" + TierExpedited Tier = "Expedited" +) + +// Values returns all known values for Tier. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Tier) Values() []Tier { + return []Tier{ + "Standard", + "Bulk", + "Expedited", + } +} + +type TransitionStorageClass string + +// Enum values for TransitionStorageClass +const ( + TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" + TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" + TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" + TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" + TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" + TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR" +) + +// Values returns all known values for TransitionStorageClass. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TransitionStorageClass) Values() []TransitionStorageClass { + return []TransitionStorageClass{ + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "GLACIER_IR", + } +} + +type Type string + +// Enum values for Type +const ( + TypeCanonicalUser Type = "CanonicalUser" + TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail" + TypeGroup Type = "Group" +) + +// Values returns all known values for Type. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Type) Values() []Type { + return []Type{ + "CanonicalUser", + "AmazonCustomerByEmail", + "Group", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go new file mode 100644 index 000000000000..8c3a386f7f23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The requested bucket name is not available. The bucket namespace is shared by +// all users of the system. Select a different name and try again. 
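+//
+// Callers typically detect this error with errors.As; a minimal sketch
+// (client construction elided, the bucket name is a placeholder):
+//
+//	name := "example-bucket"
+//	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{Bucket: &name})
+//	var exists *BucketAlreadyExists
+//	if errors.As(err, &exists) {
+//		// choose a different, globally unique name and retry
+//	}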
+type BucketAlreadyExists struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyExists) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyExists) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyExists) ErrorCode() string { return "BucketAlreadyExists" } +func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The bucket you tried to create already exists, and you own it. Amazon S3 returns +// this error in all Amazon Web Services Regions except in the North Virginia +// Region. For legacy compatibility, if you re-create an existing bucket that you +// already own in the North Virginia Region, Amazon S3 returns 200 OK and resets +// the bucket access control lists (ACLs). +type BucketAlreadyOwnedByYou struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyOwnedByYou) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyOwnedByYou) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyOwnedByYou) ErrorCode() string { return "BucketAlreadyOwnedByYou" } +func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. +type InvalidObjectState struct { + Message *string + + StorageClass StorageClass + AccessTier IntelligentTieringAccessTier + + noSmithyDocumentSerde +} + +func (e *InvalidObjectState) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidObjectState) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidObjectState) ErrorCode() string { return "InvalidObjectState" } +func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified bucket does not exist. +type NoSuchBucket struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchBucket) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchBucket) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchBucket) ErrorCode() string { return "NoSuchBucket" } +func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified key does not exist. +type NoSuchKey struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchKey) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchKey) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchKey) ErrorCode() string { return "NoSuchKey" } +func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified multipart upload does not exist. +type NoSuchUpload struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchUpload) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchUpload) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchUpload) ErrorCode() string { return "NoSuchUpload" } +func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified content does not exist. 
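+//
+// HeadObject is a common source of this error; a minimal detection sketch
+// (client construction elided, bucket and key are placeholders):
+//
+//	_, err := client.HeadObject(ctx, &s3.HeadObjectInput{Bucket: &bucket, Key: &key})
+//	var nf *NotFound
+//	if errors.As(err, &nf) {
+//		// the object does not exist
+//	}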
+type NotFound struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NotFound) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFound) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFound) ErrorCode() string { return "NotFound" } +func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// This action is not allowed against this storage tier. +type ObjectAlreadyInActiveTierError struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ObjectAlreadyInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { return "ObjectAlreadyInActiveTierError" } +func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +type ObjectNotInActiveTierError struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ObjectNotInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectNotInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectNotInActiveTierError) ErrorCode() string { return "ObjectNotInActiveTierError" } +func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go new file mode 100644 index 000000000000..a9692d006472 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -0,0 +1,3888 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Specifies the days since the initiation of an incomplete multipart upload that +// Amazon S3 will wait before permanently removing all parts of the upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon S3 User Guide. +type AbortIncompleteMultipartUpload struct { + + // Specifies the number of days after which Amazon S3 aborts an incomplete + // multipart upload. + DaysAfterInitiation int32 + + noSmithyDocumentSerde +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in +// the Amazon S3 User Guide. +type AccelerateConfiguration struct { + + // Specifies the transfer acceleration status of the bucket. + Status BucketAccelerateStatus + + noSmithyDocumentSerde +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + + // A list of grants. + Grants []Grant + + // Container for the bucket owner's display name and ID. + Owner *Owner + + noSmithyDocumentSerde +} + +// A container for information about access control for replicas. 
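+//
+// A hedged sketch of where this appears, inside a replication rule's
+// Destination (defined later in this file); the ARN is a placeholder:
+//
+//	bucketARN := "arn:aws:s3:::example-destination-bucket"
+//	dest := Destination{
+//		Bucket: &bucketARN,
+//		AccessControlTranslation: &AccessControlTranslation{
+//			Owner: OwnerOverrideDestination,
+//		},
+//	}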
+type AccessControlTranslation struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon S3 API Reference. + // + // This member is required. + Owner OwnerOverride + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates in any combination, and +// an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string + + // The list of tags to use when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies the configuration and any analyses for the analytics filter of an +// Amazon S3 bucket. +type AnalyticsConfiguration struct { + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // Contains data related to access patterns to be collected and made available to + // analyze the tradeoffs between different storage classes. + // + // This member is required. + StorageClassAnalysis *StorageClassAnalysis + + // The filter used to describe a set of objects for analyses. A filter must have + // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no + // filter is provided, all objects will be considered in any analysis. + Filter AnalyticsFilter + + noSmithyDocumentSerde +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + + // A destination signifying output to an S3 bucket. + // + // This member is required. + S3BucketDestination *AnalyticsS3BucketDestination + + noSmithyDocumentSerde +} + +// The filter used to describe a set of objects for analyses. A filter must have +// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no +// filter is provided, all objects will be considered in any analysis. +// +// The following types satisfy this interface: +// AnalyticsFilterMemberAnd +// AnalyticsFilterMemberPrefix +// AnalyticsFilterMemberTag +type AnalyticsFilter interface { + isAnalyticsFilter() +} + +// A conjunction (logical AND) of predicates, which is used in evaluating an +// analytics filter. The operator must have at least two predicates. +type AnalyticsFilterMemberAnd struct { + Value AnalyticsAndOperator + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {} + +// The prefix to use when evaluating an analytics filter. +type AnalyticsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {} + +// The tag to use when evaluating an analytics filter. +type AnalyticsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // This member is required. + Bucket *string + + // Specifies the file format used when exporting data to Amazon S3. + // + // This member is required. + Format AnalyticsS3ExportFileFormat + + // The account ID that owns the destination S3 bucket. 
If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + BucketAccountId *string + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string + + noSmithyDocumentSerde +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is +// globally unique, and the namespace is shared by all Amazon Web Services +// accounts. +type Bucket struct { + + // Date the bucket was created. This date can change when making changes to your + // bucket, such as editing its bucket policy. + CreationDate *time.Time + + // The name of the bucket. + Name *string + + noSmithyDocumentSerde +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For +// more information, see Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in +// the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // This member is required. + Rules []LifecycleRule + + noSmithyDocumentSerde +} + +// Container for logging status information. +type BucketLoggingStatus struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all log + // object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in + // the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled + + noSmithyDocumentSerde +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + noSmithyDocumentSerde +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act like +// subdirectories in the directory specified by Prefix. For example, if the prefix +// is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common +// prefix is notes/summer/. +type CommonPrefix struct { + + // Container for the specified common prefix. + Prefix *string + + noSmithyDocumentSerde +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + + // Array of CompletedPart data types. If you do not supply a valid Part with your + // request, the service sends back an HTTP 400 response. + Parts []CompletedPart + + noSmithyDocumentSerde +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag returned when the part was uploaded. + ETag *string + + // Part number that identifies the part. This is a positive integer between 1 and + // 10,000. + PartNumber int32 + + noSmithyDocumentSerde +} + +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. 
If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. +type Condition struct { + + // The HTTP error code when the redirect is applied. In the event of an error, if + // the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will be + // /docs, which identifies all objects in the docs/ folder. Required when the + // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is + // not specified. If both conditions are specified, both must be true for the + // redirect to be applied. Replacement must be made for object keys containing + // special characters (such as carriage returns) when using XML requests. For more + // information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + KeyPrefixEquals *string + + noSmithyDocumentSerde +} + +// +type ContinuationEvent struct { + noSmithyDocumentSerde +} + +// Container for all response elements. +type CopyObjectResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + ETag *string + + // Creation date of the object. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Container for all response elements. +type CopyPartResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag of the object. + ETag *string + + // Date and time at which the object was uploaded. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Describes the cross-origin access configuration for objects in an Amazon S3 +// bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. +type CORSConfiguration struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + // + // This member is required. + CORSRules []CORSRule + + noSmithyDocumentSerde +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + + // An HTTP method that you allow the origin to execute. Valid values are GET, PUT, + // HEAD, POST, and DELETE. + // + // This member is required. 
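+ //
+ // An illustrative value (an assumption, not from the generated
+ // documentation): []string{"GET", "PUT"}.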
+ AllowedMethods []string + + // One or more origins you want customers to be able to access the bucket from. + // + // This member is required. + AllowedOrigins []string + + // Headers that are specified in the Access-Control-Request-Headers header. These + // headers are allowed in a preflight OPTIONS request. In response to any preflight + // OPTIONS request, Amazon S3 returns any requested headers that are allowed. + AllowedHeaders []string + + // One or more headers in the response that you want customers to be able to access + // from their applications (for example, from a JavaScript XMLHttpRequest object). + ExposeHeaders []string + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string + + // The time in seconds that your browser is to cache the preflight response for the + // specified resource. + MaxAgeSeconds int32 + + noSmithyDocumentSerde +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + + // Specifies the Region where the bucket will be created. If you don't specify a + // Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + LocationConstraint BucketLocationConstraint + + noSmithyDocumentSerde +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + + // Specifies that CSV field values may contain quoted record delimiters and such + // records should be allowed. Default value is FALSE. Setting this value to TRUE + // may lower performance. + AllowQuotedRecordDelimiter bool + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character to + // indicate a comment line. + Comments *string + + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. + FieldDelimiter *string + + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not + // a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such as + // _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First + // line is a header, and you can use the header value to identify a column in an + // expression (SELECT "name" FROM OBJECT). + FileHeaderInfo FileHeaderInfo + + // A single character used for escaping when the field delimiter is part of the + // value. For example, if the value is a, b, Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b ". Type: String Default: " Ancestors: CSV + QuoteCharacter *string + + // A single character used for escaping the quotation mark character inside an + // already escaped value. For example, the value """ a , b """ is parsed as " a , b + // ". + QuoteEscapeCharacter *string + + // A single character used to separate individual records in the input. Instead of + // the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results are +// formatted. +type CSVOutput struct { + + // The value used to separate individual fields in a record. You can specify an + // arbitrary delimiter. + FieldDelimiter *string + + // A single character used for escaping when the field delimiter is part of the + // value. 
For example, if the value is a, b, Amazon S3 wraps this field value in
+ // quotation marks, as follows: " a , b ".
+ QuoteCharacter *string
+
+ // The single character used for escaping the quote character inside an already
+ // escaped value.
+ QuoteEscapeCharacter *string
+
+ // Indicates whether to use quotation marks around output fields.
+ //
+ // * ALWAYS: Always
+ // use quotation marks for output fields.
+ //
+ // * ASNEEDED: Use quotation marks for
+ // output fields when needed.
+ QuoteFields QuoteFields
+
+ // A single character used to separate individual records in the output. Instead of
+ // the default value, you can specify an arbitrary delimiter.
+ RecordDelimiter *string
+
+ noSmithyDocumentSerde
+}
+
+// The container element for specifying the default Object Lock retention settings
+// for new objects placed in the specified bucket.
+//
+// * The DefaultRetention settings
+// require both a mode and a period.
+//
+// * The DefaultRetention period can be either
+// Days or Years but you must select one. You cannot specify Days and Years at the
+// same time.
+type DefaultRetention struct {
+
+ // The number of days that you want to specify for the default retention period.
+ // Must be used with Mode.
+ Days int32
+
+ // The default Object Lock retention mode you want to apply to new objects placed
+ // in the specified bucket. Must be used with either Days or Years.
+ Mode ObjectLockRetentionMode
+
+ // The number of years that you want to specify for the default retention period.
+ // Must be used with Mode.
+ Years int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for the objects to delete.
+type Delete struct {
+
+ // The objects to delete.
+ //
+ // This member is required.
+ Objects []ObjectIdentifier
+
+ // Element to enable quiet mode for the request. When you add this element, you
+ // must set its value to true.
+ Quiet bool
+
+ noSmithyDocumentSerde
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker. In a simple DELETE, this header indicates
+ // whether (true) or not (false) a delete marker was created.
+ DeleteMarker bool
+
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header is
+ // the version ID of the object version deleted.
+ DeleteMarkerVersionId *string
+
+ // The name of the deleted object.
+ Key *string
+
+ // The version ID of the deleted object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest bool
+
+ // The object key.
+ Key *string
+
+ // Date and time the object was last modified.
+ LastModified *time.Time
+
+ // The account that created the delete marker.
+ Owner *Owner
+
+ // Version ID of an object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a
+// DeleteMarkerReplication element. If your Filter includes a Tag element, the
+// DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+// not support replicating delete markers for tag-based rules.
For an example
+// configuration, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+// For more information about delete marker replication, see Basic Rule
+// Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see
+// Backward Compatibility
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+
+ // Indicates whether to replicate delete markers.
+ Status DeleteMarkerReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // the results.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specify this only in a cross-account scenario (where source and destination
+ // bucket owners are not the same), and you want to change replica ownership to the
+ // Amazon Web Services account that owns the destination bucket. If this is not
+ // specified in the replication configuration, the replicas are owned by same
+ // Amazon Web Services account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you direct
+ // Amazon S3 to change replica ownership to the Amazon Web Services account that
+ // owns the destination bucket by specifying the AccessControlTranslation property,
+ // this is the account ID of the destination bucket owner. For more information,
+ // see Replication Additional Configuration: Changing the Replica Owner
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+ // in the Amazon S3 User Guide.
+ Account *string
+
+ // A container that provides information about encryption. If
+ // SourceSelectionCriteria is specified, you must specify this element.
+ EncryptionConfiguration *EncryptionConfiguration
+
+ // A container specifying replication metrics-related settings enabling replication
+ // metrics and events.
+ Metrics *Metrics
+
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // S3 RTC is enabled and the time when all objects and operations on objects must
+ // be replicated. Must be specified together with a Metrics block.
+ ReplicationTime *ReplicationTime
+
+ // The storage class to use when replicating objects, such as S3 Standard or
+ // reduced redundancy. By default, Amazon S3 uses the storage class of the source
+ // object to create the object replica. For valid values, see the StorageClass
+ // element of the PUT Bucket replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // action in the Amazon S3 API Reference.
+ StorageClass StorageClass
+
+ noSmithyDocumentSerde
+}
+
+// Contains the type of server-side encryption used.
+type Encryption struct {
+
+ // The server-side encryption algorithm used when storing job results in Amazon S3
+ // (for example, AES256, aws:kms).
+ //
+ // This member is required.
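+ //
+ // An illustrative value (an assumption, not from the generated
+ // documentation): ServerSideEncryptionAes256.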
+ EncryptionType ServerSideEncryption + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string + + // If the encryption type is aws:kms, this optional value specifies the ID of the + // symmetric customer managed key to use for encryption of job results. Amazon S3 + // only supports symmetric keys. For more information, see Using symmetric and + // asymmetric keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + KMSKeyId *string + + noSmithyDocumentSerde +} + +// Specifies encryption-related information for an Amazon S3 bucket that is a +// destination for replicated objects. +type EncryptionConfiguration struct { + + // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web + // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for + // the destination bucket. Amazon S3 uses this key to encrypt replica objects. + // Amazon S3 only supports symmetric, customer managed KMS keys. For more + // information, see Using symmetric and asymmetric keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + ReplicaKmsKeyID *string + + noSmithyDocumentSerde +} + +// A message that indicates the request is complete and no more messages will be +// sent. You should not assume that the request is complete until the client +// receives an EndEvent. +type EndEvent struct { + noSmithyDocumentSerde +} + +// Container for all error elements. +type Error struct { + + // The error code is a string that uniquely identifies an error condition. It is + // meant to be read and understood by programs that detect and handle errors by + // type. Amazon S3 error codes + // + // * Code: AccessDenied + // + // * Description: Access + // Denied + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: AccountProblem + // + // * Description: There is a problem with your Amazon Web + // Services account that prevents the action from completing successfully. Contact + // Amazon Web Services Support for further assistance. + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: AllAccessDisabled + // + // * + // Description: All access to this Amazon S3 resource has been disabled. Contact + // Amazon Web Services Support for further assistance. + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // AmbiguousGrantByEmailAddress + // + // * Description: The email address you provided is + // associated with more than one account. + // + // * HTTP Status Code: 400 Bad Request + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed + // + // * + // Description: The authorization header you provided is invalid. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * HTTP Status Code: N/A + // + // * Code: BadDigest + // + // * + // Description: The Content-MD5 you specified did not match what we received. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // BucketAlreadyExists + // + // * Description: The requested bucket name is not available. 
+ // The bucket namespace is shared by all users of the system. Please select a + // different name and try again. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou + // + // * Description: The bucket + // you tried to create already exists, and you own it. Amazon S3 returns this error + // in all Amazon Web Services Regions except in the North Virginia Region. For + // legacy compatibility, if you re-create an existing bucket that you already own + // in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket + // access control lists (ACLs). + // + // * Code: 409 Conflict (in all Regions except the + // North Virginia Region) + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // BucketNotEmpty + // + // * Description: The bucket you tried to delete is not empty. + // + // * + // HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // CredentialsNotSupported + // + // * Description: This request does not support + // credentials. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited + // + // * Description: Cross-location + // logging not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall + // + // * Description: Your proposed + // upload is smaller than the minimum allowed object size. + // + // * HTTP Status Code: 400 + // Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: EntityTooLarge + // + // * + // Description: Your proposed upload exceeds the maximum allowed object size. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // ExpiredToken + // + // * Description: The provided token has expired. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // IllegalVersioningConfigurationException + // + // * Description: Indicates that the + // versioning configuration specified in the request is invalid. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // IncompleteBody + // + // * Description: You did not provide the number of bytes specified + // by the Content-Length HTTP header + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest + // + // * + // Description: POST requires exactly one file upload per request. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InlineDataTooLarge + // + // * Description: Inline data exceeds the maximum allowed + // size. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: InternalError + // + // * Description: We encountered an internal error. Please try + // again. + // + // * HTTP Status Code: 500 Internal Server Error + // + // * SOAP Fault Code Prefix: + // Server + // + // * Code: InvalidAccessKeyId + // + // * Description: The Amazon Web Services + // access key ID you provided does not exist in our records. 
+ // + // * HTTP Status Code: + // 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidAddressingHeader + // + // * Description: You must specify the Anonymous role. + // + // * + // HTTP Status Code: N/A + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidArgument + // + // * Description: Invalid Argument + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName + // + // * + // Description: The specified bucket is not valid. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState + // + // * + // Description: The request is not valid with the current state of the bucket. + // + // * + // HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidDigest + // + // * Description: The Content-MD5 you specified is not valid. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidEncryptionAlgorithmError + // + // * Description: The encryption request you + // specified is not valid. The valid value is AES256. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint + // + // * + // Description: The specified location constraint is not valid. For more + // information about Regions, see How to Select a Region for Your Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidObjectState + // + // * Description: The action is not valid for the current state + // of the object. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidPart + // + // * Description: One or more of the specified parts + // could not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder + // + // * + // Description: The list of parts was not in ascending order. Parts list must be + // specified in order by part number. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPayer + // + // * Description: All access to + // this object has been disabled. Please contact Amazon Web Services Support for + // further assistance. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidPolicyDocument + // + // * Description: The content of the + // form does not meet the conditions specified in the policy document. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidRange + // + // * Description: The requested range cannot be satisfied. + // + // * HTTP + // Status Code: 416 Requested Range Not Satisfiable + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest + // + // * Description: Please use AWS4-HMAC-SHA256. 
+ // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: SOAP requests must be made over an HTTPS connection. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Acceleration is not supported + // for buckets with non-DNS compliant names. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * Description: Amazon S3 Transfer + // Acceleration is not supported for buckets with periods (.) in their names. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style + // requests. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Accelerate is not configured + // on this bucket. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Accelerate is disabled on this + // bucket. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Acceleration is not supported + // on this bucket. Contact Amazon Web Services Support for more information. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. + // Contact Amazon Web Services Support for more information. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidSecurity + // + // * Description: The + // provided security credentials are not valid. + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidSOAPRequest + // + // * + // Description: The SOAP request body is invalid. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass + // + // * + // Description: The storage class you specified is not valid. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidTargetBucketForLogging + // + // * Description: The target bucket for logging does + // not exist, is not owned by you, or does not have the appropriate grants for the + // log-delivery group. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidToken + // + // * Description: The provided token is + // malformed or otherwise invalid. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidURI + // + // * Description: Couldn't parse the + // specified URI. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: KeyTooLongError + // + // * Description: Your key is too long. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // MalformedACLError + // + // * Description: The XML you provided was not well-formed or + // did not validate against our published schema. 
+ // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest + // + // * + // Description: The body of your POST request is not well-formed + // multipart/form-data. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: MalformedXML + // + // * Description: This happens when the user + // sends malformed XML (XML that doesn't conform to the published XSD) for the + // configuration. The error message is, "The XML you provided was not well-formed + // or did not validate against our published schema." + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded + // + // * + // Description: Your request was too big. + // + // * HTTP Status Code: 400 Bad Request + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError + // + // * + // Description: Your POST request fields preceding the upload file were too + // large. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: MetadataTooLarge + // + // * Description: Your metadata headers exceed the maximum + // allowed metadata size. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: MethodNotAllowed + // + // * Description: The specified method is + // not allowed against this resource. + // + // * HTTP Status Code: 405 Method Not + // Allowed + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MissingAttachment + // + // * + // Description: A SOAP attachment was expected, but none were found. + // + // * HTTP Status + // Code: N/A + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength + // + // * + // Description: You must provide the Content-Length HTTP header. + // + // * HTTP Status + // Code: 411 Length Required + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // MissingRequestBodyError + // + // * Description: This happens when the user sends an + // empty XML document as a request. The error message is, "Request body is + // empty." + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement + // + // * Description: The SOAP 1.1 request is + // missing a security element. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: MissingSecurityHeader + // + // * Description: Your request + // is missing a required header. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: NoLoggingStatusForKey + // + // * Description: There is no + // such thing as a logging status subresource for a key. + // + // * HTTP Status Code: 400 + // Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket + // + // * + // Description: The specified bucket does not exist. + // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy + // + // * + // Description: The specified bucket does not have a bucket policy. + // + // * HTTP Status + // Code: 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchKey + // + // * + // Description: The specified key does not exist. 
+ // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration + // + // * + // Description: The lifecycle configuration does not exist. + // + // * HTTP Status Code: + // 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchUpload + // + // * + // Description: The specified multipart upload does not exist. The upload ID might + // be invalid, or the multipart upload might have been aborted or completed. + // + // * + // HTTP Status Code: 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // NoSuchVersion + // + // * Description: Indicates that the version ID specified in the + // request does not match an existing version. + // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented + // + // * Description: + // A header you provided implies functionality that is not implemented. + // + // * HTTP + // Status Code: 501 Not Implemented + // + // * SOAP Fault Code Prefix: Server + // + // * Code: + // NotSignedUp + // + // * Description: Your account is not signed up for the Amazon S3 + // service. You must sign up before you can use Amazon S3. You can sign up at the + // following URL: Amazon S3 (http://aws.amazon.com/s3) + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted + // + // * + // Description: A conflicting conditional action is currently in progress against + // this resource. Try again. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: PermanentRedirect + // + // * Description: The bucket you are + // attempting to access must be addressed using the specified endpoint. Send all + // future requests to this endpoint. + // + // * HTTP Status Code: 301 Moved Permanently + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: PreconditionFailed + // + // * Description: At + // least one of the preconditions you specified did not hold. + // + // * HTTP Status Code: + // 412 Precondition Failed + // + // * SOAP Fault Code Prefix: Client + // + // * Code: Redirect + // + // * + // Description: Temporary redirect. + // + // * HTTP Status Code: 307 Moved Temporarily + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress + // + // * Description: + // Object restore is already in progress. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent + // + // * Description: + // Bucket POST must be of the enclosure-type multipart/form-data. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // RequestTimeout + // + // * Description: Your socket connection to the server was not read + // from or written to within the timeout period. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed + // + // * + // Description: The difference between the request time and the server's time is + // too large. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: RequestTorrentOfBucketError + // + // * Description: Requesting the + // torrent file of a bucket is not permitted. 
+ // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch + // + // * + // Description: The request signature we calculated does not match the signature + // you provided. Check your Amazon Web Services secret access key and signing + // method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and + // SOAP Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for + // details. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: ServiceUnavailable + // + // * Description: Reduce your request rate. + // + // * HTTP + // Status Code: 503 Service Unavailable + // + // * SOAP Fault Code Prefix: Server + // + // * Code: + // SlowDown + // + // * Description: Reduce your request rate. + // + // * HTTP Status Code: 503 Slow + // Down + // + // * SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect + // + // * + // Description: You are being redirected to the bucket while DNS updates. + // + // * HTTP + // Status Code: 307 Moved Temporarily + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // TokenRefreshRequired + // + // * Description: The provided token must be refreshed. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // TooManyBuckets + // + // * Description: You have attempted to create more buckets than + // allowed. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: UnexpectedContent + // + // * Description: This request does not support + // content. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: UnresolvableGrantByEmailAddress + // + // * Description: The email + // address you provided does not match any account on record. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // UserKeyMustBeSpecified + // + // * Description: The bucket POST must contain the + // specified field name. If it is specified, check the order of the fields. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + Code *string + + // The error key. + Key *string + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they don't + // know how or don't care to handle. Sophisticated programs with more exhaustive + // error handling and proper internationalization are more likely to ignore the + // error message. + Message *string + + // The version ID of the error. + VersionId *string + + noSmithyDocumentSerde +} + +// The error information. +type ErrorDocument struct { + + // The object key name to use when a 4XX class error occurs. Replacement must be + // made for object keys containing special characters (such as carriage returns) + // when using XML requests. For more information, see XML related object key + // constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // This member is required. 
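+ //
+ // A minimal sketch of constructing this type (illustrative only, not part of
+ // the generated file; the "error.html" key is a hypothetical example value and
+ // aws.String is the pointer helper from the aws package):
+ //
+ //   errDoc := types.ErrorDocument{Key: aws.String("error.html")}
+ //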
+ Key *string
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying the configuration for Amazon EventBridge.
+type EventBridgeConfiguration struct {
+ noSmithyDocumentSerde
+}
+
+// Optional configuration to replicate existing source bucket objects. For more
+// information, see Replicating Existing Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+// in the Amazon S3 User Guide.
+type ExistingObjectReplication struct {
+
+ // Specifies whether Amazon S3 replicates existing source bucket objects.
+ //
+ // This member is required.
+ Status ExistingObjectReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the Amazon S3 object key name to filter on and whether to filter on
+// the suffix or prefix of the key name.
+type FilterRule struct {
+
+ // The object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+ // prefixes and suffixes are not supported. For more information, see Configuring
+ // Event Notifications
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
+ // Amazon S3 User Guide.
+ Name FilterRuleName
+
+ // The value that the filter searches for in object key names.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+// A collection of parts associated with a multipart upload.
+type GetObjectAttributesParts struct {
+
+ // Indicates whether the returned list of parts is truncated. A value of true
+ // indicates that the list was truncated. A list can be truncated if the number of
+ // parts exceeds the limit returned in the MaxParts element.
+ IsTruncated bool
+
+ // The maximum number of parts allowed in the response.
+ MaxParts int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the PartNumberMarker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // The marker for the current part.
+ PartNumberMarker *string
+
+ // A container for elements related to a particular part. A response can contain
+ // zero or more Parts elements.
+ Parts []ObjectPart
+
+ // The total number of parts.
+ TotalPartsCount int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // This member is required.
+ Tier Tier
+
+ noSmithyDocumentSerde
+}
+
+// Container for grant information.
+type Grant struct {
+
+ // The person being granted permissions.
+ Grantee *Grantee
+
+ // Specifies the permission given to the grantee.
+ Permission Permission
+
+ noSmithyDocumentSerde
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+
+ // Type of grantee.
+ //
+ // This member is required.
+ Type Type
+
+ // Screen name of the grantee.
+ DisplayName *string
+
+ // Email address of the grantee. Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions:
+ //
+ // * US East (N.
+ // Virginia)
+ //
+ // * US West (N. 
California)
+ //
+ // * US West (Oregon)
+ //
+ // * Asia Pacific
+ // (Singapore)
+ //
+ // * Asia Pacific (Sydney)
+ //
+ // * Asia Pacific (Tokyo)
+ //
+ // * Europe
+ // (Ireland)
+ //
+ // * South America (São Paulo)
+ //
+ // For a list of all the Amazon S3
+ // supported Regions and endpoints, see Regions and Endpoints
+ // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+ // Amazon Web Services General Reference.
+ EmailAddress *string
+
+ // The canonical user ID of the grantee.
+ ID *string
+
+ // URI of the grantee group.
+ URI *string
+
+ noSmithyDocumentSerde
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request to
+ // samplebucket/images/, the data that is returned will be for the object with the
+ // key name images/index.html). The suffix must not be empty and must not include a
+ // slash character. Replacement must be made for object keys containing special
+ // characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // This member is required.
+ Suffix *string
+
+ noSmithyDocumentSerde
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+
+ // Name of the Principal.
+ DisplayName *string
+
+ // If the principal is an Amazon Web Services account, it provides the Canonical
+ // User ID. If the principal is an IAM User, it provides a user ARN value.
+ ID *string
+
+ noSmithyDocumentSerde
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput
+
+ // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
+ // Value: NONE.
+ CompressionType CompressionType
+
+ // Specifies JSON as object's input serialization format.
+ JSON *JSONInput
+
+ // Specifies Parquet as object's input serialization format.
+ Parquet *ParquetInput
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying S3 Intelligent-Tiering filters. The filters determine
+// the subset of objects to which the rule applies.
+type IntelligentTieringAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // configuration applies.
+ Prefix *string
+
+ // All of these tags must exist in the object's tag set in order for the
+ // configuration to apply.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For
+// information about the S3 Intelligent-Tiering storage class, see Storage class
+// for automatically optimizing frequently and infrequently accessed objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+type IntelligentTieringConfiguration struct {
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the status of the configuration.
+ //
+ // This member is required.
+ Status IntelligentTieringStatus
+
+ // Specifies the S3 Intelligent-Tiering storage class tier of the configuration.
+ //
+ // This member is required.
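+ //
+ // A minimal sketch of a complete configuration (illustrative only; the Tiering
+ // field names and the enum values shown are assumptions based on this package's
+ // conventions):
+ //
+ //   cfg := types.IntelligentTieringConfiguration{
+ //       Id:     aws.String("archive-after-90d"), // hypothetical configuration ID
+ //       Status: types.IntelligentTieringStatusEnabled,
+ //       Tierings: []types.Tiering{
+ //           {AccessTier: types.IntelligentTieringAccessTierArchiveAccess, Days: 90},
+ //       },
+ //   }
+ //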
+ Tierings []Tiering + + // Specifies a bucket filter. The configuration only includes objects that meet the + // filter's criteria. + Filter *IntelligentTieringFilter + + noSmithyDocumentSerde +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering +// configuration applies to. +type IntelligentTieringFilter struct { + + // A conjunction (logical AND) of predicates, which is used in evaluating a metrics + // filter. The operator must have at least two predicates, and an object must match + // all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. Replacement must be made for object keys containing special + // characters (such as carriage returns) when using XML requests. For more + // information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string + + // A container of a key value name pair. + Tag *Tag + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more +// information, see GET Bucket inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. +type InventoryConfiguration struct { + + // Contains information about where to publish the inventory results. + // + // This member is required. + Destination *InventoryDestination + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields + // VersionId, IsLatest, and DeleteMarker to the list. If set to Current, the list + // does not contain these version-related fields. + // + // This member is required. + IncludedObjectVersions InventoryIncludedObjectVersions + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // This member is required. + IsEnabled bool + + // Specifies the schedule for generating inventory results. + // + // This member is required. + Schedule *InventorySchedule + + // Specifies an inventory filter. The inventory only includes objects that meet the + // filter's criteria. + Filter *InventoryFilter + + // Contains the optional fields that are included in the inventory results. + OptionalFields []InventoryOptionalField + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // This member is required. + S3BucketDestination *InventoryS3BucketDestination + + noSmithyDocumentSerde +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 + + noSmithyDocumentSerde +} + +// Specifies an inventory filter. The inventory only includes objects that meet the +// filter's criteria. 
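+// For example, limiting an inventory to a hypothetical "reports/" key prefix
+// (an illustrative sketch, not part of the generated file):
+//
+//   filter := types.InventoryFilter{Prefix: aws.String("reports/")}
+//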
+type InventoryFilter struct { + + // The prefix that an object must have to be included in the inventory results. + // + // This member is required. + Prefix *string + + noSmithyDocumentSerde +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket where inventory results will be + // published. + // + // This member is required. + Bucket *string + + // Specifies the output format of the inventory results. + // + // This member is required. + Format InventoryFormat + + // The account ID that owns the destination S3 bucket. If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + AccountId *string + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption + + // The prefix that is prepended to all inventory results. + Prefix *string + + noSmithyDocumentSerde +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + + // Specifies how frequently inventory results are produced. + // + // This member is required. + Frequency InventoryFrequency + + noSmithyDocumentSerde +} + +// Specifies JSON as object's input serialization format. +type JSONInput struct { + + // The type of JSON. Valid values: Document, Lines. + Type JSONType + + noSmithyDocumentSerde +} + +// Specifies JSON as request's output serialization format. +type JSONOutput struct { + + // The value used to separate individual records in the output. If no value is + // specified, Amazon S3 uses a newline character ('\n'). + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for Lambda notifications. +type LambdaFunctionConfiguration struct { + + // The Amazon S3 bucket event for which to invoke the Lambda function. For more + // information, see Supported Event Types + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes + // when the specified event type occurs. + // + // This member is required. + LambdaFunctionArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// Container for the expiration for the lifecycle of the object. +type LifecycleExpiration struct { + + // Indicates at what date the object is to be moved or deleted. Should be in GMT + // ISO 8601 Format. + Date *time.Time + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days int32 + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. 
If set to true, the delete marker will be expired; if set to false the + // policy takes no action. This cannot be specified with Days or Date in a + // Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker bool + + noSmithyDocumentSerde +} + +// A lifecycle rule for individual objects in an Amazon S3 bucket. +type LifecycleRule struct { + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is + // not currently being applied. + // + // This member is required. + Status ExpirationStatus + + // Specifies the days since the initiation of an incomplete multipart upload that + // Amazon S3 will wait before permanently removing all parts of the upload. For + // more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. + Expiration *LifecycleExpiration + + // The Filter is used to identify objects that a Lifecycle Rule applies to. A + // Filter must have exactly one of Prefix, Tag, or And specified. Filter is + // required if the LifecycleRule does not contain a Prefix element. + Filter LifecycleRuleFilter + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 + // permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) to + // request that Amazon S3 delete noncurrent object versions at a specific period in + // the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action to + // request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + NoncurrentVersionTransitions []NoncurrentVersionTransition + + // Prefix identifying one or more objects to which the rule applies. This is no + // longer used; use Filter instead. Replacement must be made for object keys + // containing special characters (such as carriage returns) when using XML + // requests. For more information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: This member has been deprecated. + Prefix *string + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []Transition + + noSmithyDocumentSerde +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more +// predicates. The Lifecycle Rule will apply to any object matching all of the +// predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan int64 + + // Maximum object size to which the rule applies. + ObjectSizeLessThan int64 + + // Prefix identifying one or more objects to which the rule applies. 
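+ //
+ // A minimal sketch (illustrative only; the prefix and tag values are
+ // hypothetical, and the Tag field names are assumed from this package's
+ // conventions) of combining a prefix with a tag and wrapping the result in the
+ // And member of the LifecycleRuleFilter union defined below:
+ //
+ //   and := &types.LifecycleRuleFilterMemberAnd{Value: types.LifecycleRuleAndOperator{
+ //       Prefix: aws.String("logs/"),
+ //       Tags:   []types.Tag{{Key: aws.String("retain"), Value: aws.String("short")}},
+ //   }}
+ //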
+ Prefix *string + + // All of these tags must exist in the object's tag set in order for the rule to + // apply. + Tags []Tag + + noSmithyDocumentSerde +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. A +// Filter must have exactly one of Prefix, Tag, or And specified. +// +// The following types satisfy this interface: +// LifecycleRuleFilterMemberAnd +// LifecycleRuleFilterMemberObjectSizeGreaterThan +// LifecycleRuleFilterMemberObjectSizeLessThan +// LifecycleRuleFilterMemberPrefix +// LifecycleRuleFilterMemberTag +type LifecycleRuleFilter interface { + isLifecycleRuleFilter() +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more +// predicates. The Lifecycle Rule will apply to any object matching all of the +// predicates configured inside the And operator. +type LifecycleRuleFilterMemberAnd struct { + Value LifecycleRuleAndOperator + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} + +// Minimum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} + +// Maximum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeLessThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} + +// Prefix identifying one or more objects to which the rule applies. Replacement +// must be made for object keys containing special characters (such as carriage +// returns) when using XML requests. For more information, see XML related object +// key constraints +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). +type LifecycleRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} + +// This tag must exist in the object's tag set in order for the rule to apply. +type LifecycleRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to all log +// object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in +// the Amazon S3 API Reference. +type LoggingEnabled struct { + + // Specifies the bucket where you want Amazon S3 to store server access logs. You + // can have your logs delivered to any bucket that you own, including the same + // bucket that is being logged. You can also configure multiple buckets to deliver + // their logs to the same target bucket. In this case, you should choose a + // different TargetPrefix for each source bucket so that the delivered log files + // can be distinguished by key. + // + // This member is required. + TargetBucket *string + + // A prefix for all log object keys. If you store log files from multiple Amazon S3 + // buckets in a single bucket, you can use a prefix to distinguish which log files + // came from which bucket. + // + // This member is required. + TargetPrefix *string + + // Container for granting information. Buckets that use the bucket owner enforced + // setting for Object Ownership don't support target grants. 
For more information, + // see Permissions for server access log delivery + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []TargetGrant + + noSmithyDocumentSerde +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + + // Name of the Object. + Name *string + + // Value of the Object. + Value *string + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + + // Specifies whether the replication metrics are enabled. + // + // This member is required. + Status MetricsStatus + + // A container specifying the time threshold for emitting the + // s3:Replication:OperationMissedThreshold event. + EventThreshold *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates, and an object must match +// all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string + + // The prefix used when evaluating an AND predicate. + Prefix *string + + // The list of tags used when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +type MetricsConfiguration struct { + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // Specifies a metrics configuration filter. The metrics configuration will only + // include objects that meet the filter's criteria. A filter must be a prefix, an + // object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter MetricsFilter + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, an +// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more +// information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +// +// The following types satisfy this interface: +// MetricsFilterMemberAccessPointArn +// MetricsFilterMemberAnd +// MetricsFilterMemberPrefix +// MetricsFilterMemberTag +type MetricsFilter interface { + isMetricsFilter() +} + +// The access point ARN used when evaluating a metrics filter. +type MetricsFilterMemberAccessPointArn struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates, and an object must match +// all of the predicates in order for the filter to apply. 
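+// For example (an illustrative sketch; the prefix and tag values are
+// hypothetical, and the Tag field names are assumed from this package's
+// conventions):
+//
+//   f := &types.MetricsFilterMemberAnd{Value: types.MetricsAndOperator{
+//       Prefix: aws.String("photos/"),
+//       Tags:   []types.Tag{{Key: aws.String("team"), Value: aws.String("media")}},
+//   }}
+//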
+type MetricsFilterMemberAnd struct { + Value MetricsAndOperator + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAnd) isMetricsFilter() {} + +// The prefix used when evaluating a metrics filter. +type MetricsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberPrefix) isMetricsFilter() {} + +// The tag used when evaluating a metrics filter. +type MetricsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberTag) isMetricsFilter() {} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm ChecksumAlgorithm + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time + + // Identifies who initiated the multipart upload. + Initiator *Initiator + + // Key of the object for which the multipart upload was initiated. + Key *string + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner + + // The class of storage used to store the object. + StorageClass StorageClass + + // Upload ID that identifies the multipart upload. + UploadId *string + + noSmithyDocumentSerde +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 +// permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) to +// request that Amazon S3 delete noncurrent object versions at a specific period in +// the object's lifetime. +type NoncurrentVersionExpiration struct { + + // Specifies how many noncurrent versions Amazon S3 will retain. If there are this + // many more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration + // elements + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays int32 + + noSmithyDocumentSerde +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or +// versioning is suspended), you can set this action to request that Amazon S3 +// transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a +// specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + + // Specifies how many noncurrent versions Amazon S3 will retain. If there are this + // many more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration + // elements + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. 
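+ //
+ // A minimal sketch (illustrative only; the TransitionStorageClassGlacier enum
+ // value is assumed from this package's conventions): keep the two newest
+ // noncurrent versions and transition older ones 30 days after they become
+ // noncurrent:
+ //
+ //   t := types.NoncurrentVersionTransition{
+ //       NewerNoncurrentVersions: 2,
+ //       NoncurrentDays:          30,
+ //       StorageClass:            types.TransitionStorageClassGlacier,
+ //   }
+ //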
+ NewerNoncurrentVersions int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been + // Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays int32 + + // The class of storage used to store the object. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// A container for specifying the notification configuration of the bucket. If this +// element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events for + // which to publish messages. + QueueConfigurations []QueueConfiguration + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []TopicConfiguration + + noSmithyDocumentSerde +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the +// Amazon S3 User Guide. +type NotificationConfigurationFilter struct { + + // A container for object key name prefix and suffix filtering rules. + Key *S3KeyFilter + + noSmithyDocumentSerde +} + +// An object consists of data and its descriptive metadata. +type Object struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []ChecksumAlgorithm + + // The entity tag is a hash of the object. The ETag reflects changes only to the + // contents of an object, not its metadata. The ETag may or may not be an MD5 + // digest of the object data. Whether or not it is depends on how the object was + // created and how it is encrypted as described below: + // + // * Objects created by the + // PUT Object, POST Object, or Copy operation, or through the Amazon Web Services + // Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that + // are an MD5 digest of their object data. + // + // * Objects created by the PUT Object, + // POST Object, or Copy operation, or through the Amazon Web Services Management + // Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 + // digest of their object data. + // + // * If an object is created by either the Multipart + // Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the + // method of encryption. If an object is larger than 16 MB, the Amazon Web Services + // Management Console will upload or copy that object as a Multipart Upload, and + // therefore the ETag will not be an MD5 digest. + ETag *string + + // The name that you assign to an object. You use the object key to retrieve the + // object. + Key *string + + // Creation date of the object. + LastModified *time.Time + + // The owner of the object + Owner *Owner + + // Size in bytes of the object + Size int64 + + // The class of storage used to store the object. 
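+ //
+ // For example, printing the fields above (an illustrative sketch; out is
+ // assumed to be a ListObjectsV2 response from the parent s3 package, whose
+ // Contents field is a []types.Object):
+ //
+ //   for _, o := range out.Contents {
+ //       fmt.Printf("%s\t%d\t%s\n", aws.ToString(o.Key), o.Size, o.StorageClass)
+ //   }
+ //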
+ StorageClass ObjectStorageClass
+
+ noSmithyDocumentSerde
+}
+
+// Object Identifier is a unique value that identifies an object.
+type ObjectIdentifier struct {
+
+ // Key name of the object. Replacement must be made for object keys containing
+ // special characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // This member is required.
+ Key *string
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+
+ // Indicates whether this bucket has an Object Lock configuration enabled. Enable
+ // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+ ObjectLockEnabled ObjectLockEnabled
+
+ // Specifies the Object Lock rule for the specified object. Enable this rule
+ // when you apply ObjectLockConfiguration to a bucket. Bucket settings require both
+ // a mode and a period. The period can be either Days or Years, but you must select
+ // one. You cannot specify Days and Years at the same time.
+ Rule *ObjectLockRule
+
+ noSmithyDocumentSerde
+}
+
+// A legal hold configuration for an object.
+type ObjectLockLegalHold struct {
+
+ // Indicates whether the specified object has a legal hold in place.
+ Status ObjectLockLegalHoldStatus
+
+ noSmithyDocumentSerde
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+
+ // Indicates the Retention mode for the specified object.
+ Mode ObjectLockRetentionMode
+
+ // The date on which this Object Lock Retention will expire.
+ RetainUntilDate *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// The container element for an Object Lock rule.
+type ObjectLockRule struct {
+
+ // The default Object Lock retention mode and period that you want to apply to new
+ // objects placed in the specified bucket. Bucket settings require both a mode and
+ // a period. The period can be either Days or Years, but you must select one. You
+ // cannot specify Days and Years at the same time.
+ DefaultRetention *DefaultRetention
+
+ noSmithyDocumentSerde
+}
+
+// A container for elements related to an individual part.
+type ObjectPart struct {
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The part number identifying the part. This value is a positive integer between 1 + // and 10,000. + PartNumber int32 + + // The size of the uploaded part in bytes. + Size int64 + + noSmithyDocumentSerde +} + +// The version of an object. +type ObjectVersion struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []ChecksumAlgorithm + + // The entity tag is an MD5 hash of that version of the object. + ETag *string + + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + IsLatest bool + + // The object key. + Key *string + + // Date and time the object was last modified. + LastModified *time.Time + + // Specifies the owner of the object. + Owner *Owner + + // Size in bytes of the object. + Size int64 + + // The class of storage used to store the object. + StorageClass ObjectVersionStorageClass + + // Version ID of an object. + VersionId *string + + noSmithyDocumentSerde +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + + // Describes an S3 location that will receive the results of the restore request. + S3 *S3Location + + noSmithyDocumentSerde +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput + + noSmithyDocumentSerde +} + +// Container for the owner's display name and ID. +type Owner struct { + + // Container for the display name of the owner. + DisplayName *string + + // Container for the ID of the owner. + ID *string + + noSmithyDocumentSerde +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + + // The container element for an ownership control rule. + // + // This member is required. + Rules []OwnershipControlsRule + + noSmithyDocumentSerde +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. 
The bucket only accepts PUT requests that don't + // specify an ACL or bucket owner full control ACLs, such as the + // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed + // in the XML format. + // + // This member is required. + ObjectOwnership ObjectOwnership + + noSmithyDocumentSerde +} + +// Container for Parquet. +type ParquetInput struct { + noSmithyDocumentSerde +} + +// Container for elements related to a part. +type Part struct { + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag returned when the part was uploaded. + ETag *string + + // Date and time at which the part was uploaded. + LastModified *time.Time + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber int32 + + // Size in bytes of the uploaded part data. + Size int64 + + noSmithyDocumentSerde +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic bool + + noSmithyDocumentSerde +} + +// This data type contains information about progress of an operation. +type Progress struct { + + // The current number of uncompressed object bytes processed. + BytesProcessed int64 + + // The current number of bytes of records payload data returned. + BytesReturned int64 + + // The current number of object bytes scanned. + BytesScanned int64 + + noSmithyDocumentSerde +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + + // The Progress event details. 
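+ //
+ // For example (an illustrative sketch; ev is assumed to be a ProgressEvent
+ // received while reading a SelectObjectContent event stream):
+ //
+ //   if p := ev.Details; p != nil {
+ //       fmt.Printf("scanned=%d processed=%d returned=%d\n",
+ //           p.BytesScanned, p.BytesProcessed, p.BytesReturned)
+ //   }
+ //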
+ Details *Progress + + noSmithyDocumentSerde +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon S3 +// bucket. You can enable the configuration options in any combination. For more +// information about when Amazon S3 considers a bucket or object public, see The +// Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon S3 User Guide. +type PublicAccessBlockConfiguration struct { + + // Specifies whether Amazon S3 should block public access control lists (ACLs) for + // this bucket and objects in this bucket. Setting this element to TRUE causes the + // following behavior: + // + // * PUT Bucket ACL and PUT Object ACL calls fail if the + // specified ACL is public. + // + // * PUT Object calls fail if the request includes a + // public ACL. + // + // * PUT Bucket calls fail if the request includes a public + // ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls bool + + // Specifies whether Amazon S3 should block public bucket policies for this bucket. + // Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket + // policy if the specified bucket policy allows public access. Enabling this + // setting doesn't affect existing bucket policies. + BlockPublicPolicy bool + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore + // all public ACLs on this bucket and objects in this bucket. Enabling this setting + // doesn't affect the persistence of any existing ACLs and doesn't prevent new + // public ACLs from being set. + IgnorePublicAcls bool + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // Amazon Web Service principals and authorized users within this account if the + // bucket has a public policy. Enabling this setting doesn't affect previously + // stored bucket policies, except that public and cross-account access within any + // public bucket policy, including non-public delegation to specific accounts, is + // blocked. + RestrictPublicBuckets bool + + noSmithyDocumentSerde +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + + // A collection of bucket events for which to send notifications + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + QueueArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// The container for the records event. +type RecordsEvent struct { + + // The byte array of partial, one or more result records. 
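+ // A single Records payload may end mid-record, so consumers should
+ // concatenate payloads before parsing. A minimal sketch, assuming "stream"
+ // is the event stream reader returned by the service client's
+ // SelectObjectContent call:
+ //
+ //   var buf bytes.Buffer
+ //   for event := range stream.Events() {
+ //       if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+ //           buf.Write(rec.Value.Payload)
+ //       }
+ //   }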
+ Payload []byte + + noSmithyDocumentSerde +} + +// Specifies how requests are redirected. In the event of an error, you can specify +// a different error code to return. +type Redirect struct { + + // The host name to use in the redirect request. + HostName *string + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string + + // Protocol to use when redirecting requests. The default is the protocol that is + // used in the original request. + Protocol Protocol + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ and + // in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of + // the siblings is present. Can be present only if ReplaceKeyWith is not provided. + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see XML + // related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can be + // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made + // for object keys containing special characters (such as carriage returns) when + // using XML requests. For more information, see XML related object key + // constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyWith *string + + noSmithyDocumentSerde +} + +// Specifies the redirect behavior of all requests to a website endpoint of an +// Amazon S3 bucket. +type RedirectAllRequestsTo struct { + + // Name of the host where requests are redirected. + // + // This member is required. + HostName *string + + // Protocol to use when redirecting requests. The default is the protocol that is + // used in the original request. + Protocol Protocol + + noSmithyDocumentSerde +} + +// A filter that you can specify for selection for modifications on replicas. +// Amazon S3 doesn't replicate replica modifications by default. In the latest +// version of replication configuration (when Filter is specified), you can specify +// this element and set the status to Enabled to replicate modifications on +// replicas. If you don't specify the Filter element, Amazon S3 assumes that the +// replication configuration is the earlier version, V1. In the earlier version, +// this element is not allowed. +type ReplicaModifications struct { + + // Specifies whether Amazon S3 replicates modifications on replicas. + // + // This member is required. + Status ReplicaModificationsStatus + + noSmithyDocumentSerde +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role + // that Amazon S3 assumes when replicating objects. For more information, see How + // to Set Up Replication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) in + // the Amazon S3 User Guide. 
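+ // A minimal sketch of a single-rule configuration (the ARNs are
+ // placeholders, not real resources):
+ //
+ //   cfg := types.ReplicationConfiguration{
+ //       Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+ //       Rules: []types.ReplicationRule{{
+ //           Status:      types.ReplicationRuleStatusEnabled,
+ //           Destination: &types.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+ //       }},
+ //   }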
+ //
+ // This member is required.
+ Role *string
+
+ // A container for one or more replication rules. A replication configuration must
+ // have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // This member is required.
+ Rules []ReplicationRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+
+ // A container for information about the replication destination and its
+ // configurations including enabling the S3 Replication Time Control (S3 RTC).
+ //
+ // This member is required.
+ Destination *Destination
+
+ // Specifies whether the rule is enabled.
+ //
+ // This member is required.
+ Status ReplicationRuleStatus
+
+ // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+ // in your replication configuration, you must also include a
+ // DeleteMarkerReplication element. If your Filter includes a Tag element, the
+ // DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+ // not support replicating delete markers for tag-based rules. For an example
+ // configuration, see Basic Rule Configuration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ // For more information about delete marker replication, see Delete Marker
+ // Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+ // If you are using an earlier version of the replication configuration, Amazon S3
+ // handles replication of delete markers differently. For more information, see
+ // Backward Compatibility
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+ DeleteMarkerReplication *DeleteMarkerReplication
+
+ // Optional configuration to replicate existing source bucket objects.
+ ExistingObjectReplication *ExistingObjectReplication
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+ Filter ReplicationRuleFilter
+
+ // A unique identifier for the rule. The maximum value is 255 characters.
+ ID *string
+
+ // An object key name prefix that identifies the object or objects to which the
+ // rule applies. The maximum prefix length is 1,024 characters. To include all
+ // objects in a bucket, specify an empty string. Replacement must be made for
+ // object keys containing special characters (such as carriage returns) when using
+ // XML requests. For more information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // The priority indicates which rule has precedence whenever two or more
+ // replication rules conflict. Amazon S3 will attempt to replicate objects
+ // according to all replication rules. However, if there are two or more rules with
+ // the same destination bucket, then objects will be replicated according to the
+ // rule with the highest priority. The higher the number, the higher the priority.
+ // For more information, see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+ // S3 User Guide.
+ Priority int32
+
+ // A container that describes additional filters for identifying the source objects
+ // that you want to replicate.
You can choose to enable or disable the replication + // of these objects. Currently, Amazon S3 supports only the filter that you can + // specify for objects created with server-side encryption using a customer managed + // key stored in Amazon Web Services Key Management Service (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria + + noSmithyDocumentSerde +} + +// A container for specifying rule filters. The filters determine the subset of +// objects to which the rule applies. This element is required only if you specify +// more than one filter. For example: +// +// * If you specify both a Prefix and a Tag +// filter, wrap these filters in an And tag. +// +// * If you specify a filter based on +// multiple tags, wrap the Tag elements in an And tag. +type ReplicationRuleAndOperator struct { + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. + Prefix *string + + // An array of tags containing key and value pairs. + Tags []Tag + + noSmithyDocumentSerde +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +// +// The following types satisfy this interface: +// ReplicationRuleFilterMemberAnd +// ReplicationRuleFilterMemberPrefix +// ReplicationRuleFilterMemberTag +type ReplicationRuleFilter interface { + isReplicationRuleFilter() +} + +// A container for specifying rule filters. The filters determine the subset of +// objects to which the rule applies. This element is required only if you specify +// more than one filter. For example: +// +// * If you specify both a Prefix and a Tag +// filter, wrap these filters in an And tag. +// +// * If you specify a filter based on +// multiple tags, wrap the Tag elements in an And tag. +type ReplicationRuleFilterMemberAnd struct { + Value ReplicationRuleAndOperator + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} + +// An object key name prefix that identifies the subset of objects to which the +// rule applies. Replacement must be made for object keys containing special +// characters (such as carriage returns) when using XML requests. For more +// information, see XML related object key constraints +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). +type ReplicationRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} + +// A container for specifying a tag key and value. The rule applies only to objects +// that have the tag in their tag set. +type ReplicationRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics block. +type ReplicationTime struct { + + // Specifies whether the replication time is enabled. + // + // This member is required. + Status ReplicationTimeStatus + + // A container specifying the time by which replication should be complete for all + // objects and operations on objects. + // + // This member is required. 
+ Time *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// and replication metrics EventThreshold. +type ReplicationTimeValue struct { + + // Contains an integer specifying time in minutes. Valid value: 15 + Minutes int32 + + noSmithyDocumentSerde +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + + // Specifies who pays for the download and request fees. + // + // This member is required. + Payer Payer + + noSmithyDocumentSerde +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled bool + + noSmithyDocumentSerde +} + +// Container for restore job parameters. +type RestoreRequest struct { + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. The Days element is required for regular restores, and must not + // be provided for select requests. + Days int32 + + // The optional description for the job. + Description *string + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters + + // Retrieval tier at which the restore will be processed. + Tier Tier + + // Type of restore request. + Type RestoreRequestType + + noSmithyDocumentSerde +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + + // Container for redirect information. You can redirect requests to another host, + // to another page, or with another protocol. In the event of an error, you can + // specify a different error code to return. + // + // This member is required. + Redirect *Redirect + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition + + noSmithyDocumentSerde +} + +// A container for object key name prefix and suffix filtering rules. +type S3KeyFilter struct { + + // A list of containers for the key-value pair that defines the criteria for the + // filter rule. + FilterRules []FilterRule + + noSmithyDocumentSerde +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type S3Location struct { + + // The name of the bucket where the restore results will be placed. + // + // This member is required. + BucketName *string + + // The prefix that is prepended to the restore results for this request. + // + // This member is required. + Prefix *string + + // A list of grants that control access to the staged results. + AccessControlList []Grant + + // The canned ACL to apply to the restore results. 
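+ // When unset (the zero value), no canned ACL is applied; otherwise use one
+ // of the ObjectCannedACL constants, for example (loc being a
+ // *types.S3Location):
+ //
+ //   loc.CannedACL = types.ObjectCannedACLPrivate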
+ CannedACL ObjectCannedACL + + // Contains the type of server-side encryption used. + Encryption *Encryption + + // The class of storage used to store the restore results. + StorageClass StorageClass + + // The tag-set that is applied to the restore results. + Tagging *Tagging + + // A list of metadata to store with the restore results in S3. + UserMetadata []MetadataEntry + + noSmithyDocumentSerde +} + +// Specifies the byte range of the object to get the records from. A record is +// processed when its first byte is contained by the range. This parameter is +// optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the object + // being queried. If only the End parameter is supplied, it is interpreted to mean + // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. + End int64 + + // Specifies the start of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is 0. If only start is supplied, it + // means scan from that point to the end of the file. For example, 50 means scan + // from byte 50 until the end of the file. + Start int64 + + noSmithyDocumentSerde +} + +// The container for selecting objects from a content event stream. +// +// The following types satisfy this interface: +// SelectObjectContentEventStreamMemberCont +// SelectObjectContentEventStreamMemberEnd +// SelectObjectContentEventStreamMemberProgress +// SelectObjectContentEventStreamMemberRecords +// SelectObjectContentEventStreamMemberStats +type SelectObjectContentEventStream interface { + isSelectObjectContentEventStream() +} + +// The Continuation Event. +type SelectObjectContentEventStreamMemberCont struct { + Value ContinuationEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {} + +// The End Event. +type SelectObjectContentEventStreamMemberEnd struct { + Value EndEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {} + +// The Progress Event. +type SelectObjectContentEventStreamMemberProgress struct { + Value ProgressEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {} + +// The Records Event. +type SelectObjectContentEventStreamMemberRecords struct { + Value RecordsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {} + +// The Stats Event. +type SelectObjectContentEventStreamMemberStats struct { + Value StatsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} + +// Describes the parameters for Select job types. +type SelectParameters struct { + + // The expression that is used to query the object. + // + // This member is required. + Expression *string + + // The type of the provided expression (for example, SQL). + // + // This member is required. + ExpressionType ExpressionType + + // Describes the serialization format of the object. + // + // This member is required. + InputSerialization *InputSerialization + + // Describes how the results of the Select job are serialized. 
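+ // A minimal sketch of a fully populated value (the CSV options shown are
+ // illustrative):
+ //
+ //   params := types.SelectParameters{
+ //       Expression:          aws.String("SELECT * FROM S3Object s"),
+ //       ExpressionType:      types.ExpressionTypeSql,
+ //       InputSerialization:  &types.InputSerialization{CSV: &types.CSVInput{}},
+ //       OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
+ //   }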
+ // + // This member is required. + OutputSerialization *OutputSerialization + + noSmithyDocumentSerde +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, this +// default encryption will be applied. If you don't specify a customer managed key +// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key +// in your Amazon Web Services account the first time that you add an object +// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for +// SSE-KMS. For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + + // Server-side encryption algorithm to use for the default encryption. + // + // This member is required. + SSEAlgorithm ServerSideEncryption + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if and + // only if SSEAlgorithm is set to aws:kms. You can specify the key ID or the Amazon + // Resource Name (ARN) of the KMS key. However, if you are using encryption with + // cross-account or Amazon Web Services service operations you must use a fully + // qualified KMS key ARN. For more information, see Using encryption for + // cross-account operations + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon + // S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more + // information, see Using symmetric and asymmetric keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + KMSMasterKeyID *string + + noSmithyDocumentSerde +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + + // Container for information about a particular server-side encryption + // configuration rule. + // + // This member is required. + Rules []ServerSideEncryptionRule + + noSmithyDocumentSerde +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, this + // default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 + // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more + // information, see Amazon S3 Bucket Keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon + // S3 User Guide. + BucketKeyEnabled bool + + noSmithyDocumentSerde +} + +// A container that describes additional filters for identifying the source objects +// that you want to replicate. 
You can choose to enable or disable the replication +// of these objects. Currently, Amazon S3 supports only the filter that you can +// specify for objects created with server-side encryption using a customer managed +// key stored in Amazon Web Services Key Management Service (SSE-KMS). +type SourceSelectionCriteria struct { + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can specify + // this element and set the status to Enabled to replicate modifications on + // replicas. If you don't specify the Filter element, Amazon S3 assumes that the + // replication configuration is the earlier version, V1. In the earlier version, + // this element is not allowed + ReplicaModifications *ReplicaModifications + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria + // in the replication configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects + + noSmithyDocumentSerde +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + + // Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web + // Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + + // Specifies whether Amazon S3 replicates objects created with server-side + // encryption using an Amazon Web Services KMS key stored in Amazon Web Services + // Key Management Service. + // + // This member is required. + Status SseKmsEncryptedObjectsStatus + + noSmithyDocumentSerde +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + noSmithyDocumentSerde +} + +// Container for the stats details. +type Stats struct { + + // The total number of uncompressed object bytes processed. + BytesProcessed int64 + + // The total number of bytes of records payload data returned. + BytesReturned int64 + + // The total number of object bytes scanned. + BytesScanned int64 + + noSmithyDocumentSerde +} + +// Container for the Stats Event. +type StatsEvent struct { + + // The Stats event details. + Details *Stats + + noSmithyDocumentSerde +} + +// Specifies data related to access patterns to be collected and made available to +// analyze the tradeoffs between different storage classes for an Amazon S3 bucket. +type StorageClassAnalysis struct { + + // Specifies how data related to the storage class analysis for an Amazon S3 bucket + // should be exported. + DataExport *StorageClassAnalysisDataExport + + noSmithyDocumentSerde +} + +// Container for data related to the storage class analysis for an Amazon S3 bucket +// for export. +type StorageClassAnalysisDataExport struct { + + // The place to store the data for an analysis. + // + // This member is required. + Destination *AnalyticsExportDestination + + // The version of the output schema to use when exporting data. Must be V_1. + // + // This member is required. + OutputSchemaVersion StorageClassAnalysisSchemaVersion + + noSmithyDocumentSerde +} + +// A container of a key value name pair. 
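+// Both Key and Value are required; a minimal sketch of building a tag set
+// (aws.String is from github.com/aws/aws-sdk-go-v2/aws):
+//
+//   tagging := types.Tagging{TagSet: []types.Tag{
+//       {Key: aws.String("env"), Value: aws.String("prod")},
+//   }}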
+type Tag struct { + + // Name of the object key. + // + // This member is required. + Key *string + + // Value of the tag. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// Container for TagSet elements. +type Tagging struct { + + // A collection for a set of tags + // + // This member is required. + TagSet []Tag + + noSmithyDocumentSerde +} + +// Container for granting information. Buckets that use the bucket owner enforced +// setting for Object Ownership don't support target grants. For more information, +// see Permissions server access log delivery +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + + // Container for the person being granted permissions. + Grantee *Grantee + + // Logging permissions assigned to the grantee for the bucket. + Permission BucketLogsPermission + + noSmithyDocumentSerde +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, +// without additional operational overhead. +type Tiering struct { + + // S3 Intelligent-Tiering access tier. See Storage class for automatically + // optimizing frequently and infrequently accessed objects + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // This member is required. + AccessTier IntelligentTieringAccessTier + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number of + // days specified for Archive Access tier must be at least 90 days and Deep Archive + // Access tier must be at least 180 days. The maximum can be up to 2 years (730 + // days). + // + // This member is required. + Days int32 + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for publication of messages to an +// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects +// specified events. +type TopicConfiguration struct { + + // The Amazon S3 bucket event about which to send notifications. For more + // information, see Supported Event Types + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + TopicArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// Specifies when an object transitions to a specified storage class. 
For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + + // Indicates when objects are transitioned to the specified storage class. The date + // value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time + + // Indicates the number of days after creation when objects are transitioned to the + // specified storage class. The value must be a positive integer. + Days int32 + + // The storage class to which you want the object to transition. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, see +// PUT Bucket versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon S3 API Reference. +type VersioningConfiguration struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. + // If the bucket has never been so configured, this element is not returned. + MFADelete MFADelete + + // The versioning state of the bucket. + Status BucketVersioningStatus + + noSmithyDocumentSerde +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + + // The name of the error document for the website. + ErrorDocument *ErrorDocument + + // The name of the index document for the website. + IndexDocument *IndexDocument + + // The redirect behavior for every request to this bucket's website endpoint. If + // you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []RoutingRule + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +// UnknownUnionMember is returned when a union member is returned over the wire, +// but has an unknown tag. +type UnknownUnionMember struct { + Tag string + Value []byte + + noSmithyDocumentSerde +} + +func (*UnknownUnionMember) isAnalyticsFilter() {} +func (*UnknownUnionMember) isLifecycleRuleFilter() {} +func (*UnknownUnionMember) isMetricsFilter() {} +func (*UnknownUnionMember) isReplicationRuleFilter() {} +func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go new file mode 100644 index 000000000000..ccd845a71e98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -0,0 +1,5494 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
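+// Every operation in this file follows the same shape: a validateOp<Name>
+// middleware registers under the shared ID "OperationInputValidation",
+// type-asserts the raw initialize-step parameters to the operation's input
+// struct, and delegates to a validateOp<Name>Input function that checks the
+// members marked "This member is required." in the types package. A minimal
+// sketch of the pattern, with Example standing in for any operation name
+// (the concrete middlewares below are the generated instances):
+//
+//   func (m *validateOpExample) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+//       out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+//   ) {
+//       // Fail fast on a wiring bug before touching the input.
+//       input, ok := in.Parameters.(*ExampleInput)
+//       if !ok {
+//           return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+//       }
+//       // Reject invalid input before any request is signed or sent.
+//       if err := validateOpExampleInput(input); err != nil {
+//           return out, metadata, err
+//       }
+//       return next.HandleInitialize(ctx, in)
+//   }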
+ +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAbortMultipartUpload struct { +} + +func (*validateOpAbortMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AbortMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAbortMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCompleteMultipartUpload struct { +} + +func (*validateOpCompleteMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CompleteMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCompleteMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCopyObject struct { +} + +func (*validateOpCopyObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CopyObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCopyObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateBucket struct { +} + +func (*validateOpCreateBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateMultipartUpload struct { +} + +func (*validateOpCreateMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketCors struct { +} + +func (*validateOpDeleteBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketEncryption struct { +} + +func (*validateOpDeleteBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucket struct { +} + +func (*validateOpDeleteBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpDeleteBucketInventoryConfiguration struct { +} + +func (*validateOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketLifecycle struct { +} + +func (*validateOpDeleteBucketLifecycle) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketLifecycleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketLifecycleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketMetricsConfiguration struct { +} + +func (*validateOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketOwnershipControls struct { +} + +func (*validateOpDeleteBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketPolicy struct { +} + +func (*validateOpDeleteBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, 
in) +} + +type validateOpDeleteBucketReplication struct { +} + +func (*validateOpDeleteBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketTagging struct { +} + +func (*validateOpDeleteBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketWebsite struct { +} + +func (*validateOpDeleteBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObject struct { +} + +func (*validateOpDeleteObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjects struct { +} + +func (*validateOpDeleteObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjectTagging struct { +} + +func (*validateOpDeleteObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeletePublicAccessBlock struct { +} + +func (*validateOpDeletePublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeletePublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAccelerateConfiguration struct { +} + +func (*validateOpGetBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAcl struct { +} + +func (*validateOpGetBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAnalyticsConfiguration struct { +} + +func (*validateOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketCors struct { +} + +func (*validateOpGetBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + 
out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketEncryption struct { +} + +func (*validateOpGetBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketInventoryConfiguration struct { +} + +func (*validateOpGetBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLifecycleConfiguration struct { +} + +func (*validateOpGetBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLocation struct { +} + +func (*validateOpGetBucketLocation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLocationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLocationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLogging struct { +} + +func (*validateOpGetBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketMetricsConfiguration struct { +} + +func (*validateOpGetBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketNotificationConfiguration struct { +} + +func (*validateOpGetBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketOwnershipControls struct { +} + +func (*validateOpGetBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicy struct { +} + +func (*validateOpGetBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicyStatus struct { +} + +func (*validateOpGetBucketPolicyStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketReplication struct { +} + +func (*validateOpGetBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketRequestPayment struct { +} + +func (*validateOpGetBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketTagging struct { +} + +func (*validateOpGetBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketVersioning struct { +} + +func (*validateOpGetBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketVersioningInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketWebsite struct { +} + +func (*validateOpGetBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAcl struct { +} + +func (*validateOpGetObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAttributes struct { +} + +func (*validateOpGetObjectAttributes) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAttributesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObject struct { +} + +func (*validateOpGetObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLegalHold struct { +} + +func (*validateOpGetObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLockConfiguration struct { +} + +func 
(*validateOpGetObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectRetention struct { +} + +func (*validateOpGetObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTagging struct { +} + +func (*validateOpGetObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTorrent struct { +} + +func (*validateOpGetObjectTorrent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectTorrentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTorrentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicAccessBlock struct { +} + +func (*validateOpGetPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadBucket struct { +} + +func (*validateOpHeadBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadObject struct { +} + +func (*validateOpHeadObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketAnalyticsConfigurations struct { +} + +func (*validateOpListBucketAnalyticsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketIntelligentTieringConfigurations struct { +} + +func (*validateOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketInventoryConfigurations struct { +} + +func (*validateOpListBucketInventoryConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketMetricsConfigurations struct { +} + +func (*validateOpListBucketMetricsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListMultipartUploads struct { +} + +func (*validateOpListMultipartUploads) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMultipartUploadsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMultipartUploadsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjects struct { +} + +func (*validateOpListObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectsV2 struct { +} + +func (*validateOpListObjectsV2) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsV2Input) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectsV2Input(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectVersions struct { +} + +func (*validateOpListObjectVersions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectVersionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectVersionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListParts struct { +} + +func (*validateOpListParts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListPartsInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListPartsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAccelerateConfiguration struct { +} + +func (*validateOpPutBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAcl struct { +} + +func (*validateOpPutBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAnalyticsConfiguration struct { +} + +func (*validateOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketCors struct { +} + +func (*validateOpPutBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketEncryption struct { +} + +func (*validateOpPutBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketEncryptionInput(input); err != 
nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketInventoryConfiguration struct { +} + +func (*validateOpPutBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketLifecycleConfiguration struct { +} + +func (*validateOpPutBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketLogging struct { +} + +func (*validateOpPutBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketMetricsConfiguration struct { +} + +func (*validateOpPutBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", 
in.Parameters) + } + if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketNotificationConfiguration struct { +} + +func (*validateOpPutBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketOwnershipControls struct { +} + +func (*validateOpPutBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketPolicy struct { +} + +func (*validateOpPutBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketReplication struct { +} + +func (*validateOpPutBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketRequestPayment struct { +} + +func (*validateOpPutBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpPutBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketTagging struct { +} + +func (*validateOpPutBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketVersioning struct { +} + +func (*validateOpPutBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketVersioningInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketWebsite struct { +} + +func (*validateOpPutBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectAcl struct { +} + +func (*validateOpPutObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObject struct { +} + +func (*validateOpPutObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLegalHold struct { +} + +func (*validateOpPutObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLockConfiguration struct { +} + +func (*validateOpPutObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectRetention struct { +} + +func (*validateOpPutObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectTagging struct { +} + +func (*validateOpPutObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutPublicAccessBlock struct { +} + +func (*validateOpPutPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRestoreObject struct { +} + +func (*validateOpRestoreObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, 
metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RestoreObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRestoreObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSelectObjectContent struct { +} + +func (*validateOpSelectObjectContent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SelectObjectContentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSelectObjectContentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPartCopy struct { +} + +func (*validateOpUploadPartCopy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartCopyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadPartCopyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPart struct { +} + +func (*validateOpUploadPart) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadPartInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpWriteGetObjectResponse struct { +} + +func (*validateOpWriteGetObjectResponse) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpWriteGetObjectResponseInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After) +} + +func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After) +} + +func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After) +} + +func addOpCreateBucketValidationMiddleware(stack 
*middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) +} + +func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) +} + +func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After) +} + +func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After) +} + +func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After) +} + +func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After) +} + +func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) +} + +func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) +} + +func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After) +} + +func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After) +} + +func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After) +} + +func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After) +} + +func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After) +} + +func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After) +} + +func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After) +} + +func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After) +} + +func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After) +} + +func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After) +} + +func 
addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After) +} + +func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After) +} + +func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After) +} + +func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After) +} + +func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After) +} + +func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After) +} + +func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) +} + +func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) +} + +func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After) +} + +func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After) +} + +func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After) +} + +func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After) +} + +func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After) +} + +func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After) +} + +func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After) +} + +func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After) +} + +func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After) +} + +func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After) +} + +func 
addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After) +} + +func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObject{}, middleware.After) +} + +func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After) +} + +func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After) +} + +func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After) +} + +func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After) +} + +func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After) +} + +func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After) +} + +func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After) +} + +func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After) +} + +func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After) +} + +func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After) +} + +func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After) +} + +func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After) +} + +func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After) +} + +func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjects{}, middleware.After) +} + +func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After) +} + +func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After) +} + +func addOpListPartsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListParts{}, middleware.After) +} + +func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After) +} + +func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After) +} + +func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After) +} + +func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After) +} + +func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After) +} + +func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After) +} + +func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After) +} + +func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After) +} + +func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After) +} + +func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After) +} + +func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After) +} + +func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After) +} + +func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After) +} + +func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After) +} + +func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After) +} + +func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After) +} + +func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After) +} + +func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObject{}, middleware.After) +} + +func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After) +} + +func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After) +} + +func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After) +} + +func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After) +} + +func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After) +} + +func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After) +} + +func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After) +} + +func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After) +} + +func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After) +} + +func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After) +} + +func validateAccessControlPolicy(v *types.AccessControlPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"} + if v.Grants != nil { + if err := validateGrants(v.Grants); err != nil { + invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAccessControlTranslation(v *types.AccessControlTranslation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"} + if len(v.Owner) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Owner")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateAnalyticsFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if v.StorageClassAnalysis == nil { + invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis")) + } else if v.StorageClassAnalysis != nil { + if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error { + if v == 
nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"} + if v.S3BucketDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) + } else if v.S3BucketDestination != nil { + if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsFilter(v types.AnalyticsFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"} + switch uv := v.(type) { + case *types.AnalyticsFilterMemberAnd: + if err := validateAnalyticsAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.AnalyticsFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"} + if len(v.Format) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Format")) + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateLifecycleRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"} + if v.LoggingEnabled != nil { + if err := validateLoggingEnabled(v.LoggingEnabled); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCORSConfiguration(v *types.CORSConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"} + if v.CORSRules == nil { + invalidParams.Add(smithy.NewErrParamRequired("CORSRules")) + } else if v.CORSRules != nil { + if err := validateCORSRules(v.CORSRules); err != nil { + invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCORSRule(v *types.CORSRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSRule"} + if v.AllowedMethods == nil { + invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods")) + } + if v.AllowedOrigins == nil { + invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return 
nil + } +} + +func validateCORSRules(v []types.CORSRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSRules"} + for i := range v { + if err := validateCORSRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDelete(v *types.Delete) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Delete"} + if v.Objects == nil { + invalidParams.Add(smithy.NewErrParamRequired("Objects")) + } else if v.Objects != nil { + if err := validateObjectIdentifierList(v.Objects); err != nil { + invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDestination(v *types.Destination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Destination"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.AccessControlTranslation != nil { + if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicationTime != nil { + if err := validateReplicationTime(v.ReplicationTime); err != nil { + invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError)) + } + } + if v.Metrics != nil { + if err := validateMetrics(v.Metrics); err != nil { + invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEncryption(v *types.Encryption) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Encryption"} + if len(v.EncryptionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateErrorDocument(v *types.ErrorDocument) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateExistingObjectReplication(v *types.ExistingObjectReplication) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlacierJobParameters(v *types.GlacierJobParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"} + if len(v.Tier) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Tier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGrant(v *types.Grant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grant"} + if v.Grantee != nil { + if err := validateGrantee(v.Grantee); err != nil { + invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + 
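The `AddNested(fmt.Sprintf("[%d]", i), ...)` calls in the list validators above are what turn a per-element failure into a dotted field path such as `CORSRules[0].AllowedMethods`. A minimal standalone sketch of that mechanism using smithy-go directly (the exact `Error()` wording varies across smithy-go versions):

```go
package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
)

func main() {
	// One element's failure, mirroring what validateCORSRule reports.
	inner := smithy.InvalidParamsError{Context: "CORSRule"}
	inner.Add(smithy.NewErrParamRequired("AllowedMethods"))

	// The list validator nests it under an index segment, exactly like
	// validateCORSRules does in the hunk above.
	outer := smithy.InvalidParamsError{Context: "CORSRules"}
	outer.AddNested(fmt.Sprintf("[%d]", 0), inner)

	// Prints a validation error whose field path includes the index.
	fmt.Println(outer.Error())
}
```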
} else { + return nil + } +} + +func validateGrantee(v *types.Grantee) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grantee"} + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGrants(v []types.Grant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grants"} + for i := range v { + if err := validateGrant(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIndexDocument(v *types.IndexDocument) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"} + if v.Suffix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Suffix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateIntelligentTieringFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.Tierings == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tierings")) + } else if v.Tierings != nil { + if err := validateTieringList(v.Tierings); err != nil { + invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"} + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) + } + } + if v.And != nil { + if err := validateIntelligentTieringAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryConfiguration(v *types.InventoryConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"} + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateInventoryDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if v.Filter != nil { + if err := validateInventoryFilter(v.Filter); err != nil { + 
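Two different "required" idioms sit side by side in these validators: optional strings are `*string`, so absence is a `== nil` check (e.g. IndexDocument.Suffix), while enums are plain string types whose zero value is `""`, hence the `len(...) == 0` checks (e.g. Grantee.Type). A small illustration, assuming the `types.TypeCanonicalUser` enum constant (check your SDK version's enum names):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// *string field: nil means "not set" and trips NewErrParamRequired.
	doc := types.IndexDocument{}
	fmt.Println(doc.Suffix == nil) // true → "Suffix" would be reported
	doc.Suffix = aws.String("index.html")

	// Enum field: the zero value "" means "not set", so the validator
	// uses len(v.Type) == 0 rather than a nil comparison.
	grantee := types.Grantee{}
	fmt.Println(len(grantee.Type) == 0) // true → "Type" would be reported
	grantee.Type = types.TypeCanonicalUser
	fmt.Println(len(grantee.Type) == 0) // false → passes validateGrantee
}
```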
invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if len(v.IncludedObjectVersions) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions")) + } + if v.Schedule == nil { + invalidParams.Add(smithy.NewErrParamRequired("Schedule")) + } else if v.Schedule != nil { + if err := validateInventorySchedule(v.Schedule); err != nil { + invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryDestination(v *types.InventoryDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"} + if v.S3BucketDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) + } else if v.S3BucketDestination != nil { + if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryEncryption(v *types.InventoryEncryption) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"} + if v.SSEKMS != nil { + if err := validateSSEKMS(v.SSEKMS); err != nil { + invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryFilter(v *types.InventoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"} + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if len(v.Format) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Format")) + } + if v.Encryption != nil { + if err := validateInventoryEncryption(v.Encryption); err != nil { + invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventorySchedule(v *types.InventorySchedule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"} + if len(v.Frequency) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Frequency")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"} + if v.LambdaFunctionArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) 
error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"} + for i := range v { + if err := validateLambdaFunctionConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRule(v *types.LifecycleRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"} + if v.Filter != nil { + if err := validateLifecycleRuleFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} + switch uv := v.(type) { + case *types.LifecycleRuleFilterMemberAnd: + if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.LifecycleRuleFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRules(v []types.LifecycleRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"} + for i := range v { + if err := validateLifecycleRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLoggingEnabled(v *types.LoggingEnabled) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"} + if v.TargetBucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetBucket")) + } + if v.TargetGrants != nil { + if err := validateTargetGrants(v.TargetGrants); err != nil { + invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError)) + } + } + if v.TargetPrefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetrics(v *types.Metrics) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Metrics"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsAndOperator(v *types.MetricsAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"} + if v.Tags != nil { + if 
err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsConfiguration(v *types.MetricsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateMetricsFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsFilter(v types.MetricsFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"} + switch uv := v.(type) { + case *types.MetricsFilterMemberAnd: + if err := validateMetricsAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.MetricsFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateNotificationConfiguration(v *types.NotificationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"} + if v.TopicConfigurations != nil { + if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil { + invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError)) + } + } + if v.QueueConfigurations != nil { + if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil { + invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError)) + } + } + if v.LambdaFunctionConfigurations != nil { + if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil { + invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateObjectIdentifier(v *types.ObjectIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateObjectIdentifierList(v []types.ObjectIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"} + for i := range v { + if err := validateObjectIdentifier(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputLocation(v *types.OutputLocation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"} + if v.S3 != nil { + if err := validateS3Location(v.S3); err != nil { + invalidParams.AddNested("S3", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControls(v *types.OwnershipControls) error { + if v == nil { + return nil + } + 
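MetricsFilter, like AnalyticsFilter and LifecycleRuleFilter earlier, is a tagged union: an interface with one `...Member` wrapper struct per variant. That is why validateMetricsFilter switches on the concrete type and only recurses into members that carry nested shapes. A short construction example:

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// A Tag member carries a nested types.Tag, so the validator's type
	// switch recurses into it via validateTag.
	var filter types.MetricsFilter = &types.MetricsFilterMemberTag{
		Value: types.Tag{
			Key:   aws.String("env"),
			Value: aws.String("prod"),
		},
	}
	_ = filter

	// A Prefix member wraps a plain string; the switch has no case for
	// it because there is nothing nested left to validate.
	filter = &types.MetricsFilterMemberPrefix{Value: "logs/"}
	_ = filter
}
```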
invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateOwnershipControlsRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"} + if len(v.ObjectOwnership) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"} + for i := range v { + if err := validateOwnershipControlsRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateQueueConfiguration(v *types.QueueConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"} + if v.QueueArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueueArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateQueueConfigurationList(v []types.QueueConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"} + for i := range v { + if err := validateQueueConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"} + if v.HostName == nil { + invalidParams.Add(smithy.NewErrParamRequired("HostName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaModifications(v *types.ReplicaModifications) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} + if v.Role == nil { + invalidParams.Add(smithy.NewErrParamRequired("Role")) + } + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateReplicationRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRule(v *types.ReplicationRule) error { + if v == nil { + return 
nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} + if v.Filter != nil { + if err := validateReplicationRuleFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.SourceSelectionCriteria != nil { + if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError)) + } + } + if v.ExistingObjectReplication != nil { + if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError)) + } + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} + switch uv := v.(type) { + case *types.ReplicationRuleFilterMemberAnd: + if err := validateReplicationRuleAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.ReplicationRuleFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRules(v []types.ReplicationRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"} + for i := range v { + if err := validateReplicationRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationTime(v *types.ReplicationTime) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.Time == nil { + invalidParams.Add(smithy.NewErrParamRequired("Time")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"} + if len(v.Payer) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Payer")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRestoreRequest(v *types.RestoreRequest) error { + if v == nil { + 
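The `if v.Destination == nil { ... } else if v.Destination != nil { ... }` shape above is a codegen artifact (the second condition is redundant after the `else`), but the semantics are deliberate: report a missing member, otherwise recurse and nest the child's errors. A hand-written sketch of the same pattern; the `*Sketch` helpers are hypothetical stand-ins, not SDK code:

```go
package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Hypothetical stand-in for the generated validateDestination.
func validateDestinationSketch(v *types.Destination) error {
	invalidParams := smithy.InvalidParamsError{Context: "Destination"}
	if v.Bucket == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// Hypothetical stand-in for the Destination branch of validateReplicationRule.
func validateRuleSketch(dst *types.Destination) error {
	invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"}
	if dst == nil {
		// Member absent entirely: report the member itself as required.
		invalidParams.Add(smithy.NewErrParamRequired("Destination"))
	} else if err := validateDestinationSketch(dst); err != nil {
		// Member present but invalid: nest its errors under "Destination".
		invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	fmt.Println(validateRuleSketch(nil))                  // Destination required
	fmt.Println(validateRuleSketch(&types.Destination{})) // nested Bucket required
}
```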
return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"} + if v.GlacierJobParameters != nil { + if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError)) + } + } + if v.SelectParameters != nil { + if err := validateSelectParameters(v.SelectParameters); err != nil { + invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError)) + } + } + if v.OutputLocation != nil { + if err := validateOutputLocation(v.OutputLocation); err != nil { + invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRoutingRule(v *types.RoutingRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"} + if v.Redirect == nil { + invalidParams.Add(smithy.NewErrParamRequired("Redirect")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRoutingRules(v []types.RoutingRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"} + for i := range v { + if err := validateRoutingRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3Location(v *types.S3Location) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3Location"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) + } + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) + } + if v.Encryption != nil { + if err := validateEncryption(v.Encryption); err != nil { + invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) + } + } + if v.AccessControlList != nil { + if err := validateGrants(v.AccessControlList); err != nil { + invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError)) + } + } + if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSelectParameters(v *types.SelectParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"} + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"} + if len(v.SSEAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionConfiguration(v 
*types.ServerSideEncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateServerSideEncryptionRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"} + if v.ApplyServerSideEncryptionByDefault != nil { + if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"} + for i := range v { + if err := validateServerSideEncryptionRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"} + if v.SseKmsEncryptedObjects != nil { + if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaModifications != nil { + if err := validateReplicaModifications(v.ReplicaModifications); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSSEKMS(v *types.SSEKMS) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"} + if v.DataExport != nil { + if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil { + invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"StorageClassAnalysisDataExport"} + if len(v.OutputSchemaVersion) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion")) + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateAnalyticsExportDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagging(v *types.Tagging) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tagging"} + if v.TagSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagSet")) + } else if v.TagSet != nil { + if err := validateTagSet(v.TagSet); err != nil { + invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagSet(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagSet"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrant(v *types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"} + if v.Grantee != nil { + if err := validateGrantee(v.Grantee); err != nil { + invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrants(v []types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"} + for i := range v { + if err := validateTargetGrant(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTiering(v *types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tiering"} + if len(v.AccessTier) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTieringList(v []types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TieringList"} + for i := range v { + if err := validateTiering(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfiguration(v *types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"} + if v.TopicArn == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("TopicArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfigurationList(v []types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"} + for i := range v { + if err := validateTopicConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"} + if v.ErrorDocument != nil { + if err := validateErrorDocument(v.ErrorDocument); err != nil { + invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError)) + } + } + if v.IndexDocument != nil { + if err := validateIndexDocument(v.IndexDocument); err != nil { + invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError)) + } + } + if v.RedirectAllRequestsTo != nil { + if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError)) + } + } + if v.RoutingRules != nil { + if err := validateRoutingRules(v.RoutingRules); err != nil { + invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCopyObjectInput(v *CopyObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateBucketInput(v *CreateBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInput(v *DeleteBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketMetricsConfigurationInput(v 
*DeleteBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectInput(v *DeleteObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Delete == nil { + invalidParams.Add(smithy.NewErrParamRequired("Delete")) + } else if v.Delete != nil { + if err := validateDelete(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"} + if v.Bucket == nil { + 
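In practice these validateOp*Input checks surface before any network I/O: the validation middleware runs in the Initialize step, so a call with missing required members fails locally. A quick demonstration (no credentials needed; the exact error text varies by SDK version):

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	client := s3.New(s3.Options{Region: "us-east-1"})

	// Bucket and Key are both required; the input is rejected before a
	// request is ever built or signed.
	_, err := client.DeleteObjectTagging(context.TODO(), &s3.DeleteObjectTaggingInput{})
	fmt.Println(err) // wraps an InvalidParameter error naming Bucket and Key
}
```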
invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAclInput(v *GetBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error 
{ + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAclInput(v *GetObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.ObjectAttributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectInput(v *GetObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } 
else { + return nil + } +} + +func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadBucketInput(v *HeadBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadObjectInput(v *HeadObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"} + if v.Bucket 
== nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectsInput(v *ListObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectsV2Input(v *ListObjectsV2Input) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListPartsInput(v *ListPartsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.AccelerateConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAclInput(v *PutBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + 
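All of the generated `validateOp*Input` functions in this file share one shape: a nil input is treated as "nothing to validate", each missing required field appends an `ErrParamRequired`, and the accumulated `InvalidParamsError` is returned only when at least one violation was recorded. A minimal, self-contained sketch of that pattern, assuming only `github.com/aws/smithy-go`; `FooInput` and `validateOpFooInput` are hypothetical names used for illustration, not part of this diff:

```go
package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
)

// FooInput is a hypothetical input type, used only to illustrate the shape
// shared by the generated validateOp*Input functions.
type FooInput struct {
	Bucket *string
	Key    *string
}

// validateOpFooInput mirrors the generated pattern: a nil input is treated
// as valid (nothing to check), each missing required field adds an
// ErrParamRequired entry, and the accumulated InvalidParamsError is
// returned only when at least one violation was recorded.
func validateOpFooInput(v *FooInput) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "FooInput"}
	if v.Bucket == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
	}
	if v.Key == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Key"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	bucket := "example-bucket"
	// Key is deliberately left nil, so the validator reports it as missing.
	err := validateOpFooInput(&FooInput{Bucket: &bucket})
	fmt.Println(err)
}
```

Collecting every violation before returning, rather than failing on the first missing field, lets a single returned error report the complete set of invalid parameters, which is why each generated validator ends with the same `Len() > 0` check.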
+func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.AnalyticsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration")) + } else if v.AnalyticsConfiguration != nil { + if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CORSConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration")) + } else if v.CORSConfiguration != nil { + if err := validateCORSConfiguration(v.CORSConfiguration); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } else if v.ServerSideEncryptionConfiguration != nil { + if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.IntelligentTieringConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration")) + } else if v.IntelligentTieringConfiguration != nil { + if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.InventoryConfiguration == nil { 
+ invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration")) + } else if v.InventoryConfiguration != nil { + if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.LifecycleConfiguration != nil { + if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.BucketLoggingStatus == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus")) + } else if v.BucketLoggingStatus != nil { + if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.MetricsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration")) + } else if v.MetricsConfiguration != nil { + if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.NotificationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration")) + } else if v.NotificationConfiguration != nil { + if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if 
v.OwnershipControls == nil { + invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls")) + } else if v.OwnershipControls != nil { + if err := validateOwnershipControls(v.OwnershipControls); err != nil { + invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ReplicationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) + } else if v.ReplicationConfiguration != nil { + if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.RequestPaymentConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration")) + } else if v.RequestPaymentConfiguration != nil { + if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.VersioningConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"} + 
if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.WebsiteConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration")) + } else if v.WebsiteConfiguration != nil { + if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectAclInput(v *PutObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectInput(v *PutObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) 
error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.PublicAccessBlockConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRestoreObjectInput(v *RestoreObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.RestoreRequest != nil { + if err := validateRestoreRequest(v.RestoreRequest); err != nil { + invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartInput(v *UploadPartInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"} + if v.RequestRoute == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestRoute")) + } + if v.RequestToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md new file mode 100644 index 000000000000..5edcc9ee9eeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -0,0 +1,105 @@ +# v1.11.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables, and adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode that adds client-side rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated API models +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.6.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go new file mode 100644 index 000000000000..7bb069844422 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -0,0 +1,433 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operations call for AWS Single Sign-On. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. 
+ Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will make + // when calling an operation that fails with a retryable error. A value of 0 is + // ignored, and will not be used to configure the API client's default retryer or + // modify a per-operation call's retry max attempts. When creating new API clients, + // this member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different from the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if the + // Retryer option is not also specified. When creating new API clients, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Per-operation call overrides are not + // currently supported, but may be in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil, the API client will use a default retryer. The kind of + // default retryer created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto, this will store what the resolved + // value was at that point in time. Per-operation call overrides are not + // currently supported, but may be in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to the client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go new file mode 100644 index 000000000000..85556599f83a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -0,0 +1,127 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the STS short-term credentials for a given role name that is assigned to +// the user. +func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { + if params == nil { + params = &GetRoleCredentialsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRoleCredentialsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRoleCredentialsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. 
+ AccountId *string + + // The friendly name of the role that is assigned to the user. + // + // This member is required. + RoleName *string + + noSmithyDocumentSerde +} + +type GetRoleCredentialsOutput struct { + + // The credentials for the role that is assigned to the user. + RoleCredentials *types.RoleCredentials + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetRoleCredentials", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go new file mode 100644 index 000000000000..1923c4a9d6bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all roles that are assigned to the user for a given AWS account. 
+func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if params == nil { + params = &ListAccountRolesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountRolesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountRolesInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The number of items that clients can request per page. + MaxResults *int32 + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountRolesOutput struct { + + // The page token that the client uses to retrieve the next page of results. + NextToken *string + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []types.RoleInfo + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation.
+type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + +// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles +type ListAccountRolesPaginatorOptions struct { + // The number of items that clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAccountRolesPaginator is a paginator for ListAccountRoles +type ListAccountRolesPaginator struct { + options ListAccountRolesPaginatorOptions + client ListAccountRolesAPIClient + params *ListAccountRolesInput + nextToken *string + firstPage bool +} + +// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator +func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { + if params == nil { + params = &ListAccountRolesInput{} + } + + options := ListAccountRolesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountRolesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountRolesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccountRoles page. +func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountRoles", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go new file mode 100644 index 000000000000..c76f6ca38d1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -0,0 +1,220 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by +// the administrator of the account. 
For more information, see Assign User Access +// (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if params == nil { + params = &ListAccountsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // This is the number of items clients can request per page. + MaxResults *int32 + + // (Optional) When requesting subsequent pages, this is the page token from the + // previous response output. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountsOutput struct { + + // A paginated response with the list of account information and the next token if + // more results are available. + AccountList []types.AccountInfo + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListAccountsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountsAPIClient is a client that implements the ListAccounts operation. 
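+//
+// Callers that prefer not to use the paginator below can thread NextToken
+// through by hand. A rough sketch (illustrative only; ctx, client, and
+// accessToken are assumed placeholders):
+//
+//	var nextToken *string
+//	for {
+//		out, err := client.ListAccounts(ctx, &ListAccountsInput{
+//			AccessToken: accessToken,
+//			NextToken:   nextToken,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		for _, acct := range out.AccountList {
+//			_ = acct.AccountName // each entry is a types.AccountInfo
+//		}
+//		if out.NextToken == nil || len(*out.NextToken) == 0 {
+//			break
+//		}
+//		nextToken = out.NextToken
+//	}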
+type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + +// ListAccountsPaginatorOptions is the paginator options for ListAccounts +type ListAccountsPaginatorOptions struct { + // This is the number of items clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAccountsPaginator is a paginator for ListAccounts +type ListAccountsPaginator struct { + options ListAccountsPaginatorOptions + client ListAccountsAPIClient + params *ListAccountsInput + nextToken *string + firstPage bool +} + +// NewListAccountsPaginator returns a new ListAccountsPaginator +func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { + if params == nil { + params = &ListAccountsInput{} + } + + options := ListAccountsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccounts page. +func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccounts", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go new file mode 100644 index 000000000000..cbc72877d9b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -0,0 +1,111 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the client- and server-side session that is associated with the user. 
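+// LogoutOutput carries only result metadata, so callers typically check just
+// the returned error. A one-line sketch (ctx, client, and accessToken are
+// assumed placeholders):
+//
+//	_, err := client.Logout(ctx, &LogoutInput{AccessToken: accessToken})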
+func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { + if params == nil { + params = &LogoutInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*LogoutOutput) + out.ResultMetadata = metadata + return out, nil +} + +type LogoutInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + noSmithyDocumentSerde +} + +type LogoutOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpLogoutValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Logout", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go new file mode 100644 index 000000000000..6a1851da2510 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -0,0 +1,1151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsRestjson1_deserializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) + } + output := &GetRoleCredentialsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return 
awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRoleCredentialsOutput + if *v == nil { + sv = &GetRoleCredentialsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "roleCredentials": + if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccountRoles struct { +} + +func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) + } + output := &ListAccountRolesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountRolesOutput + if *v == nil { + sv = &ListAccountRolesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "roleList": + if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccounts struct { +} + +func (*awsRestjson1_deserializeOpListAccounts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) + } + output := &ListAccountsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder 
:= json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountsOutput + if *v == nil { + sv = &ListAccountsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountList": + if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + 
return nil +} + +type awsRestjson1_deserializeOpLogout struct { +} + +func (*awsRestjson1_deserializeOpLogout) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) + } + output := &LogoutOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := 
awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccountInfo + if *v == nil { + sv = &types.AccountInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "accountName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) + } + sv.AccountName = ptr.String(jtv) + } + + case "emailAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) + } + sv.EmailAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AccountInfo + if *v == nil { + cv = []types.AccountInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AccountInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } 
+ if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleCredentials + if *v == nil { + sv = &types.RoleCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "expiration": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Expiration = i64 + } + + case "secretAccessKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) + } + sv.SecretAccessKey = ptr.String(jtv) + } + + case "sessionToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) + } + sv.SessionToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleInfo + if *v == nil { + sv = &types.RoleInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "roleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) + } + sv.RoleName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RoleInfo + if *v == nil { + cv = []types.RoleInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RoleInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = &types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+			}
+			sv.Message = ptr.String(jtv)
+		}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnauthorizedException
+	if *v == nil {
+		sv = &types.UnauthorizedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
new file mode 100644
index 000000000000..c5d03d8e4a3a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -0,0 +1,20 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sso provides the API client, operations, and parameter types for AWS
+// Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to assign
+// user access to AWS SSO resources such as the user portal. Users can get AWS
+// account applications and roles assigned to them and get federated into the
+// application. For general information about AWS SSO, see What is AWS Single
+// Sign-On?
+// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the
+// AWS SSO User Guide. This API reference guide describes the AWS SSO Portal
+// operations that you can call programmatically and includes detailed information
+// on data types and errors. AWS provides SDKs that consist of libraries and sample
+// code for various programming languages and platforms, such as Java, Ruby, .Net,
+// iOS, or Android. The SDKs provide a convenient way to create programmatic access
+// to AWS SSO and other AWS services. For more information about the AWS SDKs,
+// including how to download and install them, see Tools for Amazon Web Services
+// (http://aws.amazon.com/tools/).
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
new file mode 100644
index 000000000000..43c06f11afea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/url"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
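+//
+// A custom resolver can be supplied when the default regional resolution is not
+// wanted; EndpointResolverFromURL below builds one from a fixed URL. A sketch
+// (the URL is an assumed placeholder):
+//
+//	resolver := EndpointResolverFromURL("https://portal.sso.example.test")
+//	_ = resolver // typically assigned to the client Options' EndpointResolver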
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+	if o.EndpointResolver != nil {
+		return
+	}
+	o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+	e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+	for _, fn := range optFns {
+		fn(&e)
+	}
+
+	return EndpointResolverFunc(
+		func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+			if len(e.SigningRegion) == 0 {
+				e.SigningRegion = region
+			}
+			return e, nil
+		},
+	)
+}
+
+type ResolveEndpoint struct {
+	Resolver EndpointResolver
+	Options  EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.Resolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	eo := m.Options
+	eo.Logger = middleware.GetLogger(ctx)
+
+	var endpoint aws.Endpoint
+	endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+		signingName := endpoint.SigningName
+		if len(signingName) == 0 {
+			signingName = "awsssoportal"
+		}
+		ctx = awsmiddleware.SetSigningName(ctx, signingName)
+	}
+	ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+	ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+	ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+	return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+	return stack.Serialize.Insert(&ResolveEndpoint{
+		Resolver: o.EndpointResolver,
+		Options:  o.EndpointOptions,
+	}, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+	return err
+}
+
+type wrappedEndpointResolver struct {
+	awsResolver aws.EndpointResolverWithOptions
+	resolver    EndpointResolver
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	if w.awsResolver == nil {
+		goto fallback
+	}
+	endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+	if err == nil {
+		return endpoint, nil
+	}
+
+	if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
+		return endpoint, err
+	}
+
+fallback:
+	if w.resolver == nil {
+		return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
+	}
+	return w.resolver.ResolveEndpoint(region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+	return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns an aws.EndpointNotFoundError error, the resolver will use the provided
+// fallbackResolver for resolution.
+//
+// fallbackResolver must not be nil
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+		resolver:    fallbackResolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
new file mode 100644
index 000000000000..5be0e34cd6ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -0,0 +1,30 @@
+{
+  "dependencies": {
+    "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+    "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+    "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+    "github.com/aws/smithy-go": "v1.4.0"
+  },
+  "files": [
+    "api_client.go",
+    "api_client_test.go",
+    "api_op_GetRoleCredentials.go",
+    "api_op_ListAccountRoles.go",
+    "api_op_ListAccounts.go",
+    "api_op_Logout.go",
+    "deserializers.go",
+    "doc.go",
+    "endpoints.go",
+    "generated.json",
+    "internal/endpoints/endpoints.go",
+    "internal/endpoints/endpoints_test.go",
+    "protocol_test.go",
+    "serializers.go",
+    "types/errors.go",
+    "types/types.go",
+    "validators.go"
+  ],
+  "go": "1.15",
+  "module": "github.com/aws/aws-sdk-go-v2/service/sso",
+  "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
new file mode 100644
index 000000000000..6ea751b8f842
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sso
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
new file mode 100644
index 000000000000..c8d1689927d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -0,0 +1,390 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + 
endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + 
RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go new file mode 100644 index 000000000000..29e320811942 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -0,0 +1,256 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRoleCredentialsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/federation/credentials") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.RoleName != nil { + encoder.SetQuery("role_name").String(*v.RoleName) + } + + return nil +} + +type awsRestjson1_serializeOpListAccountRoles struct { +} + +func (*awsRestjson1_serializeOpListAccountRoles) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountRolesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/roles") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListAccounts struct { +} + +func (*awsRestjson1_serializeOpListAccounts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpLogout struct { +} + +func (*awsRestjson1_serializeOpLogout) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, 
+) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*LogoutInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/logout") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go new file mode 100644 index 000000000000..1401d585cfed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -0,0 +1,87 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates that a problem occurred with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { return "InvalidRequestException" } +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { return "ResourceNotFoundException" } +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is being made too frequently and is more than what +// the server can handle. 
+type TooManyRequestsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { return "TooManyRequestsException" } +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { return "UnauthorizedException" } +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go new file mode 100644 index 000000000000..051056b75985 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -0,0 +1,64 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// Provides information about your AWS account. +type AccountInfo struct { + + // The identifier of the AWS account that is assigned to the user. + AccountId *string + + // The display name of the AWS account that is assigned to the user. + AccountName *string + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string + + noSmithyDocumentSerde +} + +// Provides information about the role credentials that are assigned to the user. +type RoleCredentials struct { + + // The identifier used for the temporary security credentials. For more + // information, see Using Temporary Security Credentials to Request Access to AWS + // Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string + + // The date on which temporary security credentials expire. + Expiration int64 + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string + + noSmithyDocumentSerde +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + + // The identifier of the AWS account assigned to the user. + AccountId *string + + // The friendly name of the role that is assigned to the user. 
+ RoleName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go new file mode 100644 index 000000000000..f6bf461f74ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpGetRoleCredentials struct { +} + +func (*validateOpGetRoleCredentials) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRoleCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRoleCredentialsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccountRoles struct { +} + +func (*validateOpListAccountRoles) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountRolesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountRolesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccounts struct { +} + +func (*validateOpListAccounts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpLogout struct { +} + +func (*validateOpLogout) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*LogoutInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpLogoutInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) +} + +func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) +} + +func addOpListAccountsValidationMiddleware(stack 
*middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) +} + +func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpLogout{}, middleware.After) +} + +func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} + if v.RoleName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleName")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountsInput(v *ListAccountsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpLogoutInput(v *LogoutInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md new file mode 100644 index 000000000000..7c861f7cd688 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -0,0 +1,116 @@ +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
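The files above and below are verbatim vendored sources, but their generated doc comments double as API documentation, so a short consumer-side sketch may help tie them together: the `sso` validators require `AccessToken`, `AccountId`, and `RoleName` before `GetRoleCredentials` is serialized; the endpoint resolver's `Options` select between the standard, FIPS, and dual-stack hostnames defined in `endpoints.go`; and the `sts` client's `RetryMode`/`RetryMaxAttempts` options correspond to the v1.15.0 changelog entry above. This is a minimal sketch assuming the standard `aws-sdk-go-v2` `config` loader is available alongside these modules; the region, account ID, role names, session name, ARN, and token values are placeholders, not values from this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Shared config resolution; the region is a placeholder.
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// Enabling the FIPS variant steers the resolver onto the
	// portal.sso-fips.{region}.amazonaws.com hostnames from endpoints.go.
	ssoClient := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
	})

	// All three members are required by validateOpGetRoleCredentialsInput;
	// the values here are placeholders.
	creds, err := ssoClient.GetRoleCredentials(context.TODO(), &sso.GetRoleCredentialsInput{
		AccessToken: aws.String("<sso-access-token>"),
		AccountId:   aws.String("111122223333"),
		RoleName:    aws.String("ExampleRole"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("SSO credentials expire at:", creds.RoleCredentials.Expiration)

	// The sts client vendored below follows the same generated pattern;
	// RetryMode and RetryMaxAttempts are the Options fields documented in
	// api_client.go, and RoleArn/RoleSessionName are the two members
	// AssumeRole requires.
	stsClient := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.RetryMode = aws.RetryModeAdaptive
		o.RetryMaxAttempts = 5
	})
	out, err := stsClient.AssumeRole(context.TODO(), &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
		RoleSessionName: aws.String("example-session"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed:", aws.ToString(out.AssumedRoleUser.Arn))
}
```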
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 000000000000..4bff1dfe229b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,534 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "STS" +const ServiceAPIVersion = "2011-06-15" + +// Client provides the API client to make operation calls for AWS Security Token +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the default retryer created by the API client, or modify + // per operation call's retry max attempts. When creating new API clients, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil.
If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating new API clients, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. This currently does not support per operation + // call overrides, but may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retryer created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. This currently does not support per operation + // call overrides, but may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to the client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 000000000000..7d00b6bd7fa1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,417 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources that you might not normally have access to. These +// temporary credentials consist of an access key ID, a secret access key, and a +// security token. Typically, you use AssumeRole within your account or for +// cross-account access. For a comparison of AssumeRole with other API operations +// that produce temporary credentials, see Requesting Temporary Security +// Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRole can be used to make API calls to any Amazon Web Services service with +// the following exception: You cannot call the Amazon Web Services STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. 
The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: A role +// trust policy that specifies who can assume the role and a permissions policy +// that specifies what can be done with the role. You specify the trusted principal +// who is allowed to assume the role in the role trust policy. To assume a role +// from a different account, your Amazon Web Services account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when the +// role is created. That trust policy states which accounts are allowed to delegate +// that access to users in the account. A user who wants to access a role in a +// different account must also have permissions that are delegated from the user +// account administrator. The administrator must attach a policy that allows the +// user to call AssumeRole for the ARN of the role in the other account. To allow a +// user to assume a role in the same account, you can do either of the +// following: +// +// * Attach a policy to the user that allows the user to call +// AssumeRole (as long as the role's trust policy trusts the account). +// +// * Add the +// user as a principal directly in the role's trust policy. +// +// You can do either +// because the role’s trust policy acts as an IAM resource-based policy. When a +// resource-based policy grants access to a principal in the same account, no +// additional identity-based policy is required. For more information about trust +// policies and resource-based policies, see IAM Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the +// IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your +// session. These tags are called session tags. For more information about session +// tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. An administrator must grant you the permissions necessary to +// pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include +// multi-factor authentication (MFA) information when you call AssumeRole. 
This is
+// useful for cross-account scenarios to ensure that the user that assumes the role
+// has been authenticated with an Amazon Web Services MFA device. In that scenario,
+// the trust policy of the role being assumed includes a condition that tests for
+// MFA authentication. If the caller does not include valid MFA information, the
+// request to assume the role is denied. The condition in a trust policy that tests
+// for MFA authentication might look like the following example. "Condition":
+// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see
+// Configuring MFA-Protected API Access
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the
+// IAM User Guide. To use MFA with AssumeRole, you pass values for the
+// SerialNumber and TokenCode parameters. The SerialNumber value identifies the
+// user's hardware or virtual MFA device. The TokenCode is the time-based one-time
+// password (TOTP) that the MFA device produces.
+func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
+	if params == nil {
+		params = &AssumeRoleInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*AssumeRoleOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type AssumeRoleInput struct {
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	//
+	// This member is required.
+	RoleArn *string
+
+	// An identifier for the assumed role session. Use the role session name to
+	// uniquely identify a session when the same role is assumed by different
+	// principals or for different reasons. In cross-account scenarios, the role
+	// session name is visible to, and can be logged by the account that owns the role.
+	// The role session name is also used in the ARN of the assumed role principal.
+	// This means that subsequent cross-account API requests that use the temporary
+	// security credentials will expose the role session name to the external account
+	// in their CloudTrail logs. The regex used to validate this parameter is a string
+	// of characters consisting of upper- and lower-case alphanumeric characters with
+	// no spaces. You can also include underscores or any of the following characters:
+	// =,.@-
+	//
+	// This member is required.
+	RoleSessionName *string
+
+	// The duration, in seconds, of the role session. The value specified can range
+	// from 900 seconds (15 minutes) up to the maximum session duration set for the
+	// role. The maximum session duration setting can have a value from 1 hour to 12
+	// hours. If you specify a value higher than this setting or the administrator
+	// setting (whichever is lower), the operation fails. For example, if you specify a
+	// session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails. Role chaining limits your Amazon Web
+	// Services CLI or Amazon Web Services API role session to a maximum of one hour.
+	// When you use the AssumeRole API operation to assume a role, you can specify the
+	// duration of your role session with the DurationSeconds parameter. You can
+	// specify a parameter value of up to 43200 seconds (12 hours), depending on the
+	// maximum session duration setting for your role.
However, if you assume a role + // using role chaining and provide a DurationSeconds parameter value greater than + // one hour, the operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see How to Use an External ID When Granting Access to Your + // Amazon Web Services Resources to a Third Party + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@:/- + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. 
Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user who + // is making the AssumeRole call. Specify this value if the trust policy of the + // role being assumed includes a condition that requires MFA authentication. The + // value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter + // is a string of characters consisting of upper- and lower-case alphanumeric + // characters with no spaces. You can also include underscores or any of the + // following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@-. You cannot use a value that begins with the text aws:. This prefix is + // reserved for Amazon Web Services internal use. + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a key + // name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + // The plaintext session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. You can pass a session tag with the same key as a tag that is + // already attached to the role. When you do, session tags override a role tag with + // the same key. Tag key–value pairs are not case sensitive, but case is preserved. + // This means that you cannot have separate Department and department tag keys. + // Assume that the role has the Department=Marketing tag and you pass the + // department=engineering session tag. Department and department are not saved as + // separate tags, and the session tag passed in the request takes precedence over + // the role tag. Additionally, if you used temporary credentials to perform this + // operation, the new session inherits any transitive session tags from the calling + // session. If you pass a session tag with the same key as an inherited tag, the + // operation fails. To view the inherited tags for a session, see the CloudTrail + // logs. For more information, see Viewing Session Tags in CloudTrail + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. The format for this parameter, as described by its regex pattern, is a + // sequence of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set a + // tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. 
When you set session tags as + // transitive, the session policy and session tags packed binary limit is not + // affected. If you choose not to specify a transitive tag key, then no tags are + // passed from this session to any subsequent sessions. + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole. + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRole", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 000000000000..e12315e4c140 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,377 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. 
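+//
+// As a rough usage sketch only (cfg, ctx, and encodedAssertion are assumed to
+// exist in the caller's code, and the ARNs are illustrative placeholders):
+//
+//	client := sts.NewFromConfig(cfg)
+//	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
+//		RoleArn:       aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
+//		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
+//		SAMLAssertion: aws.String(encodedAssertion),
+//	})
+//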
+// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this +// operation consist of an access key ID, a secret access key, and a security +// token. Applications can use these temporary security credentials to sign calls +// to Amazon Web Services services. Session Duration By default, the temporary +// security credentials created by AssumeRoleWithSAML last for one hour. However, +// you can use the optional DurationSeconds parameter to specify the duration of +// your session. Your role session lasts for the duration that you specify, or +// until the time specified in the SAML authentication response's +// SessionNotOnOrAfter value, whichever is shorter. You can provide a +// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour to 12 +// hours. To learn how to view the maximum value for your role, see View the +// Maximum Session Duration Setting for a Role +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM +// User Guide. Role chaining +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. Permissions The temporary security +// credentials created by AssumeRoleWithSAML can be used to make API calls to any +// Amazon Web Services service with the following exception: you cannot call the +// STS GetFederationToken or GetSessionToken API operations. (Optional) You can +// pass inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. 
You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of +// Amazon Web Services security credentials. The identity of the caller is +// validated by using keys in the metadata document that is uploaded for the SAML +// provider entity for your identity provider. Calling AssumeRoleWithSAML can +// result in an entry in your CloudTrail logs. The entry includes the value in the +// NameID element of the SAML assertion. We recommend that you use a NameIDType +// that is not associated with any personally identifiable information (PII). For +// example, you could instead use the persistent identifier +// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can +// configure your IdP to pass attributes into your SAML assertion as session tags. +// Each session tag consists of a key name and an associated value. For more +// information about session tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag +// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For +// these and additional limits, see IAM and STS Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// session policies and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates by +// percentage how close the policies and tags for your request are to the upper +// size limit. You can pass a session tag with the same key as a tag that is +// attached to the role. When you do, session tags override the role's tags with +// the same key. An administrator must grant you the permissions necessary to pass +// session tags. The administrator can also create granular permissions to allow +// you to pass only specific session tags. For more information, see Tutorial: +// Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. SAML Configuration Before your application can call +// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to +// issue the claims required by Amazon Web Services. Additionally, you must use +// Identity and Access Management (IAM) to create a SAML provider entity in your +// Amazon Web Services account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. 
+// For more information, see the following resources: +// +// * About SAML 2.0-based +// Federation +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. For more + // information, see Configuring a Relying Party and Adding Claims + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. 
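+	// As an illustration only (assuming input names an *AssumeRoleWithSAMLInput
+	// being prepared by the caller), a one-hour session could be requested as:
+	//
+	//	input.DurationSeconds = aws.Int32(3600)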
+ DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
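+	// A hypothetical sketch (the managed policy ARN below is a placeholder, and
+	// input names an *AssumeRoleWithSAMLInput being prepared by the caller):
+	//
+	//	input.PolicyArns = []types.PolicyDescriptorType{
+	//		{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+	//	}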
+ PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. + Issuer *string + + // A hash value based on the concatenation of the following: + // + // * The Issuer response + // value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last + // part of the ARN) of the SAML provider in IAM. + // + // The combination of NameQualifier + // and Subject can be used to uniquely identify a federated user. The following + // pseudocode shows how the hash value is calculated: BASE64 ( SHA1 ( + // "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. You can require + // users to set a source identity value when they assume a role. You do this by + // using the sts:SourceIdentity condition key in a role trust policy. That way, + // actions that are taken with the role are associated with that user. After the + // source identity is set, the value cannot be changed. It is present in the + // request for all actions that are taken by the role and persists across chained + // role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithSAML. You do this by adding an attribute to the SAML + // assertion. For more information about using source identity, see Monitor and + // control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent. 
If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If + // the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithSAML", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 000000000000..2e8b51c98d5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,395 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. 
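+//
+// As a brief, assumed-value sketch (client, ctx, and idToken are placeholders
+// supplied by the caller; the role ARN and session name are illustrative):
+//
+//	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
+//		RoleArn:          aws.String("arn:aws:iam::123456789012:role/ExampleWebRole"),
+//		RoleSessionName:  aws.String("app-session"),
+//		WebIdentityToken: aws.String(idToken),
+//	})
+//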
+// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or +// Amazon Cognito federated identities +// (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide +// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android +// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify a +// user. You can also supply the user with a consistent identity throughout the +// lifetime of an application. To learn more about Amazon Cognito, see Amazon +// Cognito Overview +// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito +// Overview +// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the Amazon Web Services SDK for iOS Developer Guide. Calling +// AssumeRoleWithWebIdentity does not require the use of Amazon Web Services +// security credentials. Therefore, you can distribute an application (for example, +// on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this API +// consist of an access key ID, a secret access key, and a security token. +// Applications can use these temporary security credentials to sign calls to +// Amazon Web Services service API operations. Session Duration By default, the +// temporary security credentials created by AssumeRoleWithWebIdentity last for one +// hour. However, you can use the optional DurationSeconds parameter to specify the +// duration of your session. You can provide a value from 900 seconds (15 minutes) +// up to the maximum session duration setting for the role. This setting can have a +// value from 1 hour to 12 hours. To learn how to view the maximum value for your +// role, see View the Maximum Session Duration Setting for a Role +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM +// User Guide. 
Permissions The temporary security credentials created by +// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web +// Services service with the following exception: you cannot call the STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass +// attributes into your web identity token as session tags. Each session tag +// consists of a key name and an associated value. For more information about +// session tags, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag +// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For +// these and additional limits, see IAM and STS Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// session policies and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates by +// percentage how close the policies and tags for your request are to the upper +// size limit. You can pass a session tag with the same key as a tag that is +// attached to the role. When you do, the session tag overrides the role tag with +// the same key. An administrator must grant you the permissions necessary to pass +// session tags. The administrator can also create granular permissions to allow +// you to pass only specific session tags. For more information, see Tutorial: +// Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles with +// Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. 
Identities Before your application can call +// AssumeRoleWithWebIdentity, you must have an identity token from a supported +// identity provider and create a role that the application can assume. The role +// that your application assumes must trust the identity provider that is +// associated with the identity token. In other words, the identity provider must +// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can +// result in an entry in your CloudTrail logs. The entry includes the Subject +// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided +// web identity token. We recommend that you avoid using any personally +// identifiable information (PII) in this field. For example, you could instead use +// a GUID or a pairwise identifier, as suggested in the OIDC specification +// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more +// information about how to use web identity federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// +// * Using Web +// Identity Federation API Operations for Mobile Apps +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * +// Web Identity Federation Playground +// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, Facebook, +// or Google, getting temporary security credentials, and then using those +// credentials to make a request to Amazon Web Services. +// +// * Amazon Web Services SDK +// for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and Amazon Web +// Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity +// Federation with Mobile Applications +// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. 
This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. The regex used to + // validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@- + // + // This member is required. + RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a Role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed session policies and session tags into + // a packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. 
The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed session policies and session tags into a packed binary + // format that has a separate limit. Your request can fail for this limit even if + // your plaintext meets the other requirements. The PackedPolicySize response + // element indicates by percentage how close the policies and tags for your request + // are to the upper size limit. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole. + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. The size of the security token that STS API + // operations return is not fixed. We strongly recommend that you make no + // assumptions about the maximum size. 
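+	//
+	// A minimal usage sketch (editorial illustration, not generated text): given a
+	// configured Client named client and an AssumeRoleWithWebIdentityInput named
+	// params (both assumed), the returned credentials might be copied into an
+	// aws.Credentials value:
+	//
+	//	out, err := client.AssumeRoleWithWebIdentity(ctx, params)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	creds := aws.Credentials{
+	//		AccessKeyID:     aws.ToString(out.Credentials.AccessKeyId),
+	//		SecretAccessKey: aws.ToString(out.Credentials.SecretAccessKey),
+	//		SessionToken:    aws.ToString(out.Credentials.SessionToken),
+	//	}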
+ Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect ID + // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens, + // this contains the value of the ProviderId parameter that was passed in the + // AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. You can require users to set a source identity value + // when they assume a role. You do this by using the sts:SourceIdentity condition + // key in a role trust policy. That way, actions that are taken with the role are + // associated with that user. After the source identity is set, the value cannot be + // changed. It is present in the request for all actions that are taken by the role + // and persists across chained role + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with User + // Pools + // (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. 
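+	//
+	// Editorial aside (hedged): because the middleware stack in this file records
+	// the raw response and request ID into this metadata, the request ID can
+	// usually be recovered with the awsmiddleware helper, e.g.
+	//
+	//	if reqID, ok := awsmiddleware.GetRequestIDMetadata(out.ResultMetadata); ok {
+	//		log.Printf("STS request id: %s", reqID)
+	//	}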
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithWebIdentity", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 000000000000..b7a637d420cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. For +// example, if a user is not authorized to perform an operation that he or she has +// requested, the request returns a Client.UnauthorizedOperation response (an HTTP +// 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. Only +// certain Amazon Web Services operations return an encoded authorization message. +// The documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. 
The message is +// encoded because the details of the authorization status can contain privileged +// information that the user who requested the operation should not see. To decode +// an authorization status message, a user must be granted permissions through an +// IAM policy +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to +// request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. +// The decoded message includes the following type of information: +// +// * Whether the +// request was denied due to an explicit deny or due to the absence of an explicit +// allow. For more information, see Determining Whether a Request is Allowed or +// Denied +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested +// action. +// +// * The requested resource. +// +// * The values of condition keys in the +// context of the user's request. +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. 
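+	//
+	// For context, a short invocation sketch (editorial; client and an encodedMsg
+	// value taken from a Client.UnauthorizedOperation error are assumed):
+	//
+	//	out, err := client.DecodeAuthorizationMessage(ctx, &DecodeAuthorizationMessageInput{
+	//		EncodedMessage: aws.String(encodedMsg),
+	//	})
+	//	if err == nil {
+	//		fmt.Println(aws.ToString(out.DecodedMessage))
+	//	}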
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "DecodeAuthorizationMessage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 000000000000..b86a425d0a87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. Access keys +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a +// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For +// more information about access keys, see Managing Access Keys for IAM Users +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. When you pass an access key ID to this operation, it +// returns the ID of the Amazon Web Services account to which the keys belong. 
+// Access key IDs beginning with AKIA are long-term credentials for an IAM user or +// the Amazon Web Services account root user. Access key IDs beginning with ASIA +// are temporary credentials that are created using STS operations. If the account +// in the response belongs to you, you can sign in as the root user and review your +// root user access keys. Then, you can pull a credentials report +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail logs +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. This operation does not indicate the state of the access +// key. The key might be active, inactive, or deleted. Active keys might not have +// permissions to perform an operation. Providing a deleted access key might return +// an error that the key doesn't exist. +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. This parameter allows (through its regex + // pattern) a string of characters that can consist of any upper- or lowercase + // letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. 
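+	//
+	// Editorial sketch (client assumed; the key ID below is the documentation
+	// example above, not a real credential):
+	//
+	//	out, err := client.GetAccessKeyInfo(ctx, &GetAccessKeyInfoInput{
+	//		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+	//	})
+	//	if err == nil {
+	//		fmt.Println("owning account:", aws.ToString(out.Account))
+	//	}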
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetAccessKeyInfo", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 000000000000..a7f96c22014e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. No permissions are required to perform this operation. If an +// administrator adds a policy to your IAM user or role that explicitly denies +// access to the sts:GetCallerIdentity action, you can still perform this +// operation. Permissions are not required because the same information is returned +// when an IAM user or role is denied access. 
To view an example response, see I Am +// Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. +func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the type + // of entity that is making the call. The values returned are those listed in the + // aws:userid column in the Principal table + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetCallerIdentity", + } +} + +// PresignGetCallerIdentity is used to generate a presigned HTTP Request which +// contains presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, + c.client.addOperationGetCallerIdentityMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 000000000000..01a3d411b2cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,324 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key ID, +// a secret access key, and a security token) for a federated user. A typical use +// is in a proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. You must call the +// GetFederationToken operation using the long-term security credentials of an IAM +// user. As a result, this call is appropriate in contexts where those credentials +// can be safely stored, usually in a server-based application. For a comparison of +// GetFederationToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity. For more information, see Federation Through a +// Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. You can also call GetFederationToken using the security +// credentials of an Amazon Web Services account root user, but we do not recommend +// it. 
Instead, we recommend that you create an IAM user for the purpose of the +// proxy application. Then attach a policy to the IAM user that limits federated +// users to only the actions and resources that they need to access. For more +// information, see IAM Best Practices +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the +// IAM User Guide. Session duration The temporary credentials are valid for the +// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). +// Temporary credentials obtained by using the Amazon Web Services account root +// user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service except the following: +// +// * You cannot call any IAM +// operations using the CLI or the Amazon Web Services API. +// +// * You cannot call any +// STS operations except GetCallerIdentity. +// +// You must pass an inline or managed +// session policy +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to use as +// managed session policies. The plaintext that you use for both inline and managed +// session policies can't exceed 2,048 characters. Though the session policy +// parameters are optional, if you do not pass a policy, then the resulting +// federated user session has no permissions. When you pass session policies, the +// session permissions are the intersection of the IAM user policies and the +// session policies that you pass. This gives you a way to further restrict the +// permissions for a federated user. You cannot use session policies to grant more +// permissions than those that are defined in the permissions policy of the IAM +// user. For more information, see Session Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to create +// temporary security credentials, see GetFederationToken—Federation Through a +// Custom Identity Broker +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session in the +// Principal element of the policy, the session has the permissions allowed by the +// policy. These permissions are granted in addition to the permissions granted by +// the session policies. Tags (Optional) You can pass tag key-value pairs to your +// session. These are called session tags. For more information about session tags, +// see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You can create a mobile-based or browser-based app that can +// authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity. 
For more information, see Federation Through a +// Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is +// preserved. This means that you cannot have separate Department and department +// tag keys. Assume that the user that you are federating has the +// Department=Marketing tag and you pass the department=engineering session tag. +// Department and department are not saved as separate tags, and the session tag +// passed in the request takes precedence over the user tag. +func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { + if params == nil { + params = &GetFederationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetFederationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetFederationTokenInput struct { + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference the + // federated user name in a resource-based policy, such as in an Amazon S3 bucket + // policy. The regex used to validate this parameter is a string of characters + // consisting of upper- and lower-case alphanumeric characters with no spaces. You + // can also include underscores or any of the following characters: =,.@- + // + // This member is required. + Name *string + + // The duration, in seconds, that the session should last. Acceptable durations for + // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36 + // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using + // Amazon Web Services account root user credentials are restricted to a maximum of + // 3,600 seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // You must pass an inline or managed session policy + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to use as + // managed session policies. This parameter is optional. However, if you do not + // pass any session policies, then the resulting federated user session has no + // permissions. When you pass session policies, the session permissions are the + // intersection of the IAM user policies and the session policies that you pass. + // This gives you a way to further restrict the permissions for a federated user. 
+ // You cannot use session policies to grant more permissions than those that are + // defined in the permissions policy of the IAM user. For more information, see + // Session Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. An Amazon Web Services conversion compresses the + // passed session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. You must pass an inline or + // managed session policy + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to use as + // managed session policies. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. You can provide up to 10 managed + // policy ARNs. For more information about ARNs, see Amazon Resource Names (ARNs) + // and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. This parameter is optional. However, + // if you do not pass any session policies, then the resulting federated user + // session has no permissions. When you pass session policies, the session + // permissions are the intersection of the IAM user policies and the session + // policies that you pass. This gives you a way to further restrict the permissions + // for a federated user. You cannot use session policies to grant more permissions + // than those that are defined in the permissions policy of the IAM user. For more + // information, see Session Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. 
+ // An Amazon Web Services conversion compresses the passed session policies and + // session tags into a packed binary format that has a separate limit. Your request + // can fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see Passing Session + // Tags in STS + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + // The plaintext session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // session policies and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates by + // percentage how close the policies and tags for your request are to the upper + // size limit. You can pass a session tag with the same key as a tag that is + // already attached to the user you are federating. When you do, session tags + // override a user tag with the same key. Tag key–value pairs are not case + // sensitive, but case is preserved. This means that you cannot have separate + // Department and department tag keys. Assume that the role has the + // Department=Marketing tag and you pass the department=engineering session tag. + // Department and department are not saved as separate tags, and the session tag + // passed in the request takes precedence over the role tag. + Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. 
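+	//
+	// A hedged usage sketch (editorial; client and policyJSON are assumed, and the
+	// federated user name follows the "Bob" example above): a proxy application
+	// might federate a user for one hour like so:
+	//
+	//	out, err := client.GetFederationToken(ctx, &GetFederationTokenInput{
+	//		Name:            aws.String("Bob"),
+	//		Policy:          aws.String(policyJSON),
+	//		DurationSeconds: aws.Int32(3600),
+	//	})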
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetFederationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 000000000000..b292f208a0ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or IAM +// user. The credentials consist of an access key ID, a secret access key, and a +// security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances. MFA-enabled IAM users would need to call +// GetSessionToken and submit an MFA code that is associated with their MFA device. 
+// Using the temporary security credentials that are returned from the call, IAM +// users can then make programmatic calls to API operations that require MFA +// authentication. If you do not supply a correct MFA code, then the API returns an +// access denied error. For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Session Duration The GetSessionToken operation must be +// called by using the long-term Amazon Web Services security credentials of the +// Amazon Web Services account root user or an IAM user. Credentials that are +// created by IAM users are valid for the duration that you specify. This duration +// can range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. Permissions The temporary security +// credentials created by GetSessionToken can be used to make API calls to any +// Amazon Web Services service with the following exceptions: +// +// * You cannot call +// any IAM API operations unless MFA authentication information is included in the +// request. +// +// * You cannot call any STS API except AssumeRole or +// GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with +// Amazon Web Services account root user credentials. Instead, follow our best +// practices +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, and +// using IAM users for everyday interaction with Amazon Web Services. The +// credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. If +// GetSessionToken is called using Amazon Web Services account root user +// credentials, the temporary credentials have root user permissions. Similarly, if +// GetSessionToken is called using the credentials of an IAM user, the temporary +// credentials have the same permissions as the IAM user. For more information +// about using GetSessionToken to create temporary credentials, go to Temporary +// Credentials for Users in Untrusted Environments +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { + if params == nil { + params = &GetSessionTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionTokenInput struct { + + // The duration, in seconds, that the credentials should remain valid. 
Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for + // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds + // (one hour). If the duration is longer than one hour, the session for Amazon Web + // Services account owners defaults to one hour. + DurationSeconds *int32 + + // The identification number of the MFA device that is associated with the IAM user + // who is making the GetSessionToken call. Specify this value if the IAM user has a + // policy that requires MFA authentication. The value is either the serial number + // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) + // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find + // the device for an IAM user by going to the Amazon Web Services Management + // Console and viewing the user's security credentials. The regex used to validate + // this parameter is a string of characters consisting of upper- and lower-case + // alphanumeric characters with no spaces. You can also include underscores or any + // of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication is + // required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. The + // format for this parameter, as described by its regex pattern, is a sequence of + // six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. 
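+	//
+	// Editorial sketch of an MFA-protected request (client is assumed; the serial
+	// number mirrors the virtual-device example above and the token code is a
+	// placeholder):
+	//
+	//	out, err := client.GetSessionToken(ctx, &GetSessionTokenInput{
+	//		DurationSeconds: aws.Int32(3600),
+	//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+	//		TokenCode:       aws.String("123456"),
+	//	})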
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetSessionToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 000000000000..5d634ce35c8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2507 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err 
!= nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := &AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + 
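+	// Locate the XML root element; an empty body (io.EOF) still yields the typed exception below.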
t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
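+	// Wrap the decode failure together with the captured body snapshot.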
return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + 
return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and 
ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + 
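+	// Save the enclosing decoder; it is restored after this child element is consumed.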
originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + 
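+	// PackedPolicySize is transmitted as text; parse the decimal value and narrow it to int32.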
{ + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + 
for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 000000000000..7cabbb97e9f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,12 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. +// +// Security Token Service Security Token Service (STS) enables you to request +// temporary, limited-privilege credentials for Identity and Access Management +// (IAM) users or for users that you authenticate (federated users). This guide +// provides descriptions of the STS API. For more information about using this +// service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 000000000000..cababea22d90 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. 
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + 
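+ // Fall back to the default resolver only when the user-supplied resolver
+ // reports aws.EndpointNotFoundError; any other error is returned unchanged.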
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json new file mode 100644 index 000000000000..86341bb7d7ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -0,0 +1,35 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AssumeRole.go", + "api_op_AssumeRoleWithSAML.go", + "api_op_AssumeRoleWithWebIdentity.go", + "api_op_DecodeAuthorizationMessage.go", + "api_op_GetAccessKeyInfo.go", + "api_op_GetCallerIdentity.go", + "api_op_GetFederationToken.go", + "api_op_GetSessionToken.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sts", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go new file mode 100644 index 000000000000..e55b7a74a7c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sts + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go new file mode 100644 index 000000000000..28ed441bf8e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -0,0 +1,445 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, 
+ endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, 
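+ // The remaining partitions (aws-iso, aws-iso-b, aws-us-gov) follow the
+ // same variant-keyed defaults scheme, each with partition-specific
+ // hostnames and region regexes.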
+ { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 000000000000..05531d369599 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,835 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + 
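+ // Note: the GetCallerIdentity serializer above encodes only Action and
+ // Version; GetCallerIdentityInput carries no serializable members, so no
+ // input document serializer is invoked for it.
+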
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} + +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + 
bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := 
awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil 
{ + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 000000000000..b109fe5fc181 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" } +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. +type IDPCommunicationErrorException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { return "IDPCommunicationError" } +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. If this error is returned for the +// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired +// or has been explicitly revoked. 
+type IDPRejectedClaimException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { return "IDPRejectedClaim" } +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +type InvalidAuthorizationMessageException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + return "InvalidAuthorizationMessageException" +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { return "InvalidIdentityToken" } +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +type MalformedPolicyDocumentException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { return "MalformedPolicyDocument" } +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see Passing Session Tags in STS +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. You could receive this error even though you meet other defined +// session policy and session tag limits. 
For more information, see IAM and STS +// Entity Character Limits +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +type PackedPolicyTooLargeException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { return "PackedPolicyTooLarge" } +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being asked +// to generate credentials. The account administrator must use the IAM console to +// activate STS in that region. For more information, see Activating and +// Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +type RegionDisabledException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { return "RegionDisabledException" } +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 000000000000..86e509905bea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,124 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in + // the IAM User Guide. + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. + // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. 
+ SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in + // the IAM User Guide. + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource Names + // (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. + Arn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags to +// control access to resources. For more information, see Tagging Amazon Web +// Services STS Sessions +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the +// IAM User Guide. +type Tag struct { + + // The key for a session tag. You can pass up to 50 session tags. The plain text + // session tag keys can’t exceed 128 characters. For these and additional limits, + // see IAM and STS Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Key *string + + // The value for a session tag. You can pass up to 50 session tags. The plain text + // session tag values can’t exceed 256 characters. For these and additional limits, + // see IAM and STS Character Limits + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 000000000000..3e4bad2a9255 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + 
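+// Editorial note: the sketch below is illustrative only and is not part of
+// the generated code. Each validateOp* middleware in this file is registered
+// into the Initialize step of an operation's middleware stack (see the
+// addOp*ValidationMiddleware functions below), so a missing required field
+// fails on the client before the request is ever serialized or signed.
+// Assuming a hypothetical client, ctx, and aws.String helper, a call that
+// omits RoleArn would surface the validation error like so:
+//
+//	_, err := client.AssumeRole(ctx, &AssumeRoleInput{
+//		RoleSessionName: aws.String("my-session"), // RoleArn omitted
+//	})
+//	var ipe smithy.InvalidParamsError
+//	if errors.As(err, &ipe) {
+//		// ipe reports the missing RoleArn parameter
+//	}
+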
+type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore new file mode 100644 index 000000000000..c01141aa450d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -0,0 +1,22 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ +**/dependency-reduced-pom.xml + +# Gradle +/.gradle +build/ +*/out/ +*/*/out/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml new file mode 100644 index 000000000000..f8d1035cc332 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.travis.yml @@ -0,0 +1,28 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +script: + - make go test -v ./...; + diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md new file mode 100644 index 000000000000..a5b73cf60ec7 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -0,0 +1,121 @@ +# Release (v1.11.2) + +* No change notes available for this release. + +# Release (v1.11.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.1 + * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. 
Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) + +# Release (v1.11.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.0 + * **Feature**: Updates deserialization of header list to supported quoted strings + +# Release (v1.10.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.10.0 + * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. + +# Release (v1.9.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.1 + * **Documentation**: Fixes various typos in Go package documentation. + +# Release (v1.9.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.0 + * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. + * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. + +# Release v1.8.1 + +### Smithy Go Module +* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. + * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) + +# Release v1.8.0 + +### Smithy Go Module + +* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) + * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. + * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) + +# Release v1.7.0 + +### Smithy Go Module +* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) + * Handle error for defer close call +* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) + * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. +* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) + +### Codegen +* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) + * Adds support for Smithy Document shapes and supporting types for protocols to implement support + +# Release v1.6.0 (2021-07-15) + +### Smithy Go Module +* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) + +### Codegen +* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) +* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) + +# Release v1.5.0 (2021-06-25) + +### Smithy Go module +* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) + * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. 
+ +### Codegen +* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) +* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) +* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) +* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) +* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) +* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) + +# Release v1.4.0 (2021-05-06) + +### Smithy Go module +* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) + +### Codegen +* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) +* Add support for httpQueryParams location +* Add support for model renaming conflict resolution with service closure + +# Release v1.3.1 (2021-04-08) + +### Smithy Go module +* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) +* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) + +# Release v1.3.0 (2021-04-01) + +### Smithy Go module +* `transport/http`: Add utility to safely join string to url path, and url raw query. + +### Codegen +* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. + +# Release v1.2.0 (2021-03-12) + +### Smithy Go module +* Fix support for parsing shortened year format in HTTP Date header. +* Fix GitHub APIDiff action workflow to get gorelease tool correctly. +* Fix codegen artifact unit test for Go 1.16 + +### Codegen +* Fix generating paginator nil parameter handling before usage. +* Fix Serialize unboxed members decorated as required. +* Add ability to define resolvers at both client construction and operation invocation. +* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..5b627cfa60b5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md new file mode 100644 index 000000000000..c4b6a1c5081a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. 
+ + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *main* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE new file mode 100644 index 000000000000..67db8588217f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile new file mode 100644 index 000000000000..b8c657435ee5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -0,0 +1,63 @@ +PRE_RELEASE_VERSION ?= + +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +smithy-publish-local: + cd codegen && ./gradlew publishToMavenLocal + +smithy-build: + cd codegen && ./gradlew build + +smithy-clean: + cd codegen && ./gradlew clean + +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +module-version: + @go run ${REPOTOOLS_CMD_MODULE_VERSION} . + +############## +# Repo Tools # +############## +.PHONY: install-changelog + +install-changelog: + go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE new file mode 100644 index 000000000000..616fc5889451 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/NOTICE @@ -0,0 +1 @@ +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
new file mode 100644
index 000000000000..789b37889606
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -0,0 +1,12 @@
+## Smithy Go
+
+[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
+
+Smithy code generators for Go.
+
+**WARNING: All interfaces are subject to change.**
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 000000000000..87b0c74b75c6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 000000000000..dec498c57bf9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+ UnmarshalDocument(interface{}) error
+ GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 000000000000..03055b7a1c2e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 000000000000..8f852d95c699
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+ "fmt"
+ "math/big"
+ "strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+// // Field is ignored
+// Field int `document:"-"`
+//
+// // Field object key of "myName"
+// Field int `document:"myName"`
+//
+// // Field object key of "myName", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:"myName,omitempty"`
+//
+// // Field object key of "Field", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+// - the field is not exported
+// - document field tag is "-"
+// - document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+ MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+// bool, for boolean values
+// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+// string, for string values
+// []interface{}, for array values
+// map[string]interface{}, for objects
+// nil, for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+ UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+ noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+ _, ok := x.(noSerde)
+ return ok
+}
+
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+ return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+ return strconv.ParseInt(string(n), 10, bitSize)
+}
+
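+// Editorial note: an illustrative sketch, not generated code. Number defers
+// numeric interpretation to the caller, which chooses the width at read
+// time, e.g.:
+//
+//	n := Number("9007199254740993")
+//	if i, err := n.Int64(); err == nil {
+//		_ = i // exact; the value fits in an int64
+//	}
+//	f, _ := Number("1e100").BigFloat() // too large for float64; use big.Float
+//	_ = f
+
+// Uint64 returns the number as a uint64.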
+func (n Number) Uint64() (uint64, error) {
+ return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+ return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returned as a float64.
+func (n Number) Float32() (float64, error) {
+ return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return n.floatOfBitSize(64)
+}
+
+// floatOfBitSize returns the number parsed as a float of the given bit size.
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+ return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+ f, ok := (&big.Float{}).SetString(string(n))
+ if !ok {
+ return nil, fmt.Errorf("failed to convert to big.Float")
+ }
+ return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+ f, ok := (&big.Int{}).SetString(string(n), 10)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert to big.Int")
+ }
+ return f, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 000000000000..046a7a765318
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+ return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+ e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+ var msg string
+ if e.Type == nil {
+ msg = "cannot unmarshal to nil value"
+ } else if e.Type.Kind() != reflect.Ptr {
+ msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String())
+ } else {
+ msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String())
+ }
+
+ return fmt.Sprintf("unmarshal failed, %s", msg)
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a
+// Smithy document into a Go type. This is different from
+// UnmarshalTypeError in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+ Err error
+ Value string
+ Type reflect.Type
+}
+
+// Unwrap returns the underlying unmarshaling error
+func (e *UnmarshalError) Unwrap() error {
+ return e.Err
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+ return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v",
+ e.Value, e.Type.String(), e.Err)
+}
+
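+// Editorial note: an illustrative sketch, not generated code. Callers can
+// distinguish these error types with errors.As. Assuming a hypothetical
+// Unmarshaler value doc and destination v:
+//
+//	var ute *UnmarshalTypeError
+//	if err := doc.UnmarshalSmithyDocument(&v); errors.As(err, &ute) {
+//		// e.g. "unmarshal failed, cannot unmarshal <value> into Go value type <type>"
+//	}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type.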
+type InvalidMarshalError struct {
+ Message string
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidMarshalError) Error() string {
+ return fmt.Sprintf("marshal failed, %s", e.Message)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go
new file mode 100644
index 000000000000..792fdfa08b39
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/doc.go
@@ -0,0 +1,3 @@
+// Package encoding provides utilities for encoding values for specific
+// document encodings.
+package encoding
diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go
new file mode 100644
index 000000000000..2fdfb5225027
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoders for the json and xml protocols.
+// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers.
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+ if math.IsInf(v, 0) || math.IsNaN(v) {
+ panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+ }
+
+ abs := math.Abs(v)
+ fmt := byte('f')
+
+ if abs != 0 {
+ if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+
+ dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+ if fmt == 'e' {
+ // clean up e-09 to e-9
+ n := len(dst)
+ if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+ dst[n-2] = dst[n-1]
+ dst = dst[:n-1]
+ }
+ }
+
+ return dst
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
new file mode 100644
index 000000000000..96abd073ab1c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -0,0 +1,116 @@
+package httpbinding
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+const (
+ contentLengthHeader = "Content-Length"
+ floatNaN = "NaN"
+ floatInfinity = "Infinity"
+ floatNegInfinity = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. It can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+ path, rawPath, pathBuffer []byte
+
+ query url.Values
+ header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. All query and
+// header values will be added on top of the request's existing values,
+// overwriting duplicate values.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+ parseQuery, err := url.ParseQuery(query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse query string: %w", err)
+ }
+
+ e := &Encoder{
+ path: []byte(path),
+ rawPath: []byte(path),
+ query: parseQuery,
+ header: headers.Clone(),
+ }
+
+ return e, nil
+}
+
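+// Editorial note: an illustrative sketch, not generated code. A typical
+// flow constructs the encoder from the request's current URL and headers,
+// binds values, then calls Encode (below) to write them back. Assuming a
+// hypothetical req *http.Request:
+//
+//	enc, err := NewEncoder(req.URL.Path, req.URL.RawQuery, req.Header)
+//	if err != nil {
+//		return err
+//	}
+//	enc.SetQuery("Action").String("AssumeRole")
+//	enc.SetHeader("X-Custom-Header").String("value")
+//	req, err = enc.Encode(req)
+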
+// Encode applies the encoded URI path, query string, and header values to
+// the HTTP request, and returns the updated request.
+//
+// Because net/http requires `Content-Length` to be specified on
+// http.Request#ContentLength directly, Encode checks whether that header is
+// present, and if so removes it and sets the respective value on
+// http.Request.
+//
+// Returns any error occurring during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+ req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+ req.URL.RawQuery = e.query.Encode()
+
+ // net/http ignores Content-Length header and requires it to be set on http.Request
+ if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+ iv, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ req.ContentLength = iv
+ e.header.Del(contentLengthHeader)
+ }
+
+ req.Header = e.header
+
+ return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Headers value used for encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+ return Headers{
+ header: e.header,
+ prefix: strings.TrimSpace(prefix),
+ }
+}
+
+// HasHeader returns if a header with the key specified exists with one or
+// more values.
+func (e Encoder) HasHeader(key string) bool {
+ return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+ return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, true)
+}
+
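+// Editorial note: an illustrative sketch, not generated code. SetURI fills
+// {name} path segments ({name+} leaves path separators unescaped), assuming
+// the encoder was constructed with a templated path such as
+// "/buckets/{BucketName}", reusing enc from the sketch above:
+//
+//	if err := enc.SetURI("BucketName").String("my-bucket"); err != nil {
+//		return err
+//	}
+
+// HasQuery returns if a query with the key specified exists with one or
+// more values.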
+func (e *Encoder) HasQuery(key string) bool {
+ return len(e.query.Get(key)) != 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
new file mode 100644
index 000000000000..f9256e175fc9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
@@ -0,0 +1,122 @@
+package httpbinding
+
+import (
+ "encoding/base64"
+ "math"
+ "math/big"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+ header http.Header
+ prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+ return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+ header http.Header
+ key string
+ append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+ return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+ if h.append {
+ h.header[h.key] = append(h.header[h.key], value)
+ } else {
+ h.header[h.key] = append(h.header[h.key][:0], value)
+ }
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+ h.modifyHeader(v)
+}
+
+// Byte encodes the value v as the header string value
+func (h HeaderValue) Byte(v int8) {
+ h.Long(int64(v))
+}
+
+// Short encodes the value v as the header string value
+func (h HeaderValue) Short(v int16) {
+ h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+ h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+ h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as the header string value
+func (h HeaderValue) Boolean(v bool) {
+ h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as the header string value
+func (h HeaderValue) Float(v float32) {
+ h.float(float64(v), 32)
+}
+
+// Double encodes the value v as the header string value
+func (h HeaderValue) Double(v float64) {
+ h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+ switch {
+ case math.IsNaN(v):
+ h.String(floatNaN)
+ case math.IsInf(v, 1):
+ h.String(floatInfinity)
+ case math.IsInf(v, -1):
+ h.String(floatNegInfinity)
+ default:
+ h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+ }
+}
+
+// BigInteger encodes the value v as the header string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+ h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as the header string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ h.Long(i)
+ return
+ }
+ h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+ encodeToString := base64.StdEncoding.EncodeToString(v)
+ h.modifyHeader(encodeToString)
+}
diff --git 
a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go new file mode 100644 index 000000000000..e78926c9a562 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "bytes" + "fmt" +) + +const ( + uriTokenStart = '{' + uriTokenStop = '}' + uriTokenSkip = '+' +) + +func bufCap(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, 0, n) + } + + return b[0:0] +} + +// replacePathElement replaces a single element in the path []byte. +// Escape is used to control whether the value will be escaped using Amazon path escape style. +func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + + start := bytes.Index(path, fieldBuf) + end := start + len(fieldBuf) + if start < 0 || len(path[end:]) == 0 { + // TODO what to do about error? + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) + } + + encodeSep := true + if path[end] == uriTokenSkip { + // '+' token means do not escape slashes + encodeSep = false + end++ + } + + if escape { + val = EscapePath(val, encodeSep) + } + + if path[end] != uriTokenStop { + return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) + } + end++ + + fieldBuf = bufCap(fieldBuf, len(val)) + fieldBuf = append(fieldBuf, val...) + + keyLen := end - start + valLen := len(fieldBuf) + + if keyLen == valLen { + copy(path[start:], fieldBuf) + return path, fieldBuf, nil + } + + newLen := len(path) + (valLen - keyLen) + if len(path) < newLen { + path = path[:cap(path)] + } + if cap(path) < newLen { + newURI := make([]byte, newLen) + copy(newURI, path) + path = newURI + } + + // shift + copy(path[start+valLen:], path[end:]) + path = path[:newLen] + copy(path[start:], fieldBuf) + + return path, fieldBuf, nil +} + +// EscapePath escapes part of a URL path in Amazon style. +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go new file mode 100644 index 000000000000..c2e7d0a20f45 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go @@ -0,0 +1,107 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/url" + "strconv" +) + +// QueryValue is used to encode query key values +type QueryValue struct { + query url.Values + key string + append bool +} + +// NewQueryValue creates a new QueryValue which enables encoding +// a query value into the given url.Values. 
+func NewQueryValue(query url.Values, key string, append bool) QueryValue {
+	return QueryValue{
+		query:  query,
+		key:    key,
+		append: append,
+	}
+}
+
+func (qv QueryValue) updateKey(value string) {
+	if qv.append {
+		qv.query.Add(qv.key, value)
+	} else {
+		qv.query.Set(qv.key, value)
+	}
+}
+
+// Blob encodes v as a base64 query string value
+func (qv QueryValue) Blob(v []byte) {
+	encodeToString := base64.StdEncoding.EncodeToString(v)
+	qv.updateKey(encodeToString)
+}
+
+// Boolean encodes v as a query string value
+func (qv QueryValue) Boolean(v bool) {
+	qv.updateKey(strconv.FormatBool(v))
+}
+
+// String encodes v as a query string value
+func (qv QueryValue) String(v string) {
+	qv.updateKey(v)
+}
+
+// Byte encodes v as a query string value
+func (qv QueryValue) Byte(v int8) {
+	qv.Long(int64(v))
+}
+
+// Short encodes v as a query string value
+func (qv QueryValue) Short(v int16) {
+	qv.Long(int64(v))
+}
+
+// Integer encodes v as a query string value
+func (qv QueryValue) Integer(v int32) {
+	qv.Long(int64(v))
+}
+
+// Long encodes v as a query string value
+func (qv QueryValue) Long(v int64) {
+	qv.updateKey(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (qv QueryValue) Float(v float32) {
+	qv.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (qv QueryValue) Double(v float64) {
+	qv.float(v, 64)
+}
+
+func (qv QueryValue) float(v float64, bitSize int) {
+	switch {
+	case math.IsNaN(v):
+		qv.String(floatNaN)
+	case math.IsInf(v, 1):
+		qv.String(floatInfinity)
+	case math.IsInf(v, -1):
+		qv.String(floatNegInfinity)
+	default:
+		qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a query string value
+func (qv QueryValue) BigInteger(v *big.Int) {
+	qv.updateKey(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (qv QueryValue) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		qv.Long(i)
+		return
+	}
+	qv.updateKey(v.Text('e', -1))
+}
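The `append` flag is the only behavioral switch in this type. A minimal sketch of the set-versus-append behavior, using only the exported `NewQueryValue` constructor shown above (a standalone illustration, not part of the vendored file):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	q := url.Values{}

	// append=false replaces any existing value for the key (url.Values.Set).
	httpbinding.NewQueryValue(q, "page-size", false).Integer(25)

	// append=true accumulates values under the key (url.Values.Add).
	httpbinding.NewQueryValue(q, "tag", true).String("alpha")
	httpbinding.NewQueryValue(q, "tag", true).String("beta")

	fmt.Println(q.Encode()) // page-size=25&tag=alpha&tag=beta
}
```

HeaderValue in header.go above makes the same set/append distinction for HTTP headers.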
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
new file mode 100644
index 000000000000..f04e11984ac3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
@@ -0,0 +1,111 @@
+package httpbinding
+
+import (
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+)
+
+// URIValue is used to encode named URI parameters
+type URIValue struct {
+	path, rawPath, buffer *[]byte
+
+	key string
+}
+
+func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue {
+	return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key}
+}
+
+func (u URIValue) modifyURI(value string) (err error) {
+	*u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false)
+	if err != nil {
+		return err
+	}
+	*u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true)
+	return err
+}
+
+// Boolean encodes v as a URI string value
+func (u URIValue) Boolean(v bool) error {
+	return u.modifyURI(strconv.FormatBool(v))
+}
+
+// String encodes v as a URI string value
+func (u URIValue) String(v string) error {
+	return u.modifyURI(v)
+}
+
+// Byte encodes v as a URI string value
+func (u URIValue) Byte(v int8) error {
+	return u.Long(int64(v))
+}
+
+// Short encodes v as a URI string value
+func (u URIValue) Short(v int16) error {
+	return u.Long(int64(v))
+}
+
+// Integer encodes v as a URI string value
+func (u URIValue) Integer(v int32) error {
+	return u.Long(int64(v))
+}
+
+// Long encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+	return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a URI string value
+func (u URIValue) Float(v float32) error {
+	return u.float(float64(v), 32)
+}
+
+// Double encodes v as a URI string value
+func (u URIValue) Double(v float64) error {
+	return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+	switch {
+	case math.IsNaN(v):
+		return u.String(floatNaN)
+	case math.IsInf(v, 1):
+		return u.String(floatInfinity)
+	case math.IsInf(v, -1):
+		return u.String(floatNegInfinity)
+	default:
+		return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a URI string value
+func (u URIValue) BigInteger(v *big.Int) error {
+	return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a URI string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		return u.Long(i)
+	}
+	return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+	queryStart := strings.IndexRune(uri, '?')
+	if queryStart == -1 {
+		path = uri
+		return path, query
+	}
+
+	path = uri[:queryStart]
+	if queryStart+1 >= len(uri) {
+		return path, query
+	}
+	query = uri[queryStart+1:]
+
+	return path, query
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 000000000000..508f3c997ec5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of an XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element and the array start element,
+// along with an isFlattened bool indicating whether the array is flattened.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
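To make the wrapped/flattened distinction concrete, here is a minimal sketch of how the array encoder composes with the Value API defined later in this diff (encoder.go and value.go below). It assumes the import alias `smithyxml`; `bytes.Buffer` satisfies the package's unexported `writer` interface:

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	var buf bytes.Buffer
	encoder := smithyxml.NewEncoder(&buf)

	// Root element; the Value returned by RootElement must be closed.
	root := encoder.RootElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Fruits"}})

	// Wrapped list: each member gets the default <member> wrapper.
	list := root.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "List"}})
	arr := list.Array()
	arr.Member().String("apple")
	arr.Member().String("tree")
	list.Close()

	root.Close()
	fmt.Println(buf.String())
	// <Fruits><List><member>apple</member><member>tree</member></List></Fruits>
}
```

On a flattened Value, `Array()` instead reuses the list's own start element as the per-member wrapper, yielding `<List>apple</List><List>tree</List>` with no outer wrapper, per the newArray logic above.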
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 000000000000..ccee90a636bb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 000000000000..d6e1e41e164d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to
+a shape serializer function in which an xml.Value will be passed around.
+
+Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#
+
+Member Element
+
+Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member elements
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened element should be used to encode shapes marked with the flattened trait into xml elements. Flattened elements
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on Value, such as String() and Long(), auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods, Array() and ArrayWithCustomName(), which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as the wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+A wrapped map has the Map() method, which facilitates map entry wrapping.
+By default, wrapped map entries are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as the wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 000000000000..ae84e7999edb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns an Attr. It takes in a local name (aka the attribute name)
+and a value representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns an Attr. It takes in a local name (aka the attribute name)
+and a value representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of the namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// returns true if the start element's local name is empty
+func (e StartElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// returns true if the end element's local name is empty
+func (e EndElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 000000000000..16fb3dddb0a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+	// Write takes in a byte slice and returns the number of bytes written and an error
+	Write(p []byte) (n int, err error)
+
+	// WriteRune takes in a rune and returns the number of bytes written and an error
+	WriteRune(r rune) (n int, err error)
+
+	// WriteString takes in a string and returns the number of bytes written and an error
+	WriteString(s string) (n int, err error)
+
+	// String method returns a string
+	String() string
+
+	// Bytes returns a byte slice.
+	Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+	w       writer
+	scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding.
+// It writes its start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+	return newValue(e.w, e.scratch, element)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 000000000000..f3db6ccca85c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code    string
+	Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within <Error>...</Error>
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
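A short sketch of how GetErrorResponseComponents selects between the two shapes; the response body here is invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	body := `<ErrorResponse><Error><Code>Throttling</Code><Message>slow down</Message></Error></ErrorResponse>`

	// noErrorWrapping=false: the <Error> wrapper is present, so the wrapped
	// struct form (`Error>Code`, `Error>Message`) is used to decode.
	ec, err := smithyxml.GetErrorResponseComponents(strings.NewReader(body), false)
	if err != nil {
		panic(err)
	}
	fmt.Println(ec.Code, ec.Message) // Throttling slow down
}
```

Passing `noErrorWrapping=true` would instead expect `<Code>` and `<Message>` directly under the root, as some XML protocols emit.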
") +) + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of https://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. +func isInCharacterRange(r rune) (inrange bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +// TODO: When do we need to escape the string? +// Based on encoding/xml escapeString from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeString(e writer, s string) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRuneInString(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.WriteString(s[last : i-width]) + e.Write(esc) + last = i + } + e.WriteString(s[last:]) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. If escapeNewline is true, newline +// characters will be escaped. +// +// Based on encoding/xml escapeText from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeText(e writer, s []byte) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRune(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + // This always escapes newline, which is different than stdlib's optional + // escape of new line. + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.Write(s[last : i-width]) + e.Write(esc) + last = i + } + e.Write(s[last:]) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go new file mode 100644 index 000000000000..e42858965ccc --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go @@ -0,0 +1,53 @@ +package xml + +// mapEntryWrapper is the default member wrapper start element for XML Map entry +var mapEntryWrapper = StartElement{ + Name: Name{Local: "entry"}, +} + +// Map represents the encoding of a XML map type +type Map struct { + w writer + scratch *[]byte + + // member start element is the map entry wrapper start element + memberStartElement StartElement + + // isFlattened returns true if the map is a flattened map + isFlattened bool +} + +// newMap returns a map encoder which sets the default map +// entry wrapper to `entry`. +// +// A map `someMap : {{key:"abc", value:"123"}}` is represented as +// `abc123`. +func newMap(w writer, scratch *[]byte) *Map { + return &Map{ + w: w, + scratch: scratch, + memberStartElement: mapEntryWrapper, + } +} + +// newFlattenedMap returns a map encoder which sets the map +// entry wrapper to the passed in memberWrapper`. 
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in memberWrapper.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        true,
+	}
+}
+
+// Entry returns a Value encoder with the map's element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+	v := newValue(m.w, m.scratch, m.memberStartElement)
+	v.isFlattened = m.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 000000000000..09434b2c0b55
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type.
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+	w       writer
+	scratch *[]byte
+
+	// xml start element is the associated start element for the Value
+	startElement StartElement
+
+	// indicates if the Value represents a flattened shape
+	isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	return Value{
+		w:            w,
+		scratch:      scratch,
+		startElement: startElement,
+	}
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	writeStartElement(w, startElement)
+	return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml start element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	for _, attr := range el.Attr {
+		w.WriteRune(' ')
+		writeAttribute(w, &attr)
+	}
+
+	w.WriteRune(rightAngleBracket)
+	return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+	// if local, space both are not empty
+	if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+		escapeString(w, attr.Name.Space)
+		w.WriteRune(colon)
+	}
+
+	// if prefix is empty, the default `xmlns` space should be used as prefix.
+	if len(attr.Name.Local) == 0 {
+		attr.Name.Local = attr.Name.Space
+	}
+
+	escapeString(w, attr.Name.Local)
+	w.WriteRune(equals)
+	w.WriteRune(quote)
+	escapeString(w, attr.Value)
+	w.WriteRune(quote)
+}
+
+// writeEndElement takes in an end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml end element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+	w.WriteRune(forwardSlash)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	w.WriteRune(rightAngleBracket)
+
+	return nil
+}
+
+// String encodes v as an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+	escapeString(xv.w, v)
+	xv.Close()
+}
+
+// Byte encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+	xv.Long(int64(v))
+}
+
+// Short encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+	xv.Long(int64(v))
+}
+
+// Integer encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+	xv.Long(int64(v))
+}
+
+// Long encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+	*xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Float encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+	xv.float(float64(v), 32)
+	xv.Close()
+}
+
+// Double encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+	xv.float(v, 64)
+	xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+	*xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+	xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as an XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+	*xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+	xv.Close()
+}
+
+// BigInteger encodes v big.Int as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+	xv.w.Write([]byte(v.Text(10)))
+	xv.Close()
+}
+
+// BigDecimal encodes v big.Float as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		xv.Long(i)
+		return
+	}
+
+	xv.w.Write([]byte(v.Text('e', -1)))
+	xv.Close()
+}
+
+// Write writes v directly to the xml document.
+// If escapeXMLText is set to true, write will escape text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+	// escape and write xml text
+	if escapeXMLText {
+		escapeText(xv.w, v)
+	} else {
+		// write xml directly
+		xv.w.Write(v)
+	}
+
+	xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// The MemberElement method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+	return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by the FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+	v := newFlattenedValue(xv.w, xv.scratch, element)
+	v.isFlattened = true
+	return v
+}
+// Array returns an array encoder. By default, the members of the array are
+// wrapped with the `<member>` element tag.
+// If the value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+	return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; the named start element will be used to wrap xml array entries.
+For example, `<customName>entry1</customName>`.
+Here the `customName` named start element will be wrapped around each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+	return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with the `<entry>` element tag.
+
+If the value is marked as flattened, the start element is used to wrap the entries instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+	// flattened map
+	if xv.isFlattened {
+		return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+	}
+
+	// un-flattened map
+	return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is a modified copy of the json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+	if v == nil {
+		return
+	}
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+}
+
+// IsFlattened returns true if the value is for a flattened shape.
+func (xv Value) IsFlattened() bool {
+	return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+	writeEndElement(xv.w, xv.startElement.End())
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 000000000000..dc4eebdffa72
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+)
+
+// NodeDecoder is an XML decoder wrapper that is responsible for decoding
+// a single XML Node element and its nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+	Decoder *xml.Decoder
+	StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized XMLNodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+	return NodeDecoder{
+		Decoder: decoder,
+		StartEl: startEl,
+	}
+}
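A sketch of the decode loop these types are built for, using Token and Value (defined just below) plus FetchRootElement from further down this file; the input document is invented for illustration:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	r := strings.NewReader(`<Person><Name>Jane</Name><Age>41</Age></Person>`)
	decoder := xml.NewDecoder(r)

	// FetchRootElement skips preamble/comments and returns <Person>.
	root, err := smithyxml.FetchRootElement(decoder)
	if err != nil {
		panic(err)
	}

	node := smithyxml.WrapNodeDecoder(decoder, root)
	for {
		tok, done, err := node.Token()
		if done || err != nil {
			break
		}
		// Each child element's char data is read with Value(), which also
		// consumes the element's end tag.
		val, err := smithyxml.WrapNodeDecoder(decoder, tok).Value()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s=%s\n", tok.Name.Local, val)
	}
	// Name=Jane
	// Age=41
}
```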
+// Token on a NodeDecoder returns an xml StartElement. It returns a boolean that indicates whether
+// a token is the node decoder's end node token, and an error which indicates any error
+// that occurred while retrieving the start element.
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+	for {
+		token, e := d.Decoder.Token()
+		if e != nil {
+			return t, done, e
+		}
+
+		// check if we reach end of the node being decoded
+		if el, ok := token.(xml.EndElement); ok {
+			return t, el == d.StartEl.End(), err
+		}
+
+		if t, ok := token.(xml.StartElement); ok {
+			return restoreAttrNamespaces(t), false, err
+		}
+
+		// skip token if it is a comment or preamble or empty space value due to indentation,
+		// or if it's a value and is not expected
+	}
+}
+
+// restoreAttrNamespaces updates XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+	if len(node.Attr) == 0 {
+		return node
+	}
+
+	// Generate a mapping of XML namespace values to their short names.
+	ns := map[string]string{}
+	for _, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+			break
+		}
+	}
+
+	for i, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			continue
+		}
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had
+		// <foo xmlns:bar="baz" bar:bin="hi"/>
+		// then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+		// continue to resolve as `bar` so we can easily identify it later on.
+		if v, ok := ns[node.Attr[i].Name.Space]; ok {
+			node.Attr[i].Name.Space = v
+		}
+	}
+	return node
+}
+
+// GetElement looks for the given tag name at the current level, and returns the element if found,
+// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+	for {
+		token, done, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		if done {
+			return t, fmt.Errorf("%s node not found", name)
+		}
+		switch {
+		case strings.EqualFold(name, token.Name.Local):
+			return token, nil
+		default:
+			err = d.Decoder.Skip()
+			if err != nil {
+				return t, err
+			}
+		}
+	}
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+	t, e := d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	endElement := d.StartEl.End()
+
+	switch ev := t.(type) {
+	case xml.CharData:
+		c = ev.Copy()
+	case xml.EndElement: // end tag or self-closing
+		if ev == endElement {
+			return []byte{}, err
+		}
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	default:
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	}
+
+	t, e = d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	if ev, ok := t.(xml.EndElement); ok {
+		if ev == endElement {
+			return c, err
+		}
+	}
+
+	return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response, while ignoring the
+// comments and preamble.
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+	for {
+		t, e := decoder.Token()
+		if e != nil {
+			return startElement, e
+		}
+
+		if startElement, ok := t.(xml.StartElement); ok {
+			return startElement, err
+		}
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go
new file mode 100644
index 000000000000..d6948d020623
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/errors.go
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+	error
+
+	// ErrorCode returns the error code for the API exception.
+	ErrorCode() string
+	// ErrorMessage returns the error message for the API exception.
+	ErrorMessage() string
+	// ErrorFault returns the fault for the API exception.
+	ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+	Code    string
+	Message string
+	Fault   ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+	return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with names of the operation and API.
+type OperationError struct {
+	ServiceID     string
+	OperationName string
+	Err           error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err }
+
+func (e *OperationError) Error() string {
+	return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
+}
+
+// DeserializationError provides a wrapper for an error that occurs during
+// deserialization.
+type DeserializationError struct {
+	Err      error // original error
+	Snapshot []byte
+}
+
+// Error returns a formatted error for DeserializationError
+func (e *DeserializationError) Error() string {
+	const msg = "deserialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s, %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in DeserializationError
+func (e *DeserializationError) Unwrap() error { return e.Err }
+
+// ErrorFault provides the type for a Smithy API error fault.
+type ErrorFault int
+
+// ErrorFault enumeration values
+const (
+	FaultUnknown ErrorFault = iota
+	FaultServer
+	FaultClient
+)
+
+func (f ErrorFault) String() string {
+	switch f {
+	case FaultServer:
+		return "server"
+	case FaultClient:
+		return "client"
+	default:
+		return "unknown"
+	}
+}
+
+// SerializationError represents an error that occurred while attempting to serialize a request
+type SerializationError struct {
+	Err error // original error
+}
+
+// Error returns a formatted error for SerializationError
+func (e *SerializationError) Error() string {
+	const msg = "serialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s: %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in SerializationError
+func (e *SerializationError) Unwrap() error { return e.Err }
+
+// CanceledError is the error that will be returned by an API request that was
+// canceled. API operations given a Context may return this error when
+// canceled.
+type CanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*CanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *CanceledError) Unwrap() error {
+	return e.Err
+}
+
+func (e *CanceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
new file mode 100644
index 000000000000..7e252ec8c150
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package smithy
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.2"
diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go
new file mode 100644
index 000000000000..f8417c15b85b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/byte.go
@@ -0,0 +1,12 @@
+package io
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go
new file mode 100644
index 000000000000..a6a33eaf5672
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/doc.go
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go
new file mode 100644
index 000000000000..07063f2960d8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/reader.go
@@ -0,0 +1,16 @@
+package io
+
+import (
+	"io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+	io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
new file mode 100644
index 000000000000..06b476add8a2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
@@ -0,0 +1,94 @@
+package io
+
+import (
+	"bytes"
+	"io"
+)
+
+// RingBuffer struct satisfies the io.ReadWriter interface.
+//
+// RingBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+	slice []byte
+	start int
+	end   int
+	size  int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+	ringBuf := RingBuffer{
+		slice: slice,
+	}
+	return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, increment the start index
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+	// readCount keeps track of the number of bytes read
+	var readCount int
+	for j := 0; j < len(p); j++ {
+		// if ring buffer is empty or completely read,
+		// return EOF error.
+		if r.size == 0 {
+			return readCount, io.EOF
+		}
+
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+
+		p[j] = r.slice[r.start]
+		readCount++
+		// increment the start pointer for ring buffer
+		r.start++
+		// decrement the size of ring buffer
+		r.size--
+	}
+	return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
+func (r *RingBuffer) Len() int {
+	return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+	var b bytes.Buffer
+	io.Copy(&b, &r)
+	return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+	*r = RingBuffer{
+		slice: r.slice,
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
new file mode 100644
index 000000000000..800bf3769542
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+	echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+	exit 1
+}
+
+while getopts "hs:d:" options; do
+	case "${options}" in
+	s)
+		SMITHY_SOURCE_DIR=${OPTARG}
+		if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+			echo "path to smithy-go source directory is required" || exit
+			usage
+		fi
+		;;
+	d)
+		PROJECT_DIR=${OPTARG}
+		;;
+	h)
+		usage
+		;;
+	*)
+		usage
+		;;
+	esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+	cd $PROJECT_DIR || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+	repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+	echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 000000000000..2071924bd306
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+	"context"
+	"io"
+	"log"
+)
+
+// Classification is the type of the log entry's classification name.
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+	Warn  Classification = "WARN"
+	Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+	// Logf is expected to support the standard fmt package "verbs".
+	Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+	f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+	WithContext(context.Context) Logger
+}
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+	if logger == nil {
+		return Nop{}
+	}
+
+	cl, ok := logger.(ContextLogger)
+	if !ok {
+		return logger
+	}
+
+	return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+	return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its
+// Printf method.
+type StandardLogger struct {
+	Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+	if len(classification) != 0 {
+		format = string(classification) + " " + format
+	}
+
+	s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+	return &StandardLogger{
+		Logger: log.New(writer, "SDK ", log.LstdFlags),
+	}
+}
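A sketch of both ends of this interface: the built-in StandardLogger, and a LoggerFunc wrapping a custom sink (the filter logic here is invented for illustration):

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/smithy-go/logging"
)

func main() {
	// Standard-library-backed logger; Logf prefixes the classification.
	std := logging.NewStandardLogger(os.Stderr)
	std.Logf(logging.Debug, "retrying request, attempt %d", 2)
	// stderr: SDK <date> <time> DEBUG retrying request, attempt 2

	// Any func with the right signature satisfies Logger via LoggerFunc.
	quiet := logging.LoggerFunc(func(c logging.Classification, format string, v ...interface{}) {
		if c == logging.Warn {
			fmt.Fprintf(os.Stderr, format+"\n", v...)
		}
	})
	quiet.Logf(logging.Debug, "dropped")
	quiet.Logf(logging.Warn, "kept: %s", "warning text")
}
```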
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 000000000000..9858928a7f83
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into an
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters that the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error above stacks can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to a step front or back, or relative, by name, to an
+// existing middleware in that stack. If a middleware does not have a name, a
+// unique name will be generated for the middleware and it will be added to the step.
+//
+//	// Create middleware stack
+//	stack := middleware.NewStack()
+//
+//	// Add middleware to stack steps
+//	stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+//	stack.Serialize.Add(marshalOperationFoo, middleware.After)
+//	stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+//	// Invoke middleware on handler.
+//	resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go
new file mode 100644
index 000000000000..c2f0dbb6bda9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/logging.go
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context key with which the logger is associated.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
+func GetLogger(ctx context.Context) logging.Logger {
+	logger, ok := ctx.Value(loggerKey{}).(logging.Logger)
+	if !ok || logger == nil {
+		return logging.Nop{}
+	}
+
+	return logging.WithContext(ctx, logger)
+}
+
+// SetLogger sets the provided logger value on the provided ctx.
+func SetLogger(ctx context.Context, logger logging.Logger) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+type setLogger struct {
+	Logger logging.Logger
+}
+
+// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context.
+func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error {
+	return stack.Initialize.Add(&setLogger{Logger: logger}, After)
+}
+
+func (a *setLogger) ID() string {
+	return "SetLogger"
+}
+
+func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return next.HandleInitialize(SetLogger(ctx, a.Logger), in)
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go
new file mode 100644
index 000000000000..7bb7dbcf5a05
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go
@@ -0,0 +1,65 @@
+package middleware
+
+// MetadataReader provides an interface for reading metadata from the
+// underlying metadata container.
+type MetadataReader interface {
+	Get(key interface{}) interface{}
+}
+
+// Metadata provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and Set will panic if the key is not a comparable
+// value type.
+//
+// Metadata uses lazy initialization, and the Set method must be called on an
+// addressable value, or pointer. Not doing so may cause the key/value pair to
+// not be set.
+type Metadata struct {
+	values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m Metadata) Get(key interface{}) interface{} {
+	return m.values[key]
+}
+
+// Clone creates a shallow copy of Metadata entries, returning a new Metadata
+// value with the original entries copied into it.
+func (m Metadata) Clone() Metadata {
+	vs := make(map[interface{}]interface{}, len(m.values))
+	for k, v := range m.values {
+		vs[k] = v
+	}
+
+	return Metadata{
+		values: vs,
+	}
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Set method must be called as an addressable value, or pointer. If Set is not
+// called as an addressable value or pointer, the key value pair being set may
+// be lost.
+//
+// Panics if the key type is not comparable.
+func (m *Metadata) Set(key, value interface{}) {
+	if m.values == nil {
+		m.values = map[interface{}]interface{}{}
+	}
+	m.values[key] = value
+}
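A sketch of the intended usage; the key type here is a hypothetical caller-defined one, since using a private struct type keeps metadata keys collision-free across packages:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// requestIDKey is a hypothetical caller-defined key type; an unexported
// struct type avoids collisions with other packages' metadata keys.
type requestIDKey struct{}

func main() {
	var md middleware.Metadata

	// Set must be called on an addressable value so the lazily
	// initialized map is retained.
	md.Set(requestIDKey{}, "req-12345")

	if v, ok := md.Get(requestIDKey{}).(string); ok {
		fmt.Println(v) // req-12345
	}
}
```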
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m Metadata) Has(key interface{}) bool {
+	if m.values == nil {
+		return false
+	}
+	_, ok := m.values[key]
+	return ok
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 000000000000..803b7c751840
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+	// Handle performs logic to obtain an output for the given input. Handler
+	// should be decorated with middleware to perform input specific behavior.
+	Handle(ctx context.Context, input interface{}) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+	// ID provides a unique identifier for the middleware.
+	ID() string
+
+	// HandleMiddleware performs the middleware's handling of the input, returning the output,
+	// or error. The middleware can invoke the next Handler if handling should
+	// continue.
+	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// decoratedHandler wraps a middleware in order to call the next handler in
+// the chain.
+type decoratedHandler struct {
+	// The next handler to be called.
+	Next Handler
+
+	// The current middleware decorating the handler.
+	With Middleware
+}
+
+// Handle implements the Handler interface to handle an operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return m.With.HandleMiddleware(ctx, input, m.Next)
+}
+
+// DecorateHandler decorates a handler with a middleware, wrapping the handler
+// with the middleware.
+func DecorateHandler(h Handler, with ...Middleware) Handler {
+	for i := len(with) - 1; i >= 0; i-- {
+		h = decoratedHandler{
+			Next: h,
+			With: with[i],
+		}
+	}
+
+	return h
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
new file mode 100644
index 000000000000..4b195308c599
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
@@ -0,0 +1,268 @@
+package middleware
+
+import "fmt"
+
+// RelativePosition provides specifying the relative position of a middleware
+// in an ordered group.
+type RelativePosition int
+
+// Relative position for middleware in steps.
+const (
+	After RelativePosition = iota
+	Before
+)
+
+type ider interface {
+	ID() string
+}
+
+// orderedIDs provides an ordered collection of items with relative ordering
+// by name.
+type orderedIDs struct { + order *relativeOrder + items map[string]ider +} + +const baseOrderedItems = 5 + +func newOrderedIDs() *orderedIDs { + return &orderedIDs{ + order: newRelativeOrder(), + items: make(map[string]ider, baseOrderedItems), + } +} + +// Add injects the item to the relative position of the item group. Returns an +// error if the item already exists. +func (g *orderedIDs) Add(m ider, pos RelativePosition) error { + id := m.ID() + if len(id) == 0 { + return fmt.Errorf("empty ID, ID must not be empty") + } + + if err := g.order.Add(pos, id); err != nil { + return err + } + + g.items[id] = m + return nil +} + +// Insert injects the item relative to an existing item id. Returns an error if +// the original item does not exist, or the item being added already exists. +func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { + if len(m.ID()) == 0 { + return fmt.Errorf("insert ID must not be empty") + } + if len(relativeTo) == 0 { + return fmt.Errorf("relative to ID must not be empty") + } + + if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { + return err + } + + g.items[m.ID()] = m + return nil +} + +// Get returns the ider identified by id. If ider is not present, returns false. +func (g *orderedIDs) Get(id string) (ider, bool) { + v, ok := g.items[id] + return v, ok +} + +// Swap removes the item by id, replacing it with the new item. Returns an error +// if the original item doesn't exist. +func (g *orderedIDs) Swap(id string, m ider) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("swap from ID must not be empty") + } + + iderID := m.ID() + if len(iderID) == 0 { + return nil, fmt.Errorf("swap to ID must not be empty") + } + + if err := g.order.Swap(id, iderID); err != nil { + return nil, err + } + + removed := g.items[id] + + delete(g.items, id) + g.items[iderID] = m + + return removed, nil +} + +// Remove removes the item by id. Returns an error if the item +// doesn't exist. +func (g *orderedIDs) Remove(id string) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("remove ID must not be empty") + } + + if err := g.order.Remove(id); err != nil { + return nil, err + } + + removed := g.items[id] + delete(g.items, id) + return removed, nil +} + +func (g *orderedIDs) List() []string { + items := g.order.List() + order := make([]string, len(items)) + copy(order, items) + return order +} + +// Clear removes all entries and slots. +func (g *orderedIDs) Clear() { + g.order.Clear() + g.items = map[string]ider{} +} + +// GetOrder returns the item in the order it should be invoked in. +func (g *orderedIDs) GetOrder() []interface{} { + order := g.order.List() + ordered := make([]interface{}, len(order)) + for i := 0; i < len(order); i++ { + ordered[i] = g.items[order[i]] + } + + return ordered +} + +// relativeOrder provides ordering of item +type relativeOrder struct { + order []string +} + +func newRelativeOrder() *relativeOrder { + return &relativeOrder{ + order: make([]string, 0, baseOrderedItems), + } +} + +// Add inserts an item into the order relative to the position provided. +func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + switch pos { + case Before: + return s.insert(0, Before, ids...) + + case After: + s.order = append(s.order, ids...) 
+ + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +// Insert injects an item before or after the relative item. Returns +// an error if the relative item does not exist. +func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + i, ok := s.has(relativeTo) + if !ok { + return fmt.Errorf("not found, %v", relativeTo) + } + + return s.insert(i, pos, ids...) +} + +// Swap will replace the item id with the to item. Returns an +// error if the original item id does not exist. Allows swapping out an +// item for another item with the same id. +func (s *relativeOrder) Swap(id, to string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + if _, ok = s.has(to); ok && id != to { + return fmt.Errorf("already exists, %v", to) + } + + s.order[i] = to + return nil +} + +func (s *relativeOrder) Remove(id string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + s.order = append(s.order[:i], s.order[i+1:]...) + return nil +} + +func (s *relativeOrder) List() []string { + return s.order +} + +func (s *relativeOrder) Clear() { + s.order = s.order[0:0] +} + +func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { + switch pos { + case Before: + n := len(ids) + var src []string + if n <= cap(s.order)-len(s.order) { + s.order = s.order[:len(s.order)+n] + src = s.order + } else { + src = s.order + s.order = make([]string, len(s.order)+n) + copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half + } + copy(s.order[i+n:], src[i:]) + copy(s.order[i:], ids) + case After: + if i == len(s.order)-1 || len(s.order) == 0 { + s.order = append(s.order, ids...) + } else { + s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) + } + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +func (s *relativeOrder) has(id string) (i int, found bool) { + for i := 0; i < len(s.order); i++ { + if s.order[i] == id { + return i, true + } + } + return 0, false +} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go new file mode 100644 index 000000000000..45ccb5b93c9f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/stack.go @@ -0,0 +1,209 @@ +package middleware + +import ( + "context" + "io" + "strings" +) + +// Stack provides protocol and transport agnostic set of middleware split into +// distinct steps. Steps have specific transitions between them, that are +// managed by the individual step. +// +// Steps are composed as middleware around the underlying handler in the +// following order: +// +// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler +// +// Any middleware within the chain may choose to stop and return an error or +// response. Since the middleware decorate the handler like a call stack, each +// middleware will receive the result of the next middleware in the chain. +// Middleware that does not need to react to an input, or result must forward +// along the input down the chain, or return the result back up the chain. 
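+//
+// Results flow back up the chain in the reverse of the invocation order; a
+// Deserialize middleware, for example, sees the handler's raw response
+// before any earlier step sees the structured result, as the diagram below
+// illustrates: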
+//
+// Initialize <- Serialize <- Build <- Finalize <- Deserialize <- Handler
+type Stack struct {
+	// Initialize prepares the input, and sets any default parameters as
+	// needed (e.g. idempotency token, and presigned URLs).
+	//
+	// Takes Input Parameters, and returns result or error.
+	//
+	// Receives result or error from Serialize step.
+	Initialize *InitializeStep
+
+	// Serialize serializes the prepared input into a data structure that can be consumed
+	// by the target transport's message (e.g. REST-JSON serialization).
+	//
+	// Converts Input Parameters into a Request, and returns the result or error.
+	//
+	// Receives result or error from Build step.
+	Serialize *SerializeStep
+
+	// Build adds additional metadata to the serialized transport message
+	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+	// modifications to the message should be copied to all message attempts.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Finalize step.
+	Build *BuildStep
+
+	// Finalize performs final preparations needed before sending the message. The
+	// message should already be complete by this stage, and is only altered
+	// to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+	// request signing).
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Deserialize step.
+	Finalize *FinalizeStep
+
+	// Deserialize reacts to the handler's response returned by the recipient of the request
+	// message. Deserializes the response into a structured type or error that
+	// middleware higher in the stack can react to.
+	//
+	// Should only forward Request to underlying handler.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives raw response, or error from underlying handler.
+	Deserialize *DeserializeStep
+
+	id string
+}
+
+// NewStack returns an initialized, empty stack.
+func NewStack(id string, newRequestFn func() interface{}) *Stack {
+	return &Stack{
+		id:          id,
+		Initialize:  NewInitializeStep(),
+		Serialize:   NewSerializeStep(newRequestFn),
+		Build:       NewBuildStep(),
+		Finalize:    NewFinalizeStep(),
+		Deserialize: NewDeserializeStep(),
+	}
+}
+
+// ID returns the unique ID for the stack as a middleware.
+func (s *Stack) ID() string { return s.id }
+
+// HandleMiddleware invokes the middleware stack decorating the next handler.
+// Each step of the stack will be invoked in order before calling the next
+// step, with the next handler called last.
+//
+// The input value must be the input parameters of the operation being
+// performed.
+//
+// Returns the result of the operation, or an error.
+func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+	output interface{}, metadata Metadata, err error,
+) {
+	h := DecorateHandler(next,
+		s.Initialize,
+		s.Serialize,
+		s.Build,
+		s.Finalize,
+		s.Deserialize,
+	)
+
+	return h.Handle(ctx, input)
+}
+
+// List returns a list of all middleware in the stack by step.
+func (s *Stack) List() []string {
+	var l []string
+	l = append(l, s.id)
+
+	l = append(l, s.Initialize.ID())
+	l = append(l, s.Initialize.List()...)
+
+	l = append(l, s.Serialize.ID())
+	l = append(l, s.Serialize.List()...)
+
+	l = append(l, s.Build.ID())
+	l = append(l, s.Build.List()...)
+
+	l = append(l, s.Finalize.ID())
+	l = append(l, s.Finalize.List()...)
+
+	l = append(l, s.Deserialize.ID())
+	l = append(l, s.Deserialize.List()...)
+
+	return l
+}
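+
+// An illustrative sketch of assembling and invoking a stack; the request
+// constructor, middleware values, and transport handler named here are
+// hypothetical:
+//
+//	stack := NewStack("demo", func() interface{} { return &myRequest{} })
+//	_ = stack.Initialize.Add(validateParams{}, After)
+//	_ = stack.Finalize.Add(signRequest{}, After)
+//	out, _, err := stack.HandleMiddleware(ctx, params, transportHandler)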
+
+func (s *Stack) String() string {
+	var b strings.Builder
+
+	w := &indentWriter{w: &b}
+
+	w.WriteLine(s.id)
+	w.Push()
+
+	writeStepItems(w, s.Initialize)
+	writeStepItems(w, s.Serialize)
+	writeStepItems(w, s.Build)
+	writeStepItems(w, s.Finalize)
+	writeStepItems(w, s.Deserialize)
+
+	return b.String()
+}
+
+type stackStepper interface {
+	ID() string
+	List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+	type lister interface {
+		List() []string
+	}
+
+	w.WriteLine(s.ID())
+	w.Push()
+
+	defer w.Pop()
+
+	// ignore stack to prevent circular iterations
+	if _, ok := s.(*Stack); ok {
+		return
+	}
+
+	for _, id := range s.List() {
+		w.WriteLine(id)
+	}
+}
+
+type stringWriter interface {
+	io.Writer
+	WriteString(string) (int, error)
+	WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+	w     stringWriter
+	depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+	w.depth++
+}
+
+func (w *indentWriter) Pop() {
+	w.depth--
+	if w.depth < 0 {
+		w.depth = 0
+	}
+}
+
+func (w *indentWriter) WriteLine(v string) {
+	w.w.WriteString(indentDepth[:w.depth])
+
+	v = strings.ReplaceAll(v, "\n", "\\n")
+	v = strings.ReplaceAll(v, "\r", "\\r")
+
+	w.w.WriteString(v)
+	w.w.WriteRune('\n')
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
new file mode 100644
index 000000000000..ef96009ba182
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+	"context"
+	"reflect"
+	"strings"
+)
+
+// WithStackValue adds a key value pair to the context that is intended to be
+// scoped to a stack. Use ClearStackValues to get a new context with all stack
+// values cleared.
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+	md = withStackValue(md, key, value)
+	return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+	return context.WithValue(ctx, stackValuesKey{}, nil)
+}
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
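+//
+// A minimal sketch of scoping a value to a single stack invocation;
+// requestIDKey is a hypothetical key type:
+//
+//	type requestIDKey struct{}
+//	ctx = WithStackValue(ctx, requestIDKey{}, "abc-123")
+//	id, _ := GetStackValue(ctx, requestIDKey{}).(string)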
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+	if md == nil {
+		return nil
+	}
+
+	return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+	key    interface{}
+	value  interface{}
+	parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+	if key == nil {
+		panic("nil key")
+	}
+	if !reflect.TypeOf(key).Comparable() {
+		panic("key is not comparable")
+	}
+	return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+	if key == m.key {
+		return m.value
+	}
+
+	if m.parent == nil {
+		return nil
+	}
+
+	return m.parent.Value(key)
+}
+
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	// Walk the chain from the newest value to the root.
+	cc := c
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+	str.WriteRune('}')
+
+	return str.String()
+}
+
+type stringer interface {
+	String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want context depending on the unicode tables. This is only used by
+// *stackValues.String().
+func stringify(v interface{}) string {
+	switch s := v.(type) {
+	case stringer:
+		return s.String()
+	case string:
+		return s
+	}
+	return ""
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 000000000000..7e1d94caeef9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+	"context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+	Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+	Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+	HandleBuild(ctx context.Context, in BuildInput) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// build step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// ID returns a unique ID for the middleware in the BuildStep. The step
+	// does not allow duplicate IDs.
+	ID() string
+
+	// HandleBuild invokes the middleware behavior which must delegate to the
+	// next handler for the middleware chain to continue. The method must
+	// return a result or error to its caller.
+	HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided,
+// and the func to be invoked.
+func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware {
+	return buildMiddlewareFunc{
+		id: id,
+		fn: fn,
+	}
+}
+
+type buildMiddlewareFunc struct {
+	// Unique ID for the middleware.
+	id string
+
+	// Middleware function to be called.
+ fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) +} + +// ID returns the unique ID for the middleware. +func (s buildMiddlewareFunc) ID() string { return s.id } + +// HandleBuild invokes the middleware Fn. +func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ BuildMiddleware = (buildMiddlewareFunc{}) + +// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on +// a handler. +type BuildStep struct { + ids *orderedIDs +} + +// NewBuildStep returns a BuildStep ready to have middleware for +// initialization added to it. +func NewBuildStep() *BuildStep { + return &BuildStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*BuildStep)(nil) + +// ID returns the unique name of the step as a middleware. +func (s *BuildStep) ID() string { + return "Build stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h BuildHandler = buildWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedBuildHandler{ + Next: h, + With: order[i].(BuildMiddleware), + } + } + + sIn := BuildInput{ + Request: in, + } + + res, metadata, err := h.HandleBuild(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(BuildMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware id. +// Returns an error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or an error if the middleware to be removed +// doesn't exist. +func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *BuildStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. 
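+// Clearing can be used to rebuild a step from scratch before re-adding
+// middleware in a new order.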
+func (s *BuildStep) Clear() {
+	s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+	Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// HandleBuild implements BuildHandler, converts types and delegates to the
+// underlying generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return BuildOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedBuildHandler struct {
+	Next BuildHandler
+	With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+	return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 000000000000..44860721571c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+	"context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeMiddleware to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+	Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+	RawResponse interface{}
+	Result      interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+	HandleDeserialize(ctx context.Context, in DeserializeInput) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+	HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID
+// provided, and the func to be invoked.
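+//
+// An illustrative sketch; the middleware ID and inspection logic are
+// hypothetical:
+//
+//	m := DeserializeMiddlewareFunc("logRawResponse", func(
+//		ctx context.Context, in DeserializeInput, next DeserializeHandler,
+//	) (DeserializeOutput, Metadata, error) {
+//		out, md, err := next.HandleDeserialize(ctx, in)
+//		// inspect out.RawResponse here after the next handler returns
+//		return out, md, err
+//	})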
+func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { + return deserializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type deserializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, DeserializeInput, DeserializeHandler) ( + DeserializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s deserializeMiddlewareFunc) ID() string { return s.id } + +// HandleDeserialize invokes the middleware Fn. +func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) + +// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be +// invoked on a handler. +type DeserializeStep struct { + ids *orderedIDs +} + +// NewDeserializeStep returns a DeserializeStep ready to have middleware for +// initialization added to it. +func NewDeserializeStep() *DeserializeStep { + return &DeserializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*DeserializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *DeserializeStep) ID() string { + return "Deserialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h DeserializeHandler = deserializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedDeserializeHandler{ + Next: h, + With: order[i].(DeserializeMiddleware), + } + } + + sIn := DeserializeInput{ + Request: in, + } + + res, metadata, err := h.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(DeserializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(DeserializeMiddleware), nil +} + +// Remove removes the middleware by id. 
Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+	Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	resp, metadata, err := w.Next.Handle(ctx, in.Request)
+	return DeserializeOutput{
+		RawResponse: resp,
+	}, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+	Next DeserializeHandler
+	With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+	return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 000000000000..065e3885de92
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+	Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+	Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+	HandleFinalize(ctx context.Context, in FinalizeInput) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+	// ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleFinalize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
+ HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID +// provided, and the func to be invoked. +func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { + return finalizeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type finalizeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, FinalizeInput, FinalizeHandler) ( + FinalizeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s finalizeMiddlewareFunc) ID() string { return s.id } + +// HandleFinalize invokes the middleware Fn. +func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) + +// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be +// invoked on a handler. +type FinalizeStep struct { + ids *orderedIDs +} + +// NewFinalizeStep returns a FinalizeStep ready to have middleware for +// initialization added to it. +func NewFinalizeStep() *FinalizeStep { + return &FinalizeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*FinalizeStep)(nil) + +// ID returns the unique id of the step as a middleware. +func (s *FinalizeStep) ID() string { + return "Finalize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h FinalizeHandler = finalizeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedFinalizeHandler{ + Next: h, + With: order[i].(FinalizeMiddleware), + } + } + + sIn := FinalizeInput{ + Request: in, + } + + res, metadata, err := h.HandleFinalize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(FinalizeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
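+//
+// For example (the existing middleware ID and replacement value are
+// hypothetical):
+//
+//	old, err := stack.Finalize.Swap("Signing", customSigner{})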
+func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *FinalizeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *FinalizeStep) Clear() { + s.ids.Clear() +} + +type finalizeWrapHandler struct { + Next Handler +} + +var _ FinalizeHandler = (*finalizeWrapHandler)(nil) + +// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying +// generic handler. +func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return FinalizeOutput{ + Result: res, + }, metadata, err +} + +type decoratedFinalizeHandler struct { + Next FinalizeHandler + With FinalizeMiddleware +} + +var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) + +func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return h.With.HandleFinalize(ctx, in, h.Next) +} + +// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) + +// HandleFinalize invokes the wrapped function with the given arguments. +func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { + return f(ctx, in) +} + +var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go new file mode 100644 index 000000000000..fe359144d243 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// InitializeInput wraps the input parameters for the InitializeMiddlewares to +// consume. InitializeMiddleware may modify the parameter value before +// forwarding it along to the next InitializeHandler. +type InitializeInput struct { + Parameters interface{} +} + +// InitializeOutput provides the result returned by the next InitializeHandler. +type InitializeOutput struct { + Result interface{} +} + +// InitializeHandler provides the interface for the next handler the +// InitializeMiddleware will call in the middleware chain. +type InitializeHandler interface { + HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddleware provides the interface for middleware specific to the +// initialize step. Delegates to the next InitializeHandler for further +// processing. +type InitializeMiddleware interface { + // ID returns a unique ID for the middleware in the InitializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleInitialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. 
The method must return a result or + // error to its caller. + HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, +// and the func to be invoked. +func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { + return initializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type initializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, InitializeInput, InitializeHandler) ( + InitializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s initializeMiddlewareFunc) ID() string { return s.id } + +// HandleInitialize invokes the middleware Fn. +func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ InitializeMiddleware = (initializeMiddlewareFunc{}) + +// InitializeStep provides the ordered grouping of InitializeMiddleware to be +// invoked on a handler. +type InitializeStep struct { + ids *orderedIDs +} + +// NewInitializeStep returns an InitializeStep ready to have middleware for +// initialization added to it. +func NewInitializeStep() *InitializeStep { + return &InitializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*InitializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *InitializeStep) ID() string { + return "Initialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h InitializeHandler = initializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedInitializeHandler{ + Next: h, + With: order[i].(InitializeMiddleware), + } + } + + sIn := InitializeInput{ + Parameters: in, + } + + res, metadata, err := h.HandleInitialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(InitializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *InitializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *InitializeStep) Clear() { + s.ids.Clear() +} + +type initializeWrapHandler struct { + Next Handler +} + +var _ InitializeHandler = (*initializeWrapHandler)(nil) + +// HandleInitialize implements InitializeHandler, converts types and delegates to underlying +// generic handler. +func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Parameters) + return InitializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedInitializeHandler struct { + Next InitializeHandler + With InitializeMiddleware +} + +var _ InitializeHandler = (*decoratedInitializeHandler)(nil) + +func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + return h.With.HandleInitialize(ctx, in, h.Next) +} + +// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. +type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) + +// HandleInitialize calls the wrapped function with the provided arguments. +func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { + return i(ctx, in) +} + +var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go new file mode 100644 index 000000000000..114bafcedea8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go @@ -0,0 +1,219 @@ +package middleware + +import "context" + +// SerializeInput provides the input parameters for the SerializeMiddleware to +// consume. SerializeMiddleware may modify the Request value before forwarding +// SerializeInput along to the next SerializeHandler. The Parameters member +// should not be modified by SerializeMiddleware, InitializeMiddleware should +// be responsible for modifying the provided Parameter value. +type SerializeInput struct { + Parameters interface{} + Request interface{} +} + +// SerializeOutput provides the result returned by the next SerializeHandler. +type SerializeOutput struct { + Result interface{} +} + +// SerializeHandler provides the interface for the next handler the +// SerializeMiddleware will call in the middleware chain. +type SerializeHandler interface { + HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next SerializeHandler for further +// processing. 
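+//
+// A sketch of a serialize middleware; the concrete request type and encode
+// helper are hypothetical. It reads in.Parameters, populates in.Request, and
+// delegates to the next handler:
+//
+//	func (m jsonSerializer) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+//		SerializeOutput, Metadata, error,
+//	) {
+//		req := in.Request.(*myRequest)
+//		req.Body = encode(in.Parameters)
+//		return next.HandleSerialize(ctx, in)
+//	}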
+type SerializeMiddleware interface { + // ID returns a unique ID for the middleware in the SerializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleSerialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID +// provided, and the func to be invoked. +func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { + return serializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type serializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, SerializeInput, SerializeHandler) ( + SerializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s serializeMiddlewareFunc) ID() string { return s.id } + +// HandleSerialize invokes the middleware Fn. +func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ SerializeMiddleware = (serializeMiddlewareFunc{}) + +// SerializeStep provides the ordered grouping of SerializeMiddleware to be +// invoked on a handler. +type SerializeStep struct { + newRequest func() interface{} + ids *orderedIDs +} + +// NewSerializeStep returns a SerializeStep ready to have middleware for +// initialization added to it. The newRequest func parameter is used to +// initialize the transport specific request for the stack SerializeStep to +// serialize the input parameters into. +func NewSerializeStep(newRequest func() interface{}) *SerializeStep { + return &SerializeStep{ + ids: newOrderedIDs(), + newRequest: newRequest, + } +} + +var _ Middleware = (*SerializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *SerializeStep) ID() string { + return "Serialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h SerializeHandler = serializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedSerializeHandler{ + Next: h, + With: order[i].(SerializeMiddleware), + } + } + + sIn := SerializeInput{ + Parameters: in, + Request: s.newRequest(), + } + + res, metadata, err := h.HandleSerialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(SerializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. 
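+//
+// For instance (the ID and function value are hypothetical):
+//
+//	err := stack.Serialize.Add(SerializeMiddlewareFunc("encodeBody", encodeFn), After)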
+func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *SerializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *SerializeStep) Clear() { + s.ids.Clear() +} + +type serializeWrapHandler struct { + Next Handler +} + +var _ SerializeHandler = (*serializeWrapHandler)(nil) + +// Implements SerializeHandler, converts types and delegates to underlying +// generic handler. +func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return SerializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedSerializeHandler struct { + Next SerializeHandler + With SerializeMiddleware +} + +var _ SerializeHandler = (*decoratedSerializeHandler)(nil) + +func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + return h.With.HandleSerialize(ctx, in, h.Next) +} + +// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) + +// HandleSerialize calls the wrapped function with the provided arguments. +func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return s(ctx, in) +} + +var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go new file mode 100644 index 000000000000..bc1f6996161a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/doc.go @@ -0,0 +1,5 @@ +// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. +package ptr + +//go:generate go run -tags codegen generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go new file mode 100644 index 000000000000..a2845bb2c803 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go @@ -0,0 +1,601 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. 
+package ptr + +import ( + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + if p == nil { + return v + } + + return *p +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + ps := make([]bool, len(vs)) + for i, v := range vs { + ps[i] = ToBool(v) + } + + return ps +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + ps := make(map[string]bool, len(vs)) + for k, v := range vs { + ps[k] = ToBool(v) + } + + return ps +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + if p == nil { + return v + } + + return *p +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + ps := make([]byte, len(vs)) + for i, v := range vs { + ps[i] = ToByte(v) + } + + return ps +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + ps := make(map[string]byte, len(vs)) + for k, v := range vs { + ps[k] = ToByte(v) + } + + return ps +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + if p == nil { + return v + } + + return *p +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + ps := make([]string, len(vs)) + for i, v := range vs { + ps[i] = ToString(v) + } + + return ps +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + ps := make(map[string]string, len(vs)) + for k, v := range vs { + ps[k] = ToString(v) + } + + return ps +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + if p == nil { + return v + } + + return *p +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + ps := make([]int, len(vs)) + for i, v := range vs { + ps[i] = ToInt(v) + } + + return ps +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + ps := make(map[string]int, len(vs)) + for k, v := range vs { + ps[k] = ToInt(v) + } + + return ps +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. 
Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + if p == nil { + return v + } + + return *p +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + ps := make([]int8, len(vs)) + for i, v := range vs { + ps[i] = ToInt8(v) + } + + return ps +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + ps := make(map[string]int8, len(vs)) + for k, v := range vs { + ps[k] = ToInt8(v) + } + + return ps +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + if p == nil { + return v + } + + return *p +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + ps := make([]int16, len(vs)) + for i, v := range vs { + ps[i] = ToInt16(v) + } + + return ps +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + ps := make(map[string]int16, len(vs)) + for k, v := range vs { + ps[k] = ToInt16(v) + } + + return ps +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + if p == nil { + return v + } + + return *p +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + ps := make([]int32, len(vs)) + for i, v := range vs { + ps[i] = ToInt32(v) + } + + return ps +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + ps := make(map[string]int32, len(vs)) + for k, v := range vs { + ps[k] = ToInt32(v) + } + + return ps +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + if p == nil { + return v + } + + return *p +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + ps := make([]int64, len(vs)) + for i, v := range vs { + ps[i] = ToInt64(v) + } + + return ps +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + ps := make(map[string]int64, len(vs)) + for k, v := range vs { + ps[k] = ToInt64(v) + } + + return ps +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. 
+func ToUint(p *uint) (v uint) { + if p == nil { + return v + } + + return *p +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + ps := make([]uint, len(vs)) + for i, v := range vs { + ps[i] = ToUint(v) + } + + return ps +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + ps := make(map[string]uint, len(vs)) + for k, v := range vs { + ps[k] = ToUint(v) + } + + return ps +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + if p == nil { + return v + } + + return *p +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + ps := make([]uint8, len(vs)) + for i, v := range vs { + ps[i] = ToUint8(v) + } + + return ps +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + ps := make(map[string]uint8, len(vs)) + for k, v := range vs { + ps[k] = ToUint8(v) + } + + return ps +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + if p == nil { + return v + } + + return *p +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + ps := make([]uint16, len(vs)) + for i, v := range vs { + ps[i] = ToUint16(v) + } + + return ps +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + ps := make(map[string]uint16, len(vs)) + for k, v := range vs { + ps[k] = ToUint16(v) + } + + return ps +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + if p == nil { + return v + } + + return *p +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + ps := make([]uint32, len(vs)) + for i, v := range vs { + ps[i] = ToUint32(v) + } + + return ps +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + ps := make(map[string]uint32, len(vs)) + for k, v := range vs { + ps[k] = ToUint32(v) + } + + return ps +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. 
+func ToUint64(p *uint64) (v uint64) { + if p == nil { + return v + } + + return *p +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + ps := make([]uint64, len(vs)) + for i, v := range vs { + ps[i] = ToUint64(v) + } + + return ps +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + ps := make(map[string]uint64, len(vs)) + for k, v := range vs { + ps[k] = ToUint64(v) + } + + return ps +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + if p == nil { + return v + } + + return *p +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + ps := make([]float32, len(vs)) + for i, v := range vs { + ps[i] = ToFloat32(v) + } + + return ps +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + ps := make(map[string]float32, len(vs)) + for k, v := range vs { + ps[k] = ToFloat32(v) + } + + return ps +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + if p == nil { + return v + } + + return *p +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + ps := make([]float64, len(vs)) + for i, v := range vs { + ps[i] = ToFloat64(v) + } + + return ps +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + ps := make(map[string]float64, len(vs)) + for k, v := range vs { + ps[k] = ToFloat64(v) + } + + return ps +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + ps := make([]time.Time, len(vs)) + for i, v := range vs { + ps[i] = ToTime(v) + } + + return ps +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. 
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + ps := make(map[string]time.Time, len(vs)) + for k, v := range vs { + ps[k] = ToTime(v) + } + + return ps +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + ps := make([]time.Duration, len(vs)) + for i, v := range vs { + ps[i] = ToDuration(v) + } + + return ps +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + ps := make(map[string]time.Duration, len(vs)) + for k, v := range vs { + ps[k] = ToDuration(v) + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go new file mode 100644 index 000000000000..97f01011e7ea --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go @@ -0,0 +1,83 @@ +//go:build codegen +// +build codegen + +package ptr + +import "strings" + +func GetScalars() Scalars { + return Scalars{ + {Type: "bool"}, + {Type: "byte"}, + {Type: "string"}, + {Type: "int"}, + {Type: "int8"}, + {Type: "int16"}, + {Type: "int32"}, + {Type: "int64"}, + {Type: "uint"}, + {Type: "uint8"}, + {Type: "uint16"}, + {Type: "uint32"}, + {Type: "uint64"}, + {Type: "float32"}, + {Type: "float64"}, + {Type: "Time", Import: &Import{Path: "time"}}, + {Type: "Duration", Import: &Import{Path: "time"}}, + } +} + +// Import provides the import path and optional alias +type Import struct { + Path string + Alias string +} + +// Package returns the Go package name for the import. Returns alias if set. +func (i Import) Package() string { + if v := i.Alias; len(v) != 0 { + return v + } + + if v := i.Path; len(v) != 0 { + parts := strings.Split(v, "/") + pkg := parts[len(parts)-1] + return pkg + } + + return "" +} + +// Scalar provides the definition of a type to generate pointer utilities for. +type Scalar struct { + Type string + Import *Import +} + +// Name returns the exported function name for the type. +func (t Scalar) Name() string { + return strings.Title(t.Type) +} + +// Symbol returns the scalar's Go symbol with path if needed. +func (t Scalar) Symbol() string { + if t.Import != nil { + return t.Import.Package() + "." + t.Type + } + return t.Type +} + +// Scalars is a list of scalars. +type Scalars []Scalar + +// Imports returns all imports for the scalars. +func (ts Scalars) Imports() []*Import { + imports := []*Import{} + for _, t := range ts { + if v := t.Import; v != nil { + imports = append(imports, v) + } + } + + return imports +} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go new file mode 100644 index 000000000000..0bfbbecbdce0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go @@ -0,0 +1,499 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// Bool returns a pointer value for the bool value passed in. 
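+//
+// Illustrative use (editorial sketch, not part of the vendored source); the
+// generated To* helpers in this package reverse the conversion:
+//
+//	p := Bool(true) // *bool pointing at a copy of true
+//	v := ToBool(p)  // true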
+func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. 
+func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. 
+func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + ps := make([]*time.Time, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + ps := make(map[string]*time.Time, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return &v +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + ps := make([]*time.Duration, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + ps := make(map[string]*time.Duration, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go new file mode 100644 index 000000000000..f8b25d56259e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/doc.go @@ -0,0 +1,3 @@ +// Package rand provides utilities for creating and working with random value +// generators. 
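+//
+// Illustrative use (editorial sketch, not part of the vendored source):
+//
+//	n, err := rand.CryptoRandInt63n(100)           // random int64 in [0, 100)
+//	id, err := rand.NewUUID(rand.Reader).GetUUID() // random version 4 UUID string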
+package rand
diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go
new file mode 100644
index 000000000000..9c479f62b59f
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/rand.go
@@ -0,0 +1,31 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can be reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+	bi, err := rand.Int(reader, big.NewInt(max))
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %w", err)
+	}
+
+	return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and the value of max,
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+	return Int63n(Reader, max)
+}
diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go
new file mode 100644
index 000000000000..dc81cbc68ac0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/uuid.go
@@ -0,0 +1,87 @@
+package rand
+
+import (
+	"encoding/hex"
+	"io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+	uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider returning
+// tokens in the UUID random format using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+	return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for the idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+	return u.uuid.GetUUID()
+}
+
+// UUID computes random UUID version 4 values from a random source
+// reader.
+type UUID struct {
+	randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID version 4 values.
+func NewUUID(r io.Reader) *UUID {
+	return &UUID{randSrc: r}
+}
+
+// GetUUID returns a random UUID version 4 string representation sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+	var b [16]byte
+	if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+		return "", err
+	}
+	r.makeUUIDv4(b[:])
+	return format(b), nil
+}
+
+// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetBytes() (u []byte, err error) {
+	u = make([]byte, 16)
+	if _, err = io.ReadFull(r.randSrc, u); err != nil {
+		return u, err
+	}
+	r.makeUUIDv4(u)
+	return u, nil
+}
+
+func (r *UUID) makeUUIDv4(u []byte) {
+	// 13th character is "4"
+	u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0
+}
+
+// Format returns the canonical text representation of a UUID.
+// This implementation is optimized to not use fmt.
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+	var scratch [36]byte
+
+	hex.Encode(scratch[:8], u[0:4])
+	scratch[8] = dash
+	hex.Encode(scratch[9:13], u[4:6])
+	scratch[13] = dash
+	hex.Encode(scratch[14:18], u[6:8])
+	scratch[18] = dash
+	hex.Encode(scratch[19:23], u[8:10])
+	scratch[23] = dash
+	hex.Encode(scratch[24:], u[10:])
+
+	return string(scratch[:])
+}
diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go
new file mode 100644
index 000000000000..629207672b4a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/sync/error.go
@@ -0,0 +1,53 @@
+package sync
+
+import "sync"
+
+// OnceErr wraps the behavior of recording an error
+// once and signaling on a channel when this has occurred.
+// Signaling is done by closing the channel.
+//
+// Type is safe for concurrent usage.
+type OnceErr struct {
+	mu  sync.RWMutex
+	err error
+	ch  chan struct{}
+}
+
+// NewOnceErr returns a new OnceErr
+func NewOnceErr() *OnceErr {
+	return &OnceErr{
+		ch: make(chan struct{}, 1),
+	}
+}
+
+// Err acquires a read-lock and returns an
+// error if one has been set.
+func (e *OnceErr) Err() error {
+	e.mu.RLock()
+	err := e.err
+	e.mu.RUnlock()
+
+	return err
+}
+
+// SetError acquires a write-lock and will set
+// the underlying error value if one has not been set.
+func (e *OnceErr) SetError(err error) {
+	if err == nil {
+		return
+	}
+
+	e.mu.Lock()
+	if e.err == nil {
+		e.err = err
+		close(e.ch)
+	}
+	e.mu.Unlock()
+}
+
+// ErrorSet returns a channel that will be used to signal
+// that an error has been set. This channel will be closed
+// when the error value has been set for OnceErr.
+func (e *OnceErr) ErrorSet() <-chan struct{} {
+	return e.ch
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 000000000000..b552a09f8a8b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"strings"
+	"time"
+)
+
+const (
+	// dateTimeFormat variants are RFC 3339 section 5.6 date-time formats.
+	dateTimeFormatInput    = "2006-01-02T15:04:05.999999999Z"
+	dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+	dateTimeFormatOutput   = "2006-01-02T15:04:05.999Z"
+
+	// httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+	// IMF-fixdate with no UTC offset.
+	httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+	// Additional formats needed for compatibility.
+	httpDateFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+	return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+	return tryParse(value,
+		dateTimeFormatInput,
+		dateTimeFormatInputNoZ,
+		time.RFC3339Nano,
+		time.RFC3339,
+	)
+}
+
+// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+	return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+	return tryParse(value,
+		httpDateFormat,
+		httpDateFormatSingleDigitDay,
+		httpDateFormatSingleDigitDayTwoDigitYear,
+		time.RFC850,
+		time.ANSIC,
+	)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+	ms := value.UnixNano() / int64(time.Millisecond)
+	return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+	f := big.NewFloat(value)
+	f = f.Mul(f, millisecondFloat)
+	i, _ := f.Int64()
+	// Offset to `UTC` because time.Unix returns the time value based on the
+	// system's local time setting.
+	return time.Unix(0, i*1e6).UTC()
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+	var errs parseErrors
+	for _, f := range formats {
+		t, err := time.Parse(f, v)
+		if err != nil {
+			errs = append(errs, parseError{
+				Format: f,
+				Err:    err,
+			})
+			continue
+		}
+		return t, nil
+	}
+
+	return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+	var s strings.Builder
+	for _, e := range es {
+		fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+	}
+
+	return "parse errors:" + s.String()
+}
+
+type parseError struct {
+	Format string
+	Err    error
+}
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
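+//
+// For example (editorial sketch, not part of the vendored source):
+//
+//	if err := SleepWithContext(ctx, 500*time.Millisecond); err != nil {
+//		return err // the context was canceled before the delay elapsed
+//	}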
+func SleepWithContext(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
new file mode 100644
index 000000000000..bc4ad6e79739
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
@@ -0,0 +1,70 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// contentMD5Checksum provides a middleware to compute and set the
+// content-md5 checksum for an HTTP request
+type contentMD5Checksum struct {
+}
+
+// AddContentChecksumMiddleware adds checksum middleware to middleware's
+// build step.
+func AddContentChecksumMiddleware(stack *middleware.Stack) error {
+	// This middleware must be executed before request body is set.
+	return stack.Build.Add(&contentMD5Checksum{}, middleware.Before)
+}
+
+// ID returns the identifier for the checksum middleware
+func (m *contentMD5Checksum) ID() string { return "ContentChecksum" }
+
+// HandleBuild adds behavior to compute the md5 checksum and set the
+// content-md5 header on the HTTP request
+func (m *contentMD5Checksum) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if Content-MD5 header is already present, return
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// fetch the request stream.
+	stream := req.GetStream()
+	// compute checksum if payload is explicit
+	if stream != nil {
+		if !req.IsStreamSeekable() {
+			return out, metadata, fmt.Errorf(
+				"unseekable stream is not supported for computing md5 checksum")
+		}
+
+		v, err := computeMD5Checksum(stream)
+		if err != nil {
+			return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err)
+		}
+
+		// reset the request stream
+		if err := req.RewindStream(); err != nil {
+			return out, metadata, fmt.Errorf(
+				"error rewinding request stream after computing md5 checksum, %w", err)
+		}
+
+		// set the 'Content-MD5' header
+		req.Header.Set(contentMD5Header, string(v))
+	}
+
+	// continue with the next middleware in the stack
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go
new file mode 100644
index 000000000000..e691c69bf444
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/client.go
@@ -0,0 +1,120 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ClientDo provides the interface for custom HTTP client implementations.
+type ClientDo interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// ClientDoFunc provides a helper to wrap a function as an HTTP client for
+// round tripping requests.
+type ClientDoFunc func(*http.Request) (*http.Response, error)
+
+// Do will invoke the underlying func, returning the result.
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+	return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. The
+// standard implementation is http.Client.
+type ClientHandler struct {
+	client ClientDo
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+func NewClientHandler(client ClientDo) ClientHandler {
+	return ClientHandler{
+		client: client,
+	}
+}
+
+// Handle implements the middleware Handler interface, that will invoke the
+// underlying HTTP client. Requires the input to be a Smithy *Request. Returns
+// a Smithy *Response, or error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+	out interface{}, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.(*Request)
+	if !ok {
+		return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+	}
+
+	builtRequest := req.Build(ctx)
+	if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+		return nil, metadata, err
+	}
+
+	resp, err := c.client.Do(builtRequest)
+	if resp == nil {
+		// Ensure an HTTP response value is always present to prevent
+		// unexpected panics.
+		resp = &http.Response{
+			Header: http.Header{},
+			Body:   http.NoBody,
+		}
+	}
+	if err != nil {
+		err = &RequestSendError{Err: err}
+
+		// Override the error with a context canceled error, if the context
+		// was canceled.
+		select {
+		case <-ctx.Done():
+			err = &smithy.CanceledError{Err: ctx.Err()}
+		default:
+		}
+	}
+
+	// HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner.
+	// So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the
+	// stream reference so that it can be safely reused.
+	if builtRequest.Body != nil {
+		_ = builtRequest.Body.Close()
+	}
+
+	return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error. This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+	Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+	return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+	return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+	return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+	return &http.Response{
+		StatusCode: 200,
+		Header:     http.Header{},
+		Body:       http.NoBody,
+	}, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 000000000000..07366ac85a88
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
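+
+Illustrative usage (editorial sketch, not part of the vendored source), where
+stdhttp refers to the standard library net/http package:
+
+	handler := NewClientHandler(stdhttp.DefaultClient)
+	out, _, err := handler.Handle(ctx, req) // req is a *Request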
+*/ +package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go new file mode 100644 index 000000000000..cbc9deb4df07 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go @@ -0,0 +1,163 @@ +package http + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { + values := make([]string, 0, len(vs)) + + for i := 0; i < len(vs); i++ { + parts, err := splitFn(vs[i]) + if err != nil { + return nil, err + } + values = append(values, parts...) + } + + return values, nil +} + +// SplitHeaderListValues attempts to split the elements of the slice by commas, +// and return a list of all values separated. Returns error if unable to +// separate the values. +func SplitHeaderListValues(vs []string) ([]string, error) { + return splitHeaderListValues(vs, quotedCommaSplit) +} + +func quotedCommaSplit(v string) (parts []string, err error) { + v = strings.TrimSpace(v) + + expectMore := true + for i := 0; i < len(v); i++ { + if unicode.IsSpace(rune(v[i])) { + continue + } + expectMore = false + + // leading space in part is ignored. + // Start of value must be non-space, or quote. + // + // - If quote, enter quoted mode, find next non-escaped quote to + // terminate the value. + // - Otherwise, find next comma to terminate value. + + remaining := v[i:] + + var value string + var valueLen int + if remaining[0] == '"' { + //------------------------------ + // Quoted value + //------------------------------ + var j int + var skipQuote bool + for j += 1; j < len(remaining); j++ { + if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { + skipQuote = !skipQuote + continue + } + if remaining[j] == '"' { + break + } + } + if j == len(remaining) || j == 1 { + return nil, fmt.Errorf("value %v missing closing double quote", + remaining) + } + valueLen = j + 1 + + tail := remaining[valueLen:] + var k int + for ; k < len(tail); k++ { + if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { + return nil, fmt.Errorf("value %v has non-space trailing characters", + remaining) + } + if tail[k] == ',' { + expectMore = true + break + } + } + value = remaining[:valueLen] + value, err = strconv.Unquote(value) + if err != nil { + return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) + } + + // Pad valueLen to include trailing space(s) so `i` is updated correctly. + valueLen += k + + } else { + //------------------------------ + // Unquoted value + //------------------------------ + + // Index of the next comma is the length of the value, or end of string. + valueLen = strings.Index(remaining, ",") + if valueLen != -1 { + expectMore = true + } else { + valueLen = len(remaining) + } + value = strings.TrimSpace(remaining[:valueLen]) + } + + i += valueLen + parts = append(parts, value) + + } + + if expectMore { + parts = append(parts, "") + } + + return parts, nil +} + +// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date +// timestamp values in the slice by commas, and return a list of all values +// separated. The split is aware of the HTTP-Date timestamp format, and will skip +// comma within the timestamp value. Returns an error if unable to split the +// timestamp values. 
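+//
+// For example (editorial sketch, not part of the vendored source):
+//
+//	vals, err := SplitHTTPDateTimestampHeaderListValues([]string{
+//		"Mon, 16 Dec 2019 23:48:18 GMT, Tue, 17 Dec 2019 23:48:18 GMT",
+//	})
+//	// vals: ["Mon, 16 Dec 2019 23:48:18 GMT" "Tue, 17 Dec 2019 23:48:18 GMT"]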
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+	if n := strings.Count(v, ","); n <= 1 {
+		// Nothing to do if the value contains no comma, or only a single
+		// HTTP-date value.
+		return []string{v}, nil
+	} else if n%2 == 0 {
+		return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+	}
+
+	var parts []string
+	var i, j int
+
+	var doSplit bool
+	for ; i < len(v); i++ {
+		if v[i] == ',' {
+			if doSplit {
+				doSplit = false
+				parts = append(parts, strings.TrimSpace(v[j:i]))
+				j = i + 1
+			} else {
+				// Skip the first comma in the timestamp value since that
+				// separates the day from the rest of the timestamp.
+				//
+				// Tue, 17 Dec 2019 23:48:18 GMT
+				doSplit = true
+			}
+		}
+	}
+	// Add final part
+	if j < len(v) {
+		parts = append(parts, strings.TrimSpace(v[j:]))
+	}
+
+	return parts, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
new file mode 100644
index 000000000000..6b290fec030f
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -0,0 +1,89 @@
+package http
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(host string) error {
+	var errors strings.Builder
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+		if err != nil {
+			errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+			errors.WriteString(err.Error())
+		}
+
+		if !ValidPortNumber(port) {
+			errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+			errors.WriteString(label)
+		}
+	}
+
+	if len(hostname) == 0 && len(port) != 0 {
+		errors.WriteString("\nendpoint host with port must not be empty")
+	}
+
+	if len(hostname) > 255 {
+		errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+	}
+
+	if len(errors.String()) > 0 {
+		return fmt.Errorf("invalid endpoint host%s", errors.String())
+	}
+	return nil
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
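+//
+// For example (editorial sketch): ValidHostLabel("api-east-1") is true, while
+// an empty label, or one longer than 63 characters, is not.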
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 000000000000..941a8d6b5123
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+	"io"
+	"sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+	sr := &safeReadCloser{
+		readCloser: readCloser,
+	}
+
+	if _, ok := readCloser.(io.WriterTo); ok {
+		return &safeWriteToReadCloser{safeReadCloser: sr}
+	}
+
+	return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+	*safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+	r.safeReadCloser.mtx.Lock()
+	defer r.safeReadCloser.mtx.Unlock()
+
+	if r.safeReadCloser.closed {
+		return 0, io.EOF
+	}
+
+	return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+	readCloser io.ReadCloser
+	closed     bool
+	mtx        sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying reader. If the reader is closed io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return 0, io.EOF
+	}
+
+	return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return nil
+	}
+
+	r.closed = true
+	rc := r.readCloser
+	r.readCloser = nil
+	return rc.Close()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
new file mode 100644
index 000000000000..5d6a4b23a27a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
@@ -0,0 +1,25 @@
+package http
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"io"
+)
+
+// computeMD5Checksum computes the base64 md5 checksum of an io.Reader's contents.
+// Returns the byte slice of the md5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+	// copy errors may be assumed to be from the body.
+ _, err := io.Copy(h, r) + if err != nil { + return nil, fmt.Errorf("failed to read body: %w", err) + } + + // encode the md5 checksum in base64. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + return sum64, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go new file mode 100644 index 000000000000..1d3b218a1274 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + "io" + "io/ioutil" +) + +// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically +// close the response body of an operation request if the request response +// failed. +func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) +} + +type errorCloseResponseBodyMiddleware struct{} + +func (*errorCloseResponseBodyMiddleware) ID() string { + return "ErrorCloseResponseBody" +} + +func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err := next.HandleDeserialize(ctx, input) + if err != nil { + if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { + // Consume the full body to prevent TCP connection resets on some platforms + _, _ = io.Copy(ioutil.Discard, resp.Body) + // Do not validate that the response closes successfully. + resp.Body.Close() + } + } + + return out, metadata, err +} + +// AddCloseResponseBodyMiddleware adds the middleware to automatically close +// the response body of an operation request, after the response had been +// deserialized. 
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+	return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	if resp, ok := out.RawResponse.(*Response); ok {
+		// Consume the full body to prevent TCP connection resets on some platforms
+		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
+		if copyErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+		}
+
+		closeErr := resp.Body.Close()
+		if closeErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 000000000000..9969389bb29d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,84 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialized request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// do nothing if request content-length was set to 0 or above.
+	if req.ContentLength >= 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// attempt to compute stream length
+	if n, ok, err := req.StreamLength(); err != nil {
+		return out, metadata, fmt.Errorf(
+			"failed getting length of request stream, %w", err)
+	} else if ok {
+		req.ContentLength = n
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate that the
+// content-length is valid (zero or greater), for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates the request
+// content-length is set to a value of zero or greater.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the request's content-length is set to a value
+// of zero or greater, returning an error otherwise.
+func (m *validateContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if request content-length was set to less than 0, return an error
+	if req.ContentLength < 0 {
+		return out, metadata, fmt.Errorf(
+			"content length for payload is required and must be at least 0")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 000000000000..49884e6afb03
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,88 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type headerValue struct {
+	header string
+	value  string
+	append bool
+}
+
+type headerValueHelper struct {
+	headerValues []headerValue
+}
+
+func (h *headerValueHelper) addHeaderValue(value headerValue) {
+	h.headerValues = append(h.headerValues, value)
+}
+
+func (h *headerValueHelper) ID() string {
+	return "HTTPHeaderHelper"
+}
+
+func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	for _, value := range h.headerValues {
+		if value.append {
+			req.Header.Add(value.header, value.value)
+		} else {
+			req.Header.Set(value.header, value.value)
+		}
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) {
+	id := (*headerValueHelper)(nil).ID()
+	m, ok := stack.Build.Get(id)
+	if !ok {
+		m = &headerValueHelper{}
+		err := stack.Build.Add(m, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := m.(*headerValueHelper)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddHeaderValue returns a stack mutator that adds the header value pair to the header.
+// Appends to any existing values if present.
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+		return nil
+	}
+}
+
+// SetHeaderValue returns a stack mutator that adds the header value pair to the header.
+// Replaces any existing values if present.
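+//
+// For example (editorial sketch, not part of the vendored source):
+//
+//	err := SetHeaderValue("X-Api-Version", "2023-01-01")(stack) // replaces
+//	err = AddHeaderValue("X-Trace-Tag", "alpha")(stack)         // appends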
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: false}) + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go new file mode 100644 index 000000000000..d5909b0a242a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go @@ -0,0 +1,75 @@ +package http + +import ( + "context" + "fmt" + "net/http/httputil" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally +// their respective bodies. Will not perform any logging if none of the options are set. +type RequestResponseLogger struct { + LogRequest bool + LogRequestWithBody bool + + LogResponse bool + LogResponseWithBody bool +} + +// ID is the middleware identifier. +func (r *RequestResponseLogger) ID() string { + return "RequestResponseLogger" +} + +// HandleDeserialize will log the request and response HTTP messages if configured accordingly. +func (r *RequestResponseLogger) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + if r.LogRequest || r.LogRequestWithBody { + smithyRequest, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + rc := smithyRequest.Build(ctx) + reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) + if err != nil { + return out, metadata, err + } + + logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) + + if r.LogRequestWithBody { + smithyRequest, err = smithyRequest.SetStream(rc.Body) + if err != nil { + return out, metadata, err + } + in.Request = smithyRequest + } + } + + out, metadata, err = next.HandleDeserialize(ctx, in) + + if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { + smithyResponse, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) + if err != nil { + return out, metadata, fmt.Errorf("failed to dump response %w", err) + } + + logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go new file mode 100644 index 000000000000..d6079b25950c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type ( + hostnameImmutableKey struct{} + hostPrefixDisableKey struct{} +) + +// GetHostnameImmutable retrieves whether the endpoint hostname should be considered +// immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. 
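+//
+// For example (editorial sketch, not part of the vendored source):
+//
+//	ctx = SetHostnameImmutable(ctx, true)
+//	immutable := GetHostnameImmutable(ctx) // true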
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+	return v
+}
+
+// SetHostnameImmutable sets or modifies whether the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+	v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+	return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host
+// prefixing should be disabled. If value is true, endpoint host prefixing
+// will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 000000000000..326cb8a6cab9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+	proto              string
+	expectedProtoMajor int
+	expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+	return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+		m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+	ProtoMajor int
+	ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+	return stack.Deserialize.Insert(&RequireMinimumProtocol{
+		ProtoMajor: major,
+		ProtoMinor: minor,
+	}, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+	return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
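+//
+// For example (editorial sketch), a stack requiring at least HTTP/2.0 would
+// be configured with:
+//
+//	err := AddRequireMinimumProtocol(stack, 2, 0)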
+func (r *RequireMinimumProtocol) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + + if !strings.HasPrefix(response.Proto, "HTTP") { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go new file mode 100644 index 000000000000..ffac684f4dcb --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -0,0 +1,180 @@ +package http + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + iointernal "github.com/aws/smithy-go/transport/http/internal/io" +) + +// Request provides the HTTP specific request structure for HTTP specific +// middleware steps to use to serialize input, and send an operation's request. +type Request struct { + *http.Request + stream io.Reader + isStreamSeekable bool + streamStartPos int64 +} + +// NewStackRequest returns an initialized request ready to be populated with the +// HTTP request details. Returns empty interface so the function can be used as +// a parameter to the Smithy middleware Stack constructor. +func NewStackRequest() interface{} { + return &Request{ + Request: &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + ContentLength: -1, // default to unknown length + }, + } +} + +// Clone returns a deep copy of the Request for the new context. A reference to +// the Stream is copied, but the underlying stream is not copied. +func (r *Request) Clone() *Request { + rc := *r + rc.Request = rc.Request.Clone(context.TODO()) + return &rc +} + +// StreamLength returns the number of bytes of the serialized stream attached +// to the request and ok set. If the length cannot be determined, an error will +// be returned. +func (r *Request) StreamLength() (size int64, ok bool, err error) { + return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) +} + +func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { + if stream == nil { + return 0, true, nil + } + + if l, ok := stream.(interface{ Len() int }); ok { + return int64(l.Len()), true, nil + } + + if !seekable { + return 0, false, nil + } + + s := stream.(io.Seeker) + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, false, err + } + + // The reason to seek to streamStartPos instead of 0 is to ensure that the + // SDK only sends the stream from the starting position the user's + // application provided it to the SDK at. For example application opens a + // file, and wants to skip the first N bytes uploading the rest. The + // application would move the file's offset N bytes, then hand it off to + // the SDK to send the remaining. The SDK should respect that initial offset. 
+ _, err = s.Seek(startPos, io.SeekStart) + if err != nil { + return 0, false, err + } + + return endOffset - startPos, true, nil +} + +// RewindStream will rewind the io.Reader to the relative start position if it +// is an io.Seeker. +func (r *Request) RewindStream() error { + // If there is no stream there is nothing to rewind. + if r.stream == nil { + return nil + } + + if !r.isStreamSeekable { + return fmt.Errorf("request stream is not seekable") + } + _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) + return err +} + +// GetStream returns the request stream io.Reader if a stream is set. If no +// stream is present nil will be returned. +func (r *Request) GetStream() io.Reader { + return r.stream +} + +// IsStreamSeekable returns whether the stream is seekable. +func (r *Request) IsStreamSeekable() bool { + return r.isStreamSeekable +} + +// SetStream returns a clone of the request with the stream set to the provided +// reader. May return an error if the provided reader is seekable but returns +// an error. +func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { + rc = r.Clone() + + if reader == http.NoBody { + reader = nil + } + + var isStreamSeekable bool + var streamStartPos int64 + switch v := reader.(type) { + case io.Seeker: + n, err := v.Seek(0, io.SeekCurrent) + if err != nil { + return r, err + } + isStreamSeekable = true + streamStartPos = n + default: + // If the stream length can be determined, and is determined to be empty, + // use a nil stream to prevent confusion between empty vs not-empty + // streams. + length, ok, err := streamLength(reader, false, 0) + if err != nil { + return nil, err + } else if ok && length == 0 { + reader = nil + } + } + + rc.stream = reader + rc.isStreamSeekable = isStreamSeekable + rc.streamStartPos = streamStartPos + + return rc, err +} + +// Build returns a build standard HTTP request value from the Smithy request. +// The request's stream is wrapped in a safe container that allows it to be +// reused for subsequent attempts. +func (r *Request) Build(ctx context.Context) *http.Request { + req := r.Request.Clone(ctx) + + if r.stream == nil && req.ContentLength == -1 { + req.ContentLength = 0 + } + + switch stream := r.stream.(type) { + case *io.PipeReader: + req.Body = ioutil.NopCloser(stream) + req.ContentLength = -1 + default: + // HTTP Client Request must only have a non-nil body if the + // ContentLength is explicitly unknown (-1) or non-zero. The HTTP + // Client will interpret a non-nil body and ContentLength 0 as + // "unknown". This is unwanted behavior. + if req.ContentLength != 0 && r.stream != nil { + req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) + } + } + + return req +} + +// RequestCloner is a function that can take an input request type and clone the request +// for use in a subsequent retry attempt. +func RequestCloner(v interface{}) interface{} { + return v.(*Request).Clone() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go new file mode 100644 index 000000000000..0c13bfcc8e2c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/response.go @@ -0,0 +1,34 @@ +package http + +import ( + "fmt" + "net/http" +) + +// Response provides the HTTP specific response structure for HTTP specific +// middleware steps to use to deserialize the response from an operation call. 
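Pausing on request.go for a moment: SetStream, StreamLength, and RewindStream combine to give retried attempts a stable view of the body. The reader's offset at SetStream time becomes the logical start, and every rewind returns there. A sketch of that behavior; the file name and the 128-byte header are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	f, err := os.Open("payload.bin") // illustrative payload
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The application skips a 128-byte header before handing off the file;
	// the SDK must upload only the remainder.
	if _, err := f.Seek(128, io.SeekStart); err != nil {
		log.Fatal(err)
	}

	req := smithyhttp.NewStackRequest().(*smithyhttp.Request)
	if req, err = req.SetStream(f); err != nil {
		log.Fatal(err)
	}

	size, seekable, err := req.StreamLength()
	fmt.Println(size, seekable, err) // size excludes the first 128 bytes

	// After a failed attempt, RewindStream seeks back to offset 128,
	// not to the start of the file.
	if err := req.RewindStream(); err != nil {
		log.Fatal(err)
	}
}
```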
+type Response struct { + *http.Response +} + +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value. +type ResponseError struct { + Response *Response + Err error +} + +// HTTPStatusCode returns the HTTP response status code received from the service. +func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } + +// HTTPResponse returns the HTTP response received from the service. +func (e *ResponseError) HTTPResponse() *Response { return e.Response } + +// Unwrap returns the nested error if any, or nil. +func (e *ResponseError) Unwrap() error { return e.Err } + +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "http response error StatusCode: %d, %v", + e.Response.StatusCode, e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go new file mode 100644 index 000000000000..607b196a8bdd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/time.go @@ -0,0 +1,13 @@ +package http + +import ( + "time" + + smithytime "github.com/aws/smithy-go/time" +) + +// ParseTime parses a time string like the HTTP Date header. This uses a more +// relaxed rule set for date parsing compared to the standard library. +func ParseTime(text string) (t time.Time, err error) { + return smithytime.ParseHTTPDate(text) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go new file mode 100644 index 000000000000..60a5fc1002a9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/url.go @@ -0,0 +1,44 @@ +package http + +import "strings" + +// JoinPath returns an absolute URL path composed of the two paths provided. +// Enforces that the returned path begins with '/'. If added path is empty the +// returned path suffix will match the first parameter suffix. +func JoinPath(a, b string) string { + if len(a) == 0 { + a = "/" + } else if a[0] != '/' { + a = "/" + a + } + + if len(b) != 0 && b[0] == '/' { + b = b[1:] + } + + if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { + a = a + "/" + } + + return a + b +} + +// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' +// will be collapsed to single separator between values. +func JoinRawQuery(a, b string) string { + a = strings.TrimFunc(a, isAmpersand) + b = strings.TrimFunc(b, isAmpersand) + + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + return a + "&" + b +} + +func isAmpersand(v rune) bool { + return v == '&' +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go new file mode 100644 index 000000000000..71a7e0d8af55 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go @@ -0,0 +1,37 @@ +package http + +import ( + "strings" +) + +// UserAgentBuilder is a builder for a HTTP User-Agent string. +type UserAgentBuilder struct { + sb strings.Builder +} + +// NewUserAgentBuilder returns a new UserAgentBuilder. +func NewUserAgentBuilder() *UserAgentBuilder { + return &UserAgentBuilder{sb: strings.Builder{}} +} + +// AddKey adds the named component/product to the agent string +func (u *UserAgentBuilder) AddKey(key string) { + u.appendTo(key) +} + +// AddKeyValue adds the named key to the agent string with the given value. 
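Backing up briefly to url.go: the join helpers have small but easy-to-forget edge cases. The expected outputs below follow directly from the code shown above:

```go
package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	fmt.Println(smithyhttp.JoinPath("/v1", "objects"))   // "/v1/objects"
	fmt.Println(smithyhttp.JoinPath("", "objects"))      // "/objects"
	fmt.Println(smithyhttp.JoinPath("/v1/", ""))         // "/v1/" (empty b keeps a's suffix)
	fmt.Println(smithyhttp.JoinRawQuery("a=1&", "&b=2")) // "a=1&b=2" (duplicate '&' collapsed)
	fmt.Println(smithyhttp.JoinRawQuery("", "b=2"))      // "b=2"
}
```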
+func (u *UserAgentBuilder) AddKeyValue(key, value string) { + u.appendTo(key + "/" + value) +} + +// Build returns the constructed User-Agent string. May be called multiple times. +func (u *UserAgentBuilder) Build() string { + return u.sb.String() +} + +func (u *UserAgentBuilder) appendTo(value string) { + if u.sb.Len() > 0 { + u.sb.WriteRune(' ') + } + u.sb.WriteString(value) +} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go new file mode 100644 index 000000000000..b5eedc1f90ab --- /dev/null +++ b/vendor/github.com/aws/smithy-go/validation.go @@ -0,0 +1,140 @@ +package smithy + +import ( + "bytes" + "fmt" + "strings" +) + +// An InvalidParamsError provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type InvalidParamsError struct { + // Context is the base context of the invalid parameter group. + Context string + errs []InvalidParamError +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *InvalidParamsError) Add(err InvalidParamError) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another InvalidParamsError +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e *InvalidParamsError) Len() int { + return len(e.errs) +} + +// Error returns the string formatted form of the invalid parameters. +func (e InvalidParamsError) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Error()) + } + + return w.String() +} + +// Errs returns a slice of the invalid parameters +func (e InvalidParamsError) Errs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An InvalidParamError represents an invalid parameter error type. +type InvalidParamError interface { + error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type invalidParamError struct { + context string + nestedContext string + field string + reason string +} + +// Error returns the string version of the invalid parameter error. +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +// Field Returns the field and context the error occurred. +func (e invalidParamError) Field() string { + sb := &strings.Builder{} + sb.WriteString(e.context) + if sb.Len() > 0 { + if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { + sb.WriteRune('.') + } + } + if len(e.nestedContext) > 0 { + sb.WriteString(e.nestedContext) + sb.WriteRune('.') + } + sb.WriteString(e.field) + return sb.String() +} + +// SetContext updates the base context of the error. 
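Assembled, the validation pieces read like this. The sketch below uses the ParamRequiredError constructor defined a little further on; the input and field names are illustrative, not taken from any generated client:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go"
)

func main() {
	// Collect per-field failures under a shared base context.
	var errs smithy.InvalidParamsError
	errs.Context = "PutObjectInput" // illustrative operation input name
	errs.Add(smithy.NewErrParamRequired("Bucket"))
	errs.Add(smithy.NewErrParamRequired("Key"))

	fmt.Println(errs.Error())
	// 2 validation error(s) found.
	// - missing required field, PutObjectInput.Bucket.
	// - missing required field, PutObjectInput.Key.
}
```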
+func (e *invalidParamError) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+		return
+	}
+	// Check if our nested context is an index into a slice or map
+	if e.nestedContext[:1] != "[" {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+		return
+	}
+	e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+	invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+	return &ParamRequiredError{
+		invalidParamError{
+			field:  field,
+			reason: fmt.Sprintf("missing required field"),
+		},
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go
new file mode 100644
index 000000000000..8d70a03ff2f4
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/logger.go
@@ -0,0 +1,36 @@
+package waiter
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// Logger is the Logger middleware used by the waiter to log an attempt
+type Logger struct {
+	// Attempt is the current attempt to be logged
+	Attempt int64
+}
+
+// ID representing the Logger middleware
+func (*Logger) ID() string {
+	return "WaiterLogger"
+}
+
+// HandleInitialize performs handling of request in initialize stack step
+func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	logger := middleware.GetLogger(ctx)
+
+	logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt))
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// AddLogger is a helper util to add the waiter logger after the `SetLogger` middleware in the stack
+func (m Logger) AddLogger(stack *middleware.Stack) error {
+	return stack.Initialize.Insert(&m, "SetLogger", middleware.After)
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go
new file mode 100644
index 000000000000..03e46e2ee72c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go
@@ -0,0 +1,66 @@
+package waiter
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/aws/smithy-go/rand"
+)
+
+// ComputeDelay computes the delay between waiter attempts. The function takes in a current attempt count,
+// minimum delay, maximum delay, and remaining wait time for the waiter as input. The inputs minDelay and maxDelay
+// must always be greater than 0, with minDelay less than or equal to maxDelay.
+//
+// Returns the computed delay and whether a next attempt is possible within the given input time constraints.
+// Note that the zeroth attempt results in no delay.
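Concretely, that contract can be exercised as follows; the durations are arbitrary for the sketch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/smithy-go/waiter"
)

func main() {
	// Third attempt, delay bounded between 2s and 2m, with 5m of budget
	// remaining. The exponential delay for attempt 3 is 8s, then jittered
	// down to somewhere between minDelay (2s) and 8s.
	delay, err := waiter.ComputeDelay(3, 2*time.Second, 2*time.Minute, 5*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sleeping for", delay)
}
```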
+func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { + // zeroth attempt, no delay + if attempt <= 0 { + return 0, nil + } + + // remainingTime is zero or less, no delay + if remainingTime <= 0 { + return 0, nil + } + + // validate min delay is greater than 0 + if minDelay == 0 { + return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") + } + + // validate max delay is greater than 0 + if maxDelay == 0 { + return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") + } + + // Get attempt ceiling to prevent integer overflow. + attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 + + if attempt > int64(attemptCeiling) { + delay = maxDelay + } else { + // Compute exponential delay based on attempt. + ri := 1 << uint64(attempt-1) + // compute delay + delay = minDelay * time.Duration(ri) + } + + if delay != minDelay { + // randomize to get jitter between min delay and delay value + d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) + if err != nil { + return 0, fmt.Errorf("error computing retry jitter, %w", err) + } + + delay = time.Duration(d) + minDelay + } + + // check if this is the last attempt possible and compute delay accordingly + if remainingTime-delay <= minDelay { + delay = remainingTime - minDelay + } + + return delay, nil +} diff --git a/vendor/github.com/containerd/containerd/.mailmap b/vendor/github.com/containerd/containerd/.mailmap index 83bb03cd002c..11dcdc48c088 100644 --- a/vendor/github.com/containerd/containerd/.mailmap +++ b/vendor/github.com/containerd/containerd/.mailmap @@ -83,10 +83,12 @@ Mario Hros Mario Hros Mario Macias Mark Gordon +Marvin Giessing Michael Crosby Michael Katsoulis Mike Brown Mohammad Asif Siddiqui +Nabeel Rana Ng Yang Ning Li ningmingxiao @@ -100,6 +102,7 @@ Ross Boucher Ruediger Maass Rui Cao Sakeven Jiang +Samuel Karp Samuel Karp Seth Pellegrino <30441101+sethp-nr@users.noreply.github.com> Shaobao Feng @@ -121,6 +124,7 @@ Tõnis Tiigi Wade Lee Wade Lee Wade Lee <21621232@zju.edu.cn> +Wang Bing wanglei wanglei wangzhan diff --git a/vendor/github.com/containerd/containerd/.zuul.yaml b/vendor/github.com/containerd/containerd/.zuul.yaml deleted file mode 100644 index 8c845725a8e9..000000000000 --- a/vendor/github.com/containerd/containerd/.zuul.yaml +++ /dev/null @@ -1,35 +0,0 @@ -- project: - name: containerd/containerd - merge-mode: merge - check: - jobs: - - containerd-build-arm64 - - containerd-test-arm64 - - containerd-integration-test-arm64 - -- job: - name: containerd-build-arm64 - parent: init-test - description: | - Containerd build in openlab cluster. - run: .zuul/playbooks/containerd-build/run.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false - -- job: - name: containerd-test-arm64 - parent: init-test - description: | - Containerd unit tests in openlab cluster. - run: .zuul/playbooks/containerd-build/unit-test.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false - -- job: - name: containerd-integration-test-arm64 - parent: init-test - description: | - Containerd unit tests in openlab cluster. 
- run: .zuul/playbooks/containerd-build/integration-test.yaml - nodeset: ubuntu-xenial-arm64-openlab - voting: false diff --git a/vendor/github.com/containerd/containerd/BUILDING.md b/vendor/github.com/containerd/containerd/BUILDING.md index 5310924ce84c..4f2196e6c240 100644 --- a/vendor/github.com/containerd/containerd/BUILDING.md +++ b/vendor/github.com/containerd/containerd/BUILDING.md @@ -122,14 +122,13 @@ Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of You can build static binaries by providing a few variables to `make`: ```sh -make EXTRA_FLAGS="-buildmode pie" \ - EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \ - BUILDTAGS="netgo osusergo static_build" +make STATIC=1 ``` > *Note*: > - static build is discouraged > - static containerd binary does not support loading shared object plugins (`*.so`) +> - static build binaries are not position-independent # Via Docker container diff --git a/vendor/github.com/containerd/containerd/Makefile b/vendor/github.com/containerd/containerd/Makefile index 5791d8fa228a..7441eeac66e1 100644 --- a/vendor/github.com/containerd/containerd/Makefile +++ b/vendor/github.com/containerd/containerd/Makefile @@ -89,8 +89,17 @@ ifdef BUILDTAGS endif GO_BUILDTAGS ?= GO_BUILDTAGS += ${DEBUG_TAGS} +ifneq ($(STATIC),) + GO_BUILDTAGS += osusergo netgo static_build +endif GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) -GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)' + +GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS) +ifneq ($(STATIC),) + GO_LDFLAGS += -extldflags "-static" +endif +GO_LDFLAGS+=' + SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)' # Project packages. @@ -214,6 +223,16 @@ cri-integration: binaries bin/cri-integration.test ## run cri integration tests @bash -x ./script/test/cri-integration.sh @rm -rf bin/cri-integration.test +# build runc shimv2 with failpoint control, only used by integration test +bin/containerd-shim-runc-fp-v1: integration/failpoint/cmd/containerd-shim-runc-fp-v1 FORCE + @echo "$(WHALE) $@" + @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./integration/failpoint/cmd/containerd-shim-runc-fp-v1 + +# build CNI bridge plugin wrapper with failpoint support, only used by integration test +bin/cni-bridge-fp: integration/failpoint/cmd/cni-bridge-fp FORCE + @echo "$(WHALE) $@" + @$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/cni-bridge-fp + benchmark: ## run benchmarks tests @echo "$(WHALE) $@" @$(GO) test ${TESTFLAGS} -bench . 
-run Benchmark -test.root @@ -360,6 +379,8 @@ clean-test: ## clean up debris from previously failed tests @rm -rf /run/containerd/fifo/* @rm -rf /run/containerd-test/* @rm -rf bin/cri-integration.test + @rm -rf bin/cni-bridge-fp + @rm -rf bin/containerd-shim-runc-fp-v1 install: ## install binaries @echo "$(WHALE) $@ $(BINARIES)" diff --git a/vendor/github.com/containerd/containerd/Makefile.linux b/vendor/github.com/containerd/containerd/Makefile.linux index aba7b149f8aa..05414007030d 100644 --- a/vendor/github.com/containerd/containerd/Makefile.linux +++ b/vendor/github.com/containerd/containerd/Makefile.linux @@ -21,7 +21,9 @@ COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2 # check GOOS for cross compile builds ifeq ($(GOOS),linux) ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64)) - GO_GCFLAGS += -buildmode=pie + ifeq ($(STATIC),) + GO_GCFLAGS += -buildmode=pie + endif endif endif diff --git a/vendor/github.com/containerd/containerd/Vagrantfile b/vendor/github.com/containerd/containerd/Vagrantfile index 16feb48a1a9a..e81bfc2dc30e 100644 --- a/vendor/github.com/containerd/containerd/Vagrantfile +++ b/vendor/github.com/containerd/containerd/Vagrantfile @@ -15,9 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Vagrantfile for cgroup2 and SELinux +# Vagrantfile for Fedora and EL Vagrant.configure("2") do |config| - config.vm.box = "fedora/35-cloud-base" + config.vm.box = ENV["BOX"] || "fedora/37-cloud-base" + config.vm.box_version = ENV["BOX_VERSION"] memory = 4096 cpus = 2 config.vm.provider :virtualbox do |v| @@ -29,6 +30,8 @@ Vagrant.configure("2") do |config| v.cpus = cpus end + config.vm.synced_folder ".", "/vagrant", type: "rsync" + # Disabled by default. To run: # vagrant up --provision-with=upgrade-packages # To upgrade only specific packages: @@ -71,26 +74,36 @@ Vagrant.configure("2") do |config| SHELL end + # EL does not have /usr/local/{bin,sbin} in the PATH by default + config.vm.provision "setup-etc-environment", type: "shell", run: "once" do |sh| + sh.upload_path = "/tmp/vagrant-setup-etc-environment" + sh.inline = <<~SHELL + #!/usr/bin/env bash + set -eux -o pipefail + cat >> /etc/environment <> /etc/environment <> /etc/profile.d/sh.local < /tmp/containerd.log + cat /tmp/containerd.log systemctl stop containerd } selinux=$(getenforce) @@ -253,7 +269,7 @@ EOF fi trap cleanup EXIT ctr version - critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true' + critest --parallel=$[$(nproc)+2] --ginkgo.skip='HostIpc is true' --report-dir="${REPORT_DIR}" SHELL end @@ -279,8 +295,6 @@ EOF [registries.search] registries = ['docker.io'] EOF - # Disable SELinux to allow overlayfs - setenforce 0 SHELL end diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go index 97c7d4a92b36..df272237ccfe 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go @@ -299,7 +299,7 @@ type ListContentRequest struct { // filters. Expanded, containers that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 62341d5e12f0..44b794953133 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -31,6 +31,7 @@ import ( "time" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" ) @@ -119,6 +120,8 @@ const ( whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" paxSchilyXattr = "SCHILY.xattr." + + userXattrPrefix = "user." ) // Apply applies a tar stream of an OCI style diff tar. @@ -380,6 +383,10 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header // Lchown is not supported on Windows. if runtime.GOOS != "windows" { if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err) + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err) + } return err } } @@ -388,11 +395,19 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header if strings.HasPrefix(key, paxSchilyXattr) { key = key[len(paxSchilyXattr):] if err := setxattr(path, key, value); err != nil { + if errors.Is(err, syscall.EPERM) && strings.HasPrefix(key, userXattrPrefix) { + // In the user.* namespace, only regular files and directories can have extended attributes. + // See https://man7.org/linux/man-pages/man7/xattr.7.html for details. + if fi, err := os.Lstat(path); err == nil && (!fi.Mode().IsRegular() && !fi.Mode().IsDir()) { + log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) + continue + } + } if errors.Is(err, syscall.ENOTSUP) { log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) continue } - return err + return fmt.Errorf("failed to setxattr %q for key %q: %w", path, key, err) } } } diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index 2f3a3a392e6c..854afcf0adb4 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -63,7 +63,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) err } // Rdev is int32 on darwin/bsd, int64 on linux/solaris - rdev := uint64(s.Rdev) // nolint: unconvert + rdev := uint64(s.Rdev) //nolint:unconvert // Currently go does not fill in the major/minors if s.Mode&syscall.S_IFBLK != 0 || diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 3ec1ffce001d..723c31391712 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -26,10 +26,16 @@ import ( "time" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// maxResets is the no.of times the Copy() method can tolerate a reset of the body +const maxResets = 5 + +var ErrReset = errors.New("writer has been reset") + var bufPool = sync.Pool{ New: func() interface{} { buffer := make([]byte, 1<<20) @@ -80,7 +86,7 @@ func 
WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o return fmt.Errorf("failed to open writer: %w", err) } - return nil // all ready present + return nil // already present } defer cw.Close() @@ -131,35 +137,63 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er // the size or digest is unknown, these values may be empty. // // Copy is buffered, so no need to wrap reader in buffered io. -func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { +func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { ws, err := cw.Status() if err != nil { return fmt.Errorf("failed to get status: %w", err) } - + r := or if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, size) + r, err = seekReader(or, ws.Offset, size) if err != nil { return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) } } - copied, err := copyWithBuffer(cw, r) - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if size != 0 && copied < size-ws.Offset { - // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) - } - - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + for i := 0; i < maxResets; i++ { + if i >= 1 { + log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") + } + copied, err := copyWithBuffer(cw, r) + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if errors.Is(err, ErrReset) { + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + continue + } + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } } + return nil } - return nil + log.G(ctx).WithField("digest", expected).Errorf("failed to copy after %d retries", maxResets) + return fmt.Errorf("failed to copy after %d retries", maxResets) } // CopyReaderAt copies to a writer from a given reader at for the given diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index 457bbcd0eb4d..f41a92d04a58 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -643,7 +643,6 @@ func (s *store) ingestRoot(ref string) string { // - root: entire ingest directory // - ref: name of the starting ref, must be unique // - data: file where data is written -// func (s *store) ingestPaths(ref string) (string, string, string) { var ( fp = s.ingestRoot(ref) diff --git 
a/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go b/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go
new file mode 100644
index 000000000000..ab288fbb8a55
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go
@@ -0,0 +1,92 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+   File copied and customized based on
+   https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go
+*/
+
+package kernelversion
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+// KernelVersion holds information about the kernel.
+type KernelVersion struct {
+	Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic")
+	Major  uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic")
+}
+
+// String implements fmt.Stringer for KernelVersion
+func (k *KernelVersion) String() string {
+	if k.Kernel > 0 || k.Major > 0 {
+		return fmt.Sprintf("%d.%d", k.Kernel, k.Major)
+	}
+	return ""
+}
+
+var (
+	currentKernelVersion *KernelVersion
+	kernelVersionError   error
+	once                 sync.Once
+)
+
+// getKernelVersion gets the current kernel version.
+func getKernelVersion() (*KernelVersion, error) {
+	once.Do(func() {
+		var uts unix.Utsname
+		if err := unix.Uname(&uts); err != nil {
+			return
+		}
+		// Remove the \x00 from the release so the version parses correctly
+		currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
+	})
+	return currentKernelVersion, kernelVersionError
+}
+
+// parseRelease parses a string and creates a KernelVersion based on it.
+func parseRelease(release string) (*KernelVersion, error) {
+	var version = KernelVersion{}
+
+	// We only make sure we get the "kernel" and "major revision". Sometimes we have
+	// 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.
+	_, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err)
+	}
+	return &version, nil
+}
+
+// GreaterEqualThan checks if the host's kernel version is greater than, or
+// equal to the given kernel version v. Only "kernel version" and "major revision"
+// can be specified (e.g., "3.12") and will be taken into account, which means
+// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12).
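In use, the check (implemented just below) collapses to a single call; 4.11 here is an arbitrary example version:

```go
package example

import (
	"github.com/containerd/containerd/contrib/seccomp/kernelversion"
)

// hostAtLeast411 reports whether the running kernel is >= 4.11. Per the
// comparison rules above, "4.11.0-generic" and "4.11-1-amd64" are equal.
func hostAtLeast411() (bool, error) {
	return kernelversion.GreaterEqualThan(kernelversion.KernelVersion{
		Kernel: 4,
		Major:  11,
	})
}
```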
+func GreaterEqualThan(minVersion KernelVersion) (bool, error) { + kv, err := getKernelVersion() + if err != nil { + return false, err + } + if kv.Kernel > minVersion.Kernel { + return true, nil + } + if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go index cf09d8d9e4fd..e13f2625c731 100644 --- a/vendor/github.com/containerd/containerd/filters/filter.go +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -65,7 +65,6 @@ // ``` // name==foo,labels.bar // ``` -// package filters import ( diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go index 49182d7b7bd6..32767909b1c9 100644 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -45,7 +45,6 @@ field := quoted | [A-Za-z] [A-Za-z0-9_]+ operator := "==" | "!=" | "~=" value := quoted | [^\s,]+ quoted := - */ func Parse(s string) (Filter, error) { // special case empty to match all diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go index b76aab9b4a7a..5c800ef846cc 100644 --- a/vendor/github.com/containerd/containerd/filters/quote.go +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -31,10 +31,10 @@ var errQuoteSyntax = errors.New("quote syntax error") // or character literal represented by the string s. // It returns four values: // -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. +// 1. value, the decoded Unicode code point or byte value; +// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. // // The second argument, quote, specifies the type of literal being parsed // and therefore which escaped quote character is permitted. diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 216b3adb1e65..784df5dd951d 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -28,6 +28,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/snapshots" @@ -287,6 +288,10 @@ type UnpackConfig struct { // CheckPlatformSupported is whether to validate that a snapshotter // supports an image's platform before unpacking CheckPlatformSupported bool + // DuplicationSuppressor is used to make sure that there is only one + // in-flight fetch request or unpack handler for a given descriptor's + // digest or chain ID. 
+ DuplicationSuppressor kmutex.KeyedLocker } // UnpackOpt provides configuration for unpack @@ -300,6 +305,14 @@ func WithSnapshotterPlatformCheck() UnpackOpt { } } +// WithUnpackDuplicationSuppressor sets `DuplicationSuppressor` on the UnpackConfig. +func WithUnpackDuplicationSuppressor(suppressor kmutex.KeyedLocker) UnpackOpt { + return func(ctx context.Context, uc *UnpackConfig) error { + uc.DuplicationSuppressor = suppressor + return nil + } +} + func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 549474644b5f..40a0a33df06e 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -182,6 +182,9 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: d, ok := resolvedIndex[desc.Digest] if !ok { + if err := desc.Digest.Validate(); err != nil { + return err + } records = append(records, blobRecord(store, desc, &eo.blobRecordOptions)) p, err := content.ReadBlob(ctx, store, desc) @@ -271,6 +274,9 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}, brOpts *blobRecordOptions) ([]tarRecord, error) { var records []tarRecord exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } records = append(records, blobRecord(store, desc, brOpts)) algorithms[desc.Digest.Algorithm().String()] = struct{}{} return nil, nil @@ -428,6 +434,9 @@ func manifestsRecord(ctx context.Context, store content.Provider, manifests map[ } dgst := manifest.Config.Digest + if err := dgst.Validate(); err != nil { + return tarRecord{}, err + } mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded()) for _, l := range manifest.Layers { path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded()) diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index c53104950841..c1c802fb55c7 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -55,12 +55,12 @@ func WithImportCompression() ImportOpt { } // ImportIndex imports an index from a tar archive image bundle -// - implements Docker v1.1, v1.2 and OCI v1. -// - prefers OCI v1 when provided -// - creates OCI index for Docker formats -// - normalizes Docker references and adds as OCI ref name -// e.g. alpine:latest -> docker.io/library/alpine:latest -// - existing OCI reference names are untouched +// - implements Docker v1.1, v1.2 and OCI v1. +// - prefers OCI v1 when provided +// - creates OCI index for Docker formats +// - normalizes Docker references and adds as OCI ref name +// e.g. 
alpine:latest -> docker.io/library/alpine:latest +// - existing OCI reference names are untouched func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -232,12 +232,14 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex) } +const ( + kib = 1024 + mib = 1024 * kib + jsonLimit = 20 * mib +) + func onUntarJSON(r io.Reader, j interface{}) error { - b, err := io.ReadAll(r) - if err != nil { - return err - } - return json.Unmarshal(b, j) + return json.NewDecoder(io.LimitReader(r, jsonLimit)).Decode(j) } func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) { @@ -300,6 +302,9 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string } if s.GetCompression() == compression.Uncompressed { if compress { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) labels := map[string]string{ "containerd.io/uncompressed": desc.Digest.String(), diff --git a/vendor/github.com/containerd/containerd/leases/lease.go b/vendor/github.com/containerd/containerd/leases/lease.go index 058d065594f3..fc0ca3491c55 100644 --- a/vendor/github.com/containerd/containerd/leases/lease.go +++ b/vendor/github.com/containerd/containerd/leases/lease.go @@ -65,10 +65,15 @@ func SynchronousDelete(ctx context.Context, o *DeleteOptions) error { return nil } -// WithLabels sets labels on a lease +// WithLabels merges labels on a lease func WithLabels(labels map[string]string) Opt { return func(l *Lease) error { - l.Labels = labels + if l.Labels == nil { + l.Labels = map[string]string{} + } + for k, v := range labels { + l.Labels[k] = v + } return nil } } diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go index d23be84fea72..516de1fc7a77 100644 --- a/vendor/github.com/containerd/containerd/metadata/buckets.go +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -26,7 +26,7 @@ // // Generically, we try to do the following: // -// /// -> +// /// -> // // version: Currently, this is "v1". Additions can be made to v1 in a backwards // compatible way. If the layout changes, a new version must be made, along @@ -46,72 +46,73 @@ // the structure is changed in addition to adding a migration and incrementing // the database version. Note that `╘══*...*` refers to maps with arbitrary // keys. 
-// ├──version : - Latest version, see migrations -// └──v1 - Schema version bucket -// ╘══*namespace* -// ├──labels -// │  ╘══*key* : - Label value -// ├──image -// │  ╘══*image name* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──target -// │   │  ├──digest : - Descriptor digest -// │   │  ├──mediatype : - Descriptor media type -// │   │  └──size : - Descriptor size -// │   └──labels -// │   ╘══*key* : - Label value -// ├──containers -// │  ╘══*container id* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──spec : - Proto marshaled spec -// │   ├──image : - Image name -// │   ├──snapshotter : - Snapshotter name -// │   ├──snapshotKey : - Snapshot key -// │   ├──runtime -// │   │  ├──name : - Runtime name -// │   │  ├──extensions -// │   │  │  ╘══*name* : - Proto marshaled extension -// │   │  └──options : - Proto marshaled options -// │   └──labels -// │   ╘══*key* : - Label value -// ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* -// │    ├──name : - Snapshot name in backend -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │    ├──parent : - Parent snapshot name -// │   ├──children -// │   │  ╘══*snapshot key* : - Child snapshot reference -// │   └──labels -// │   ╘══*key* : - Label value -// ├──content -// │  ├──blob -// │  │ ╘══*blob digest* -// │  │ ├──createdat : - Created at -// │  │ ├──updatedat : - Updated at -// │  │   ├──size : - Blob size -// │  │ └──labels -// │  │ ╘══*key* : - Label value -// │  └──ingests -// │   ╘══*ingest reference* -// │    ├──ref : - Ingest reference in backend -// │   ├──expireat : - Time to expire ingest -// │   └──expected : - Expected commit digest -// └──leases -// ╘══*lease id* -//   ├──createdat : - Created at -// ├──labels -// │ ╘══*key* : - Label value -//   ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* : - Snapshot reference -//   ├──content -// │  ╘══*blob digest* : - Content blob reference -// └──ingests -//   ╘══*ingest reference* : - Content ingest reference +// +// ├──version : - Latest version, see migrations +// └──v1 - Schema version bucket +// ╘══*namespace* +// ├──labels +// │  ╘══*key* : - Label value +// ├──image +// │  ╘══*image name* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──target +// │   │  ├──digest : - Descriptor digest +// │   │  ├──mediatype : - Descriptor media type +// │   │  └──size : - Descriptor size +// │   └──labels +// │   ╘══*key* : - Label value +// ├──containers +// │  ╘══*container id* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──spec : - Proto marshaled spec +// │   ├──image : - Image name +// │   ├──snapshotter : - Snapshotter name +// │   ├──snapshotKey : - Snapshot key +// │   ├──runtime +// │   │  ├──name : - Runtime name +// │   │  ├──extensions +// │   │  │  ╘══*name* : - Proto marshaled extension +// │   │  └──options : - Proto marshaled options +// │   └──labels +// │   ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* +// │    ├──name : - Snapshot name in backend +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │    ├──parent : - Parent snapshot name +// │   ├──children +// │   │  ╘══*snapshot key* : - Child snapshot reference +// │   └──labels +// │   ╘══*key* : - Label value +// ├──content +// │  ├──blob +// │  │ ╘══*blob digest* +// │  │ ├──createdat : - Created at +// │  │ ├──updatedat : - Updated at +// │  │   ├──size : - 
Blob size +// │  │ └──labels +// │  │ ╘══*key* : - Label value +// │  └──ingests +// │   ╘══*ingest reference* +// │    ├──ref : - Ingest reference in backend +// │   ├──expireat : - Time to expire ingest +// │   └──expected : - Expected commit digest +// └──leases +// ╘══*lease id* +//   ├──createdat : - Created at +// ├──labels +// │ ╘══*key* : - Label value +//   ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* : - Snapshot reference +//   ├──content +// │  ╘══*blob digest* : - Content blob reference +// └──ingests +//   ╘══*ingest reference* : - Content ingest reference package metadata import ( diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 5936772cb4cb..a1b2571bb1f5 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -24,8 +24,6 @@ import "context" // oriented. A namespace is really just a name and a set of labels. Objects // that belong to a namespace are returned when the namespace is assigned to a // given context. -// -// type Store interface { Create(ctx context.Context, namespace string, labels map[string]string) error Labels(ctx context.Context, namespace string) (map[string]string, error) diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index 34d7662309f2..a1c98ddcbd06 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -148,10 +148,9 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { GID: 0, }, Capabilities: &specs.LinuxCapabilities{ - Bounding: defaultUnixCaps(), - Permitted: defaultUnixCaps(), - Inheritable: defaultUnixCaps(), - Effective: defaultUnixCaps(), + Bounding: defaultUnixCaps(), + Permitted: defaultUnixCaps(), + Effective: defaultUnixCaps(), }, Rlimits: []specs.POSIXRlimit{ { diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index 9b0cfc3f1798..3330ad1088ca 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -113,6 +113,17 @@ func setCapabilities(s *Spec) { } } +// ensureAdditionalGids ensures that the primary GID is also included in the additional GID list. +func ensureAdditionalGids(s *Spec) { + setProcess(s) + for _, f := range s.Process.User.AdditionalGids { + if f == s.Process.User.GID { + return + } + } + s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...) +} + // WithDefaultSpec returns a SpecOpts that will populate the spec with default // values. // @@ -518,10 +529,13 @@ func WithNamespacedCgroup() SpecOpts { // WithUser sets the user to be used within the container. 
// It accepts a valid user string in OCI Image Spec v1.0.0: -// user, uid, user:group, uid:gid, uid:group, user:gid +// +// user, uid, user:group, uid:gid, uid:group, user:gid func WithUser(userstr string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil // For LCOW it's a bit harder to confirm that the user actually exists on the host as a rootfs isn't // mounted on the host and shared into the guest, but rather the rootfs is constructed entirely in the @@ -614,7 +628,9 @@ func WithUser(userstr string) SpecOpts { // WithUIDGID allows the UID and GID for the Process to be set func WithUIDGID(uid, gid uint32) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil s.Process.User.UID = uid s.Process.User.GID = gid return nil @@ -627,12 +643,11 @@ func WithUIDGID(uid, gid uint32) SpecOpts { // additionally sets the gid to 0, and does not return an error. func WithUserID(uid uint32) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + defer ensureAdditionalGids(s) setProcess(s) - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.New("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { + s.Process.User.AdditionalGids = nil + setUser := func(root string) error { + user, err := UserFromPath(root, func(u user.User) bool { return u.Uid == int(uid) }) if err != nil { @@ -644,7 +659,12 @@ func WithUserID(uid uint32) SpecOpts { } s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) return nil - + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setUser(s.Root.Path) } if c.Snapshotter == "" { return errors.New("no snapshotter set for container") @@ -659,20 +679,7 @@ func WithUserID(uid uint32) SpecOpts { } mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, func(root string) error { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Uid == int(uid) - }) - if err != nil { - if os.IsNotExist(err) || err == ErrNoUsersFound { - s.Process.User.UID, s.Process.User.GID = uid, 0 - return nil - } - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - }) + return mount.WithTempMount(ctx, mounts, setUser) } } @@ -684,13 +691,12 @@ func WithUserID(uid uint32) SpecOpts { // the container. 
func WithUsername(username string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + defer ensureAdditionalGids(s) setProcess(s) + s.Process.User.AdditionalGids = nil if s.Linux != nil { - if c.Snapshotter == "" && c.SnapshotKey == "" { - if !isRootfsAbs(s.Root.Path) { - return errors.New("rootfs absolute path is required") - } - user, err := UserFromPath(s.Root.Path, func(u user.User) bool { + setUser := func(root string) error { + user, err := UserFromPath(root, func(u user.User) bool { return u.Name == username }) if err != nil { @@ -699,6 +705,12 @@ func WithUsername(username string) SpecOpts { s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) return nil } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setUser(s.Root.Path) + } if c.Snapshotter == "" { return errors.New("no snapshotter set for container") } @@ -712,16 +724,7 @@ func WithUsername(username string) SpecOpts { } mounts = tryReadonlyMounts(mounts) - return mount.WithTempMount(ctx, mounts, func(root string) error { - user, err := UserFromPath(root, func(u user.User) bool { - return u.Name == username - }) - if err != nil { - return err - } - s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) - return nil - }) + return mount.WithTempMount(ctx, mounts, setUser) } else if s.Windows != nil { s.Process.User.Username = username } else { @@ -732,7 +735,7 @@ func WithUsername(username string) SpecOpts { } // WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed -// for a particular user in the /etc/groups file of the image's root filesystem +// for a particular user in the /etc/group file of the image's root filesystem // The passed in user can be either a uid or a username. func WithAdditionalGIDs(userstr string) SpecOpts { return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { @@ -741,7 +744,9 @@ func WithAdditionalGIDs(userstr string) SpecOpts { return nil } setProcess(s) + s.Process.User.AdditionalGids = nil setAdditionalGids := func(root string) error { + defer ensureAdditionalGids(s) var username string uid, err := strconv.Atoi(userstr) if err == nil { @@ -802,6 +807,68 @@ func WithAdditionalGIDs(userstr string) SpecOpts { } } +// WithAppendAdditionalGroups append additional groups within the container. +// The passed in groups can be either a gid or a groupname. 
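As a usage sketch of the opt implemented just below, composed with WithUIDGID from earlier in this hunk; the group name is an assumption about the image's /etc/group:

```go
package example

import (
	"github.com/containerd/containerd/oci"
)

// specOpts runs the container process as UID/GID 1000 and appends two
// supplementary groups: "wheel" resolved by name from the image's
// /etc/group, and GID 1001 given numerically.
func specOpts() []oci.SpecOpts {
	return []oci.SpecOpts{
		oci.WithUIDGID(1000, 1000),
		oci.WithAppendAdditionalGroups("wheel", "1001"),
	}
}
```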
+func WithAppendAdditionalGroups(groups ...string) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + // For LCOW or on Darwin additional GID's are not supported + if s.Windows != nil || runtime.GOOS == "darwin" { + return nil + } + setProcess(s) + setAdditionalGids := func(root string) error { + defer ensureAdditionalGids(s) + gpath, err := fs.RootPath(root, "/etc/group") + if err != nil { + return err + } + ugroups, err := user.ParseGroupFile(gpath) + if err != nil { + return err + } + groupMap := make(map[string]user.Group) + for _, group := range ugroups { + groupMap[group.Name] = group + } + var gids []uint32 + for _, group := range groups { + gid, err := strconv.ParseUint(group, 10, 32) + if err == nil { + gids = append(gids, uint32(gid)) + } else { + g, ok := groupMap[group] + if !ok { + return fmt.Errorf("unable to find group %s", group) + } + gids = append(gids, uint32(g.Gid)) + } + } + s.Process.User.AdditionalGids = append(s.Process.User.AdditionalGids, gids...) + return nil + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !filepath.IsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return setAdditionalGids(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + mounts = tryReadonlyMounts(mounts) + return mount.WithTempMount(ctx, mounts, setAdditionalGids) + } +} + // WithCapabilities sets Linux capabilities on the process func WithCapabilities(caps []string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { @@ -810,7 +877,6 @@ func WithCapabilities(caps []string) SpecOpts { s.Process.Capabilities.Bounding = caps s.Process.Capabilities.Effective = caps s.Process.Capabilities.Permitted = caps - s.Process.Capabilities.Inheritable = caps return nil } @@ -845,7 +911,6 @@ func WithAddedCapabilities(caps []string) SpecOpts { &s.Process.Capabilities.Bounding, &s.Process.Capabilities.Effective, &s.Process.Capabilities.Permitted, - &s.Process.Capabilities.Inheritable, } { if !capsContain(*cl, c) { *cl = append(*cl, c) @@ -865,7 +930,6 @@ func WithDroppedCapabilities(caps []string) SpecOpts { &s.Process.Capabilities.Bounding, &s.Process.Capabilities.Effective, &s.Process.Capabilities.Permitted, - &s.Process.Capabilities.Inheritable, } { removeCap(cl, c) } @@ -880,7 +944,7 @@ func WithDroppedCapabilities(caps []string) SpecOpts { func WithAmbientCapabilities(caps []string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { setCapabilities(s) - + s.Process.Capabilities.Inheritable = caps s.Process.Capabilities.Ambient = caps return nil } @@ -909,7 +973,7 @@ func UserFromPath(root string, filter func(user.User) bool) (user.User, error) { // ErrNoGroupsFound can be returned from GIDFromPath var ErrNoGroupsFound = errors.New("no groups found") -// GIDFromPath inspects the GID using /etc/passwd in the specified rootfs. +// GIDFromPath inspects the GID using /etc/group in the specified rootfs. // filter can be nil. 
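> Note on the capability hunks above: dropping `Inheritable` from the default/add/drop helpers and populating it only in `WithAmbientCapabilities` matches kernel semantics, since `PR_CAP_AMBIENT_RAISE` only succeeds for a capability that is in both the permitted and inheritable sets. A sketch of the intended use (the user and capability chosen here are illustrative):

```go
package example

import "github.com/containerd/containerd/oci"

// ambientOpts is a sketch: allow a non-root process to bind ports
// below 1024. The kernel only honors an ambient capability that is
// also present in the permitted and inheritable sets, which is why
// WithAmbientCapabilities now fills Inheritable alongside Ambient.
func ambientOpts() []oci.SpecOpts {
	return []oci.SpecOpts{
		oci.WithUser("1000:1000"),
		oci.WithAmbientCapabilities([]string{"CAP_NET_BIND_SERVICE"}),
	}
}
```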
func GIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) { gpath, err := fs.RootPath(root, "/etc/group") @@ -1123,20 +1187,13 @@ func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container Allow: true, }, { + // "dev/ptmx" Type: "c", Major: intptr(5), Minor: intptr(2), Access: rwm, Allow: true, }, - { - // tuntap - Type: "c", - Major: intptr(10), - Minor: intptr(200), - Access: rwm, - Allow: true, - }, }...) return nil } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go index c990fc634926..ec9149279851 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go @@ -28,19 +28,22 @@ import ( // WithAllCurrentCapabilities propagates the effective capabilities of the caller process to the container process. // The capability set may differ from WithAllKnownCapabilities when running in a container. -//nolint: deadcode, unused +// +//nolint:deadcode,unused var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } // WithAllKnownCapabilities sets all the the known linux capabilities for the container process -//nolint: deadcode, unused +// +//nolint:deadcode,unused var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } // WithCPUShares sets the container's cpu shares -//nolint: deadcode, unused +// +//nolint:deadcode,unused func WithCPUShares(shares uint64) SpecOpts { return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { return nil diff --git a/vendor/github.com/containerd/containerd/pkg/kmutex/kmutex.go b/vendor/github.com/containerd/containerd/pkg/kmutex/kmutex.go new file mode 100644 index 000000000000..74846c0577c7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/kmutex/kmutex.go @@ -0,0 +1,105 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package kmutex provides synchronization primitives to lock/unlock resource by unique key. +package kmutex + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/sync/semaphore" +) + +// KeyedLocker is the interface for acquiring locks based on string. 
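> Note: the new `kmutex` package (interface declared just below) provides per-key mutual exclusion backed by weighted semaphores, with reference counting so idle keys are removed from the map. A small usage sketch; the digest-shaped key and the helper name are illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/pkg/kmutex"
)

// withKeyLock is a sketch of the intended usage: callers that agree on
// a key execute fn one at a time for that key.
func withKeyLock(ctx context.Context, km kmutex.KeyedLocker, key string, fn func() error) error {
	// Lock blocks until the key is free or ctx is done; on cancellation
	// the implementation rolls back its reference count.
	if err := km.Lock(ctx, key); err != nil {
		return fmt.Errorf("acquire %q: %w", key, err)
	}
	defer km.Unlock(key)
	return fn()
}
```

Because a key's entry is deleted once its reference count drops to zero, the locker does not grow with the number of distinct keys ever seen.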
+type KeyedLocker interface { + Lock(ctx context.Context, key string) error + Unlock(key string) +} + +func New() KeyedLocker { + return newKeyMutex() +} + +func newKeyMutex() *keyMutex { + return &keyMutex{ + locks: make(map[string]*klock), + } +} + +type keyMutex struct { + mu sync.Mutex + + locks map[string]*klock +} + +type klock struct { + *semaphore.Weighted + ref int +} + +func (km *keyMutex) Lock(ctx context.Context, key string) error { + km.mu.Lock() + + l, ok := km.locks[key] + if !ok { + km.locks[key] = &klock{ + Weighted: semaphore.NewWeighted(1), + } + l = km.locks[key] + } + l.ref++ + km.mu.Unlock() + + if err := l.Acquire(ctx, 1); err != nil { + km.mu.Lock() + defer km.mu.Unlock() + + l.ref-- + + if l.ref < 0 { + panic(fmt.Errorf("kmutex: release of unlocked key %v", key)) + } + + if l.ref == 0 { + delete(km.locks, key) + } + return err + } + return nil +} + +func (km *keyMutex) Unlock(key string) { + km.mu.Lock() + defer km.mu.Unlock() + + l, ok := km.locks[key] + if !ok { + panic(fmt.Errorf("kmutex: unlock of unlocked key %v", key)) + } + l.Release(1) + + l.ref-- + + if l.ref < 0 { + panic(fmt.Errorf("kmutex: released of unlocked key %v", key)) + } + + if l.ref == 0 { + delete(km.locks, key) + } +} diff --git a/vendor/github.com/containerd/containerd/pkg/kmutex/noop.go b/vendor/github.com/containerd/containerd/pkg/kmutex/noop.go new file mode 100644 index 000000000000..66c46f15ad38 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/kmutex/noop.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package kmutex + +import "context" + +func NewNoop() KeyedLocker { + return &noopMutex{} +} + +type noopMutex struct { +} + +func (*noopMutex) Lock(_ context.Context, _ string) error { + return nil +} + +func (*noopMutex) Unlock(_ string) { +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index c1aaf72ca8ee..ff9771a60032 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -46,10 +46,14 @@ type matchComparer struct { // Match matches platform with the same windows major, minor // and build version. -func (m matchComparer) Match(p imagespec.Platform) bool { - if m.defaults.Match(p) { - // TODO(windows): Figure out whether OSVersion is deprecated. 
- return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) +func (m matchComparer) Match(p specs.Platform) bool { + match := m.defaults.Match(p) + + if match && p.OS == "windows" { + if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) { + return true + } + return p.OSVersion == "" } return false } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 8f955d036dff..234309941803 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -27,40 +27,40 @@ // The vast majority of use cases should simply use the match function with // user input. The first step is to parse a specifier into a matcher: // -// m, err := Parse("linux") -// if err != nil { ... } +// m, err := Parse("linux") +// if err != nil { ... } // // Once you have a matcher, use it to match against the platform declared by a // component, typically from an image or runtime. Since extracting an images // platform is a little more involved, we'll use an example against the // platform default: // -// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// if ok := m.Match(Default()); !ok { /* doesn't match */ } // // This can be composed in loops for resolving runtimes or used as a filter for // fetch and select images. // // More details of the specifier syntax and platform spec follow. // -// Declaring Platform Support +// # Declaring Platform Support // // Components that have strict platform requirements should use the OCI // platform specification to declare their support. Typically, this will be // images and runtimes that should make these declaring which platform they // support specifically. This looks roughly as follows: // -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } // // Most images and runtimes should at least set Architecture and OS, according // to their GOARCH and GOOS values, respectively (follow the OCI image // specification when in doubt). ARM should set variant under certain // discussions, which are outlined below. // -// Platform Specifiers +// # Platform Specifiers // // While the OCI platform specifications provide a tool for components to // specify structured information, user input typically doesn't need the full @@ -77,7 +77,7 @@ // where the architecture may be known but a runtime may support images from // different operating systems. // -// Normalization +// # Normalization // // Because not all users are familiar with the way the Go runtime represents // platforms, several normalizations have been provided to make this package @@ -85,17 +85,17 @@ // // The following are performed for architectures: // -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 // // We also normalize the operating system `macos` to `darwin`. // -// ARM Support +// # ARM Support // // To qualify ARM architecture, the Variant field is used to qualify the arm // version. 
The most common arm version, v7, is represented without the variant diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 42d0da60e1e2..297c77ab6e03 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -71,8 +71,10 @@ type ExitStatus struct { // Result returns the exit code and time of the exit status. // An error may be returned here to which indicates there was an error -// at some point while waiting for the exit status. It does not signify -// an error with the process itself. +// +// at some point while waiting for the exit status. It does not signify +// an error with the process itself. +// // If an error is returned, the process may still be running. func (s ExitStatus) Result() (uint32, time.Time, error) { return s.code, s.exitedAt, s.err diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 6fa97dfdca9b..25436b645550 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -19,13 +19,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go index 223fa2d0524f..e4529a776103 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go @@ -134,9 +134,6 @@ func parseValueAndParams(header string) (value string, params map[string]string) } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index c786ad215898..bef77fa61d56 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -24,6 +24,7 @@ import ( "net/http" "net/url" "strings" + "sync" "time" "github.com/containerd/containerd/content" @@ -261,27 +262,20 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str // TODO: Support chunked upload - pr, pw := io.Pipe() - respC := make(chan response, 1) - body := io.NopCloser(pr) + pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest) req.body = func() (io.ReadCloser, error) { - if body == nil { - return nil, errors.New("cannot reuse body, request must be retried") - } - // Only use the body once since pipe cannot be seeked - ob := body - body = nil - return ob, nil + pr, pw := io.Pipe() + pushw.setPipe(pw) + return io.NopCloser(pr), nil } req.size = desc.Size go func() { - defer close(respC) resp, err := req.doWithRetries(ctx, 
nil) if err != nil { - respC <- response{err: err} - pr.CloseWithError(err) + pushw.setError(err) + pushw.Close() return } @@ -290,20 +284,13 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str default: err := remoteserrors.NewUnexpectedStatusErr(resp) log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") - pr.CloseWithError(err) + pushw.setError(err) + pushw.Close() } - respC <- response{Response: resp} + pushw.setResponse(resp) }() - return &pushWriter{ - base: p.dockerBase, - ref: ref, - pipe: pw, - responseC: respC, - isManifest: isManifest, - expected: desc.Digest, - tracker: p.tracker, - }, nil + return pushw, nil } func getManifestPath(object string, dgst digest.Digest) []string { @@ -325,29 +312,89 @@ func getManifestPath(object string, dgst digest.Digest) []string { return []string{"manifests", object} } -type response struct { - *http.Response - err error -} - type pushWriter struct { base *dockerBase ref string - pipe *io.PipeWriter - responseC <-chan response + pipe *io.PipeWriter + + pipeC chan *io.PipeWriter + respC chan *http.Response + closeOnce sync.Once + errC chan error + isManifest bool expected digest.Digest tracker StatusTracker } +func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter { + // Initialize and create response + return &pushWriter{ + base: db, + ref: ref, + expected: expected, + tracker: tracker, + pipeC: make(chan *io.PipeWriter, 1), + respC: make(chan *http.Response, 1), + errC: make(chan error, 1), + isManifest: isManifest, + } +} + +func (pw *pushWriter) setPipe(p *io.PipeWriter) { + pw.pipeC <- p +} + +func (pw *pushWriter) setError(err error) { + pw.errC <- err +} +func (pw *pushWriter) setResponse(resp *http.Response) { + pw.respC <- resp +} + func (pw *pushWriter) Write(p []byte) (n int, err error) { status, err := pw.tracker.GetStatus(pw.ref) if err != nil { return n, err } + + if pw.pipe == nil { + p, ok := <-pw.pipeC + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe = p + } else { + select { + case p, ok := <-pw.pipeC: + if !ok { + return 0, io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written and the caller must reset + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return 0, content.ErrReset + default: + } + } + n, err = pw.pipe.Write(p) + if errors.Is(err, io.ErrClosedPipe) { + // if the pipe is closed, we might have the original error on the error + // channel - so we should try and get it + select { + case err2 := <-pw.errC: + err = err2 + default: + } + } status.Offset += int64(n) status.UpdatedAt = time.Now() pw.tracker.SetStatus(pw.ref, status) @@ -355,13 +402,21 @@ func (pw *pushWriter) Write(p []byte) (n int, err error) { } func (pw *pushWriter) Close() error { - status, err := pw.tracker.GetStatus(pw.ref) - if err == nil && !status.Committed { - // Closing an incomplete writer. Record this as an error so that following write can retry it. 
- status.ErrClosed = errors.New("closed incomplete writer") - pw.tracker.SetStatus(pw.ref, status) + // Ensure pipeC is closed but handle `Close()` being + // called multiple times without panicking + pw.closeOnce.Do(func() { + close(pw.pipeC) + }) + if pw.pipe != nil { + status, err := pw.tracker.GetStatus(pw.ref) + if err == nil && !status.Committed { + // Closing an incomplete writer. Record this as an error so that following write can retry it. + status.ErrClosed = errors.New("closed incomplete writer") + pw.tracker.SetStatus(pw.ref, status) + } + return pw.pipe.Close() } - return pw.pipe.Close() + return nil } func (pw *pushWriter) Status() (content.Status, error) { @@ -380,7 +435,7 @@ func (pw *pushWriter) Digest() digest.Digest { func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { // Check whether read has already thrown an error - if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe { + if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { return fmt.Errorf("pipe error before commit: %w", err) } @@ -388,18 +443,40 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di return err } // TODO: timeout waiting for response - resp := <-pw.responseC - if resp.err != nil { - return resp.err + var resp *http.Response + select { + case err := <-pw.errC: + return err + case resp = <-pw.respC: + defer resp.Body.Close() + case p, ok := <-pw.pipeC: + // check whether the pipe has changed in the commit, because sometimes Write + // can complete successfully, but the pipe may have changed. In that case, the + // content needs to be reset. + if !ok { + return io.ErrClosedPipe + } + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written again and the caller must reset + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return err + } + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return content.ErrReset } - defer resp.Response.Body.Close() // 201 is specified return status, some registries return // 200, 202 or 204. switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: default: - return remoteserrors.NewUnexpectedStatusErr(resp.Response) + return remoteserrors.NewUnexpectedStatusErr(resp) } status, err := pw.tracker.GetStatus(pw.ref) diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 9bbbc262220b..709fa028de27 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "net" "net/http" "net/url" "path" @@ -667,3 +668,17 @@ func responseFields(resp *http.Response) logrus.Fields { return logrus.Fields(fields) } + +// IsLocalhost checks if the registry host is local. 
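> Note: the helper below strips an optional port with `net.SplitHostPort` and then treats `localhost` or any loopback IP as local. Spot-checks of that behavior, assuming the function as written:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	fmt.Println(docker.IsLocalhost("localhost:5000"))       // true: the port is split off first
	fmt.Println(docker.IsLocalhost("127.0.0.1"))            // true: IPv4 loopback
	fmt.Println(docker.IsLocalhost("[::1]:5000"))           // true: IPv6 loopback
	fmt.Println(docker.IsLocalhost("registry.example.com")) // false: neither "localhost" nor a loopback IP
}
```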
+func IsLocalhost(host string) bool { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + + if host == "localhost" { + return true + } + + ip := net.ParseIP(host) + return ip.IsLoopback() +} diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 8bcafb22a078..4d91ed2e54d0 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -257,8 +257,8 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st // An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed. // // This is based on the media type of the content: -// - application/vnd.oci.image.layer.nondistributable -// - application/vnd.docker.image.rootfs.foreign +// - application/vnd.oci.image.layer.nondistributable +// - application/vnd.docker.image.rootfs.foreign func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { if images.IsNonDistributable(desc.MediaType) { diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index f396c73ab094..226cebccf230 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -44,7 +44,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter return ocispec.Descriptor{}, err } - lowerKey := fmt.Sprintf("%s-parent-view", info.Parent) + lowerKey := fmt.Sprintf("%s-parent-view-%s", info.Parent, uniquePart()) lower, err := sn.View(ctx, lowerKey, info.Parent) if err != nil { return ocispec.Descriptor{}, err @@ -58,7 +58,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter return ocispec.Descriptor{}, err } } else { - upperKey := fmt.Sprintf("%s-view", snapshotID) + upperKey := fmt.Sprintf("%s-view-%s", snapshotID, uniquePart()) upper, err = sn.View(ctx, upperKey, snapshotID) if err != nil { return ocispec.Descriptor{}, err diff --git a/vendor/github.com/containerd/containerd/snapshots/native/native.go b/vendor/github.com/containerd/containerd/snapshots/native/native.go index 33578fd14639..dd9f8a4c684c 100644 --- a/vendor/github.com/containerd/containerd/snapshots/native/native.go +++ b/vendor/github.com/containerd/containerd/snapshots/native/native.go @@ -150,11 +150,17 @@ func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap id, _, _, err := storage.GetInfo(ctx, key) if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } return err } usage, err := fs.DiskUsage(ctx, o.getSnapshotDir(id)) if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } return err } @@ -281,6 +287,9 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k fs.WithXAttrErrorHandler(xattrErrorHandler), } if err := fs.CopyDir(td, parent, copyDirOpts...); err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } return nil, fmt.Errorf("copying of parent failed: %w", err) } } diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go 
b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go index 26ee5c568447..f9ae8a4b1cfc 100644 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go @@ -160,10 +160,6 @@ func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath return snapshots.Info{}, err } - if err := t.Commit(); err != nil { - return snapshots.Info{}, err - } - if o.upperdirLabel { id, _, _, err := storage.GetInfo(ctx, info.Name) if err != nil { @@ -175,6 +171,10 @@ func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpath info.Labels[upperdirKey] = o.upperPath(id) } + if err := t.Commit(); err != nil { + return snapshots.Info{}, err + } + return info, nil } diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go index c5b93fc57679..17e7547feb97 100644 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go @@ -23,13 +23,20 @@ import ( "fmt" "os" "path/filepath" + "syscall" + kernel "github.com/containerd/containerd/contrib/seccomp/kernelversion" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" ) +const ( + // see https://man7.org/linux/man-pages/man2/statfs.2.html + tmpfsMagic = 0x01021994 +) + // SupportsMultipleLowerDir checks if the system supports multiple lowerdirs, // which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs // are always available (so this check isn't needed), and backported to RHEL and @@ -87,6 +94,21 @@ func Supported(root string) error { return SupportsMultipleLowerDir(root) } +// IsPathOnTmpfs returns whether the path is on a tmpfs or not. +// +// It uses statfs to check if the fs type is TMPFS_MAGIC (0x01021994) +// see https://man7.org/linux/man-pages/man2/statfs.2.html +func IsPathOnTmpfs(d string) bool { + stat := syscall.Statfs_t{} + err := syscall.Statfs(d, &stat) + if err != nil { + log.L.WithError(err).Warnf("Could not retrieve statfs for %v", d) + return false + } + + return stat.Type == tmpfsMagic +} + // NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. // // The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. @@ -113,10 +135,19 @@ func NeedsUserXAttr(d string) (bool, error) { return false, nil } - // TODO: add fast path for kernel >= 5.11 . + // userxattr not permitted on tmpfs https://man7.org/linux/man-pages/man5/tmpfs.5.html + if IsPathOnTmpfs(d) { + return false, nil + } + + // Fast path on kernels >= 5.11 // - // Keep in mind that distro vendors might be going to backport the patch to older kernels. - // So we can't completely remove the check. + // Keep in mind that distro vendors might be going to backport the patch to older kernels + // so we can't completely remove the "slow path". 
+ fiveDotEleven := kernel.KernelVersion{Kernel: 5, Major: 11} + if ok, err := kernel.GreaterEqualThan(fiveDotEleven); err == nil && ok { + return true, nil + } tdRoot := filepath.Join(d, "userxattr-check") if err := os.RemoveAll(tdRoot); err != nil { diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index 8b0ea85e65b5..e144fb1583f2 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -153,10 +153,10 @@ type WalkFunc func(context.Context, Info) error // For consistency, we define the following terms to be used throughout this // interface for snapshotter implementations: // -// `ctx` - refers to a context.Context -// `key` - refers to an active snapshot -// `name` - refers to a committed snapshot -// `parent` - refers to the parent in relation +// `ctx` - refers to a context.Context +// `key` - refers to an active snapshot +// `name` - refers to a committed snapshot +// `parent` - refers to the parent in relation // // Most methods take various combinations of these identifiers. Typically, // `name` and `parent` will be used in cases where a method *only* takes @@ -168,7 +168,7 @@ type WalkFunc func(context.Context, Info) error // We cover several examples below to demonstrate the utility of a snapshot // snapshotter. // -// Importing a Layer +// # Importing a Layer // // To import a layer, we simply have the Snapshotter provide a list of // mounts to be applied such that our dst will capture a changeset. We start @@ -185,7 +185,7 @@ type WalkFunc func(context.Context, Info) error // "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), // }) // mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) -// if err != nil { ... } +// if err != nil { ... } // // We get back a list of mounts from Snapshotter.Prepare, with the key identifying // the active snapshot. Mount this to the temporary location with the @@ -202,8 +202,8 @@ type WalkFunc func(context.Context, Info) error // // layer, err := os.Open(layerPath) // if err != nil { ... } -// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location -// if err != nil { ... } +// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location +// if err != nil { ... } // // When the above completes, we should have a filesystem the represents the // contents of the layer. Careful implementations should verify that digest @@ -221,30 +221,30 @@ type WalkFunc func(context.Context, Info) error // Now, we have a layer in the Snapshotter that can be accessed with the digest // provided during commit. // -// Importing the Next Layer +// # Importing the Next Layer // // Making a layer depend on the above is identical to the process described // above except that the parent is provided as parent when calling // Manager.Prepare, assuming a clean, unique key identifier: // -// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) +// mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) // // We then mount, apply and commit, as we did above. The new snapshot will be // based on the content of the previous one. // -// Running a Container +// # Running a Container // // To run a container, we simply provide Snapshotter.Prepare the committed image // snapshot as the parent. 
After mounting, the prepared path can // be used directly as the container's filesystem: // -// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) +// mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) // // The returned mounts can then be passed directly to the container runtime. If // one would like to create a new image from the filesystem, Manager.Commit is // called: // -// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } +// if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } // // Alternatively, for most container runs, Snapshotter.Remove will be called to // signal the Snapshotter to abandon the changes. diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index 692d92c1d27c..105d4fbc3143 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -310,6 +310,11 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat // On windows Created is akin to Stopped break } + if t.pid == 0 { + // allow for deletion of created tasks with PID 0 + // https://github.com/containerd/containerd/issues/7357 + break + } fallthrough default: return nil, fmt.Errorf("task must be stopped before deletion: %s: %w", status.Status, errdefs.ErrFailedPrecondition) diff --git a/vendor/github.com/containerd/containerd/unpacker.go b/vendor/github.com/containerd/containerd/unpacker.go index 719345a1c22c..03cf7554e6a6 100644 --- a/vendor/github.com/containerd/containerd/unpacker.go +++ b/vendor/github.com/containerd/containerd/unpacker.go @@ -32,6 +32,7 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/opencontainers/go-digest" @@ -59,7 +60,9 @@ func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacke if err != nil { return nil, err } - var config UnpackConfig + var config = UnpackConfig{ + DuplicationSuppressor: kmutex.NewNoop(), + } for _, o := range rCtx.UnpackOpts { if err := o(ctx, &config); err != nil { return nil, err @@ -127,15 +130,20 @@ func (u *unpacker) unpack( ctx, cancel := context.WithCancel(ctx) defer cancel() -EachLayer: - for i, desc := range layers { + doUnpackFn := func(i int, desc ocispec.Descriptor) error { parent := identity.ChainID(chain) chain = append(chain, diffIDs[i]) - chainID := identity.ChainID(chain).String() + + unlock, err := u.lockSnChainID(ctx, chainID) + if err != nil { + return err + } + defer unlock() + if _, err := sn.Stat(ctx, chainID); err == nil { // no need to handle - continue + return nil } else if !errdefs.IsNotFound(err) { return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) } @@ -167,7 +175,7 @@ EachLayer: log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") } else { // no need to handle, snapshot now found with chain id - continue EachLayer + return nil } } else { return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) @@ -227,7 +235,7 @@ EachLayer: if err = sn.Commit(ctx, chainID, key, opts...); err != nil { abort() if errdefs.IsAlreadyExists(err) { - continue + return nil } return fmt.Errorf("failed to commit snapshot %s: %w", key, err) } @@ -243,7 
+251,13 @@ EachLayer: if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { return err } + return nil + } + for i, desc := range layers { + if err := doUnpackFn(i, desc); err != nil { + return err + } } chainID := identity.ChainID(chain).String() @@ -271,17 +285,22 @@ func (u *unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec desc := desc i := i - if u.limiter != nil { - if err := u.limiter.Acquire(ctx, 1); err != nil { - return err - } + if err := u.acquire(ctx); err != nil { + return err } eg.Go(func() error { - _, err := h.Handle(ctx2, desc) - if u.limiter != nil { - u.limiter.Release(1) + unlock, err := u.lockBlobDescriptor(ctx2, desc) + if err != nil { + u.release() + return err } + + _, err = h.Handle(ctx2, desc) + + unlock() + u.release() + if err != nil && !errors.Is(err, images.ErrSkipDesc) { return err } @@ -306,7 +325,13 @@ func (u *unpacker) handlerWrapper( layers = map[digest.Digest][]ocispec.Descriptor{} ) return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + unlock, err := u.lockBlobDescriptor(ctx, desc) + if err != nil { + return nil, err + } + children, err := f.Handle(ctx, desc) + unlock() if err != nil { return children, err } @@ -349,6 +374,50 @@ func (u *unpacker) handlerWrapper( }, eg } +func (u *unpacker) acquire(ctx context.Context) error { + if u.limiter == nil { + return nil + } + return u.limiter.Acquire(ctx, 1) +} + +func (u *unpacker) release() { + if u.limiter == nil { + return + } + u.limiter.Release(1) +} + +func (u *unpacker) lockSnChainID(ctx context.Context, chainID string) (func(), error) { + key := u.makeChainIDKeyWithSnapshotter(chainID) + + if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.config.DuplicationSuppressor.Unlock(key) + }, nil +} + +func (u *unpacker) lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { + key := u.makeBlobDescriptorKey(desc) + + if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.config.DuplicationSuppressor.Unlock(key) + }, nil +} + +func (u *unpacker) makeChainIDKeyWithSnapshotter(chainID string) string { + return fmt.Sprintf("sn://%s/%v", u.snapshotter, chainID) +} + +func (u *unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { + return fmt.Sprintf("blob://%v", desc.Digest) +} + func uniquePart() string { t := time.Now() var b [3]byte diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index a92784ef8191..ca1b6773abcb 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.6.1+unknown" + Version = "1.6.18+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
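> Note: the unpacker changes above serialize work per snapshot chain ID (`sn://<snapshotter>/<chainID>`) and per blob digest (`blob://<digest>`) through `config.DuplicationSuppressor`, which defaults to the no-op locker. If I read the client options right, concurrent pulls can share one real locker roughly like this (a sketch: `WithUnpackOpts`/`WithUnpackDuplicationSuppressor` are the containerd 1.6 client options as I understand them, and the image ref is illustrative):

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/pkg/kmutex"
)

// pullDeduplicated is a sketch: pulls that share km serialize on the
// sn://... and blob://... keys above, so a layer common to several
// images is fetched and unpacked only once instead of racing.
func pullDeduplicated(ctx context.Context, client *containerd.Client, km kmutex.KeyedLocker) (containerd.Image, error) {
	return client.Pull(ctx, "docker.io/library/busybox:latest",
		containerd.WithPullUnpack,
		containerd.WithUnpackOpts([]containerd.UnpackOpt{
			containerd.WithUnpackDuplicationSuppressor(km),
		}),
	)
}
```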
diff --git a/vendor/github.com/containerd/continuity/.golangci.yml b/vendor/github.com/containerd/continuity/.golangci.yml index 92a74904f606..2924bc4cf4fc 100644 --- a/vendor/github.com/containerd/continuity/.golangci.yml +++ b/vendor/github.com/containerd/continuity/.golangci.yml @@ -6,8 +6,8 @@ linters: - unconvert - gofmt - goimports - - golint - ineffassign + - revive - vet - unused - misspell diff --git a/vendor/github.com/containerd/continuity/.mailmap b/vendor/github.com/containerd/continuity/.mailmap index f48ae41ad53b..3ce5e9a6ef13 100644 --- a/vendor/github.com/containerd/continuity/.mailmap +++ b/vendor/github.com/containerd/continuity/.mailmap @@ -1 +1,10 @@ -Stephen J Day Stephen Day +Aaron Lehmann +Akihiro Suda +Akihiro Suda +Derek McGowan +Michael Crosby +Phil Estes +Phil Estes +Stephen J Day +Stephen J Day +Stephen J Day diff --git a/vendor/github.com/containerd/continuity/AUTHORS b/vendor/github.com/containerd/continuity/AUTHORS index deef28c1d7cc..0b4a03cd4529 100644 --- a/vendor/github.com/containerd/continuity/AUTHORS +++ b/vendor/github.com/containerd/continuity/AUTHORS @@ -1,8 +1,6 @@ -Aaron Lehmann +Aaron Lehmann Akash Gupta Akihiro Suda -Akihiro Suda -Akihiro Suda Andrew Pennebaker Brandon Philips Brian Goff @@ -10,9 +8,9 @@ Christopher Jones Daniel, Dao Quang Minh Darren Stahl Derek McGowan -Derek McGowan Edward Pilatowicz Fu Wei +Gabriel Adrian Samfira Hajime Tazaki Ian Campbell Ivan Markin @@ -20,20 +18,18 @@ Jacob Blain Christen Justin Cormack Justin Cummins Kasper Fabæch Brandt +Kazuyoshi Kato Kir Kolyshkin Michael Crosby -Michael Crosby Michael Wan Mike Brown Niels de Vos -Phil Estes Phil Estes -Phil Estes Sam Whited Samuel Karp Sebastiaan van Stijn Shengjing Zhu -Stephen J Day +Stephen J Day Tibor Vass Tobias Klauser Tom Faulhaber diff --git a/vendor/github.com/containerd/continuity/Makefile b/vendor/github.com/containerd/continuity/Makefile index 256a0b0d6ef1..63ab8519fd98 100644 --- a/vendor/github.com/containerd/continuity/Makefile +++ b/vendor/github.com/containerd/continuity/Makefile @@ -15,11 +15,6 @@ # Set an output prefix, which is the local directory if not specified PREFIX?=$(shell pwd) -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -GO_LDFLAGS=-ldflags "-X `go list -mod=vendor ./version`.Version=$(VERSION)" - PKG=github.com/containerd/continuity PACKAGES=$(shell go list -mod=vendor ./... | grep -v /vendor/) @@ -41,13 +36,9 @@ all: AUTHORS clean lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -${PREFIX}/bin/continuity: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/continuity: @echo "+ $@" - @go build -mod=vendor -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/continuity + @(cd cmd/continuity && go build -mod=mod -o $@ ${GO_GCFLAGS} .) 
generate: go generate -mod=vendor $(PACKAGES) @@ -66,7 +57,7 @@ test: root-test: @echo "+ $@" - @go test ${TEST_REQUIRES_ROOT_PACKAGES} -test.root + @go test -exec sudo ${TEST_REQUIRES_ROOT_PACKAGES} -test.root test-compile: @echo "+ $@" diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md index f47b4afea76c..10996df16388 100644 --- a/vendor/github.com/containerd/continuity/README.md +++ b/vendor/github.com/containerd/continuity/README.md @@ -1,19 +1,20 @@ # continuity -[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity) -[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=main)](https://travis-ci.org/containerd/continuity) +[![Go Reference](https://pkg.go.dev/badge/github.com/containerd/continuity.svg)](https://pkg.go.dev/github.com/containerd/continuity) +[![Build Status](https://github.com/containerd/continuity/workflows/Continuity/badge.svg)](https://github.com/containerd/continuity/actions?query=workflow%3AContinuity+branch%3Amain) A transport-agnostic, filesystem metadata manifest system This project is a staging area for experiments in providing transport agnostic metadata storage. -Please see https://github.com/opencontainers/specs/issues/11 for more details. +See [opencontainers/runtime-spec#11](https://github.com/opencontainers/runtime-spec/issues/11) +for more details. ## Manifest Format A continuity manifest encodes filesystem metadata in Protocol Buffers. -Please refer to [proto/manifest.proto](proto/manifest.proto). +Refer to [proto/manifest.proto](proto/manifest.proto) for more details. ## Usage @@ -65,7 +66,7 @@ $ ./bin/continuity verify . /tmp/a.pb ## Platforms -continuity primarily targets Linux. continuity may compile for and work on +continuity primarily targets Linux. Continuity may compile for and work on other operating systems, but those platforms are not tested. 
## Contribution Guide diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go index 019b185f8640..f92299c225b1 100644 --- a/vendor/github.com/containerd/continuity/context.go +++ b/vendor/github.com/containerd/continuity/context.go @@ -390,7 +390,7 @@ func (c *context) checkoutFile(fp string, rf RegularFile) error { } } if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) + return fmt.Errorf("file content could not be provided: %w", err) } defer r.Close() @@ -422,7 +422,7 @@ func (c *context) Apply(resource Resource) error { case RegularFile: if fi == nil { if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) + return fmt.Errorf("error checking out file %q: %w", resource.Path(), err) } chmod = false } else { @@ -431,18 +431,18 @@ func (c *context) Apply(resource Resource) error { } if fi.Size() != r.Size() { if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) + return fmt.Errorf("error checking out file %q: %w", resource.Path(), err) } } else { for _, dgst := range r.Digests() { f, err := os.Open(fp) if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) + return fmt.Errorf("failure opening file for read %q: %w", resource.Path(), err) } compared, err := dgst.Algorithm().FromReader(f) if err == nil && dgst != compared { if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) + return fmt.Errorf("error checking out file %q: %w", resource.Path(), err) } break } @@ -450,7 +450,7 @@ func (c *context) Apply(resource Resource) error { err = err1 } if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) + return fmt.Errorf("error checking digest for %q: %w", resource.Path(), err) } } } diff --git a/vendor/github.com/containerd/continuity/driver/driver_unix.go b/vendor/github.com/containerd/continuity/driver/driver_unix.go index d64dd503cca3..6089c51dba82 100644 --- a/vendor/github.com/containerd/continuity/driver/driver_unix.go +++ b/vendor/github.com/containerd/continuity/driver/driver_unix.go @@ -54,7 +54,7 @@ func (d *driver) Mkfifo(path string, mode os.FileMode) error { func (d *driver) Getxattr(p string) (map[string][]byte, error) { xattrs, err := sysx.Listxattr(p) if err != nil { - return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + return nil, fmt.Errorf("listing %s xattrs: %w", p, err) } sort.Strings(xattrs) @@ -63,7 +63,7 @@ func (d *driver) Getxattr(p string) (map[string][]byte, error) { for _, attr := range xattrs { value, err := sysx.Getxattr(p, attr) if err != nil { - return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + return nil, fmt.Errorf("getting %q xattr on %s: %w", attr, p, err) } // NOTE(stevvooe): This append/copy tricky relies on unique @@ -82,7 +82,7 @@ func (d *driver) Getxattr(p string) (map[string][]byte, error) { func (d *driver) Setxattr(path string, attrMap map[string][]byte) error { for attr, value := range attrMap { if err := sysx.Setxattr(path, attr, value, 0); err != nil { - return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + return fmt.Errorf("error setting xattr %q on %s: %w", attr, path, err) } } @@ -94,7 +94,7 @@ func (d *driver) Setxattr(path string, attrMap map[string][]byte) error { func (d *driver) LGetxattr(p string) (map[string][]byte, error) 
{ xattrs, err := sysx.LListxattr(p) if err != nil { - return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + return nil, fmt.Errorf("listing %s xattrs: %w", p, err) } sort.Strings(xattrs) @@ -103,7 +103,7 @@ func (d *driver) LGetxattr(p string) (map[string][]byte, error) { for _, attr := range xattrs { value, err := sysx.LGetxattr(p, attr) if err != nil { - return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + return nil, fmt.Errorf("getting %q xattr on %s: %w", attr, p, err) } // NOTE(stevvooe): This append/copy tricky relies on unique @@ -122,7 +122,7 @@ func (d *driver) LGetxattr(p string) (map[string][]byte, error) { func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error { for attr, value := range attrMap { if err := sysx.LSetxattr(path, attr, value, 0); err != nil { - return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + return fmt.Errorf("error setting xattr %q on %s: %w", attr, path, err) } } diff --git a/vendor/github.com/containerd/continuity/driver/utils.go b/vendor/github.com/containerd/continuity/driver/utils.go index 0c688d158f19..d122a3f732b2 100644 --- a/vendor/github.com/containerd/continuity/driver/utils.go +++ b/vendor/github.com/containerd/continuity/driver/utils.go @@ -18,12 +18,11 @@ package driver import ( "io" - "io/ioutil" "os" "sort" ) -// ReadFile works the same as ioutil.ReadFile with the Driver abstraction +// ReadFile works the same as os.ReadFile with the Driver abstraction func ReadFile(r Driver, filename string) ([]byte, error) { f, err := r.Open(filename) if err != nil { @@ -31,7 +30,7 @@ func ReadFile(r Driver, filename string) ([]byte, error) { } defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return nil, err } @@ -39,7 +38,7 @@ func ReadFile(r Driver, filename string) ([]byte, error) { return data, nil } -// WriteFile works the same as ioutil.WriteFile with the Driver abstraction +// WriteFile works the same as os.WriteFile with the Driver abstraction func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error { f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) if err != nil { diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go index 8e23934e9969..6982a761ba9e 100644 --- a/vendor/github.com/containerd/continuity/fs/copy.go +++ b/vendor/github.com/containerd/continuity/fs/copy.go @@ -22,6 +22,8 @@ import ( "os" "path/filepath" "sync" + + "github.com/sirupsen/logrus" ) var bufferPool = &sync.Pool{ @@ -31,7 +33,7 @@ var bufferPool = &sync.Pool{ }, } -// XAttrErrorHandlers transform a non-nil xattr error. +// XAttrErrorHandler transform a non-nil xattr error. // Return nil to ignore an error. // xattrKey can be empty for listxattr operation. 
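> Note: `XAttrErrorHandler` (type declared just below; the comment hunk above corrects its name) lets callers tolerate per-xattr failures during a copy. A sketch that skips "operation not supported" errors, which source trees on filesystems without xattr support commonly produce; the helper name is illustrative:

```go
package example

import (
	"errors"

	"github.com/containerd/continuity/fs"
	"golang.org/x/sys/unix"
)

// copyTolerant is a sketch: copy a tree but ignore xattr errors from
// filesystems that do not support them, failing on anything else.
func copyTolerant(dst, src string) error {
	handler := func(dst, src, xattrKey string, err error) error {
		// xattrKey is "" when the listxattr call itself failed.
		if errors.Is(err, unix.ENOTSUP) {
			return nil
		}
		return err
	}
	return fs.CopyDir(dst, src, fs.WithXAttrErrorHandler(handler))
}
```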
type XAttrErrorHandler func(dst, src, xattrKey string, err error) error @@ -152,13 +154,15 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er if err := os.Symlink(link, target); err != nil { return fmt.Errorf("failed to create symlink: %s: %w", target, err) } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice: - if err := copyDevice(target, fi); err != nil { - return fmt.Errorf("failed to create device: %w", err) + case (fi.Mode() & os.ModeDevice) == os.ModeDevice, + (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe, + (fi.Mode() & os.ModeSocket) == os.ModeSocket: + if err := copyIrregular(target, fi); err != nil { + return fmt.Errorf("failed to create irregular file: %w", err) } default: - // TODO: Support pipes and sockets - return fmt.Errorf("unsupported mode %s: %w", fi.Mode(), err) + logrus.Warnf("unsupported mode: %s: %s", source, fi.Mode()) + continue } if err := copyFileInfo(fi, source, target); err != nil { diff --git a/vendor/github.com/containerd/continuity/fs/copy_darwin.go b/vendor/github.com/containerd/continuity/fs/copy_darwin.go deleted file mode 100644 index ce55f0aa2421..000000000000 --- a/vendor/github.com/containerd/continuity/fs/copy_darwin.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build darwin -// +build darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_device_unix.go b/vendor/github.com/containerd/continuity/fs/copy_device_unix.go deleted file mode 100644 index f821890cb7fd..000000000000 --- a/vendor/github.com/containerd/continuity/fs/copy_device_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build openbsd || solaris || netbsd -// +build openbsd solaris netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package fs - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_freebsd.go b/vendor/github.com/containerd/continuity/fs/copy_freebsd.go deleted file mode 100644 index 4aaf743e5a94..000000000000 --- a/vendor/github.com/containerd/continuity/fs/copy_freebsd.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build freebsd -// +build freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package fs - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), st.Rdev) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_irregular_freebsd.go b/vendor/github.com/containerd/continuity/fs/copy_irregular_freebsd.go new file mode 100644 index 000000000000..cfe9d8020476 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_irregular_freebsd.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "os" + "syscall" +) + +// copyIrregular covers devices, pipes, and sockets +func copyIrregular(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) // not *unix.Stat_t + if !ok { + return fmt.Errorf("unsupported stat type: %s: %v", dst, fi.Mode()) + } + var rDev uint64 // uint64 on FreeBSD, int on other unixen + if fi.Mode()&os.ModeDevice == os.ModeDevice { + rDev = st.Rdev + } + return syscall.Mknod(dst, uint32(st.Mode), rDev) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_irregular_unix.go b/vendor/github.com/containerd/continuity/fs/copy_irregular_unix.go new file mode 100644 index 000000000000..99fc8a965121 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_irregular_unix.go @@ -0,0 +1,40 @@ +//go:build !windows && !freebsd +// +build !windows,!freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "os" + "syscall" +) + +// copyIrregular covers devices, pipes, and sockets +func copyIrregular(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) // not *unix.Stat_t + if !ok { + return fmt.Errorf("unsupported stat type: %s: %v", dst, fi.Mode()) + } + var rDev int + if fi.Mode()&os.ModeDevice == os.ModeDevice { + rDev = int(st.Rdev) + } + //nolint:unconvert + return syscall.Mknod(dst, uint32(st.Mode), rDev) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go index 938407662c7f..1906e5e011d8 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_linux.go +++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go @@ -17,7 +17,6 @@ package fs import ( - "errors" "fmt" "io" "os" @@ -144,11 +143,3 @@ func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAtt return nil } - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go index e3f0cdd58dc5..4dad9441de8d 100644 --- a/vendor/github.com/containerd/continuity/fs/copy_windows.go +++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -85,6 +85,6 @@ func copyXAttrs(dst, src string, excludes map[string]struct{}, errorHandler XAtt return nil } -func copyDevice(dst string, fi os.FileInfo) error { - return errors.New("device copy not supported") +func copyIrregular(dst string, fi os.FileInfo) error { + return errors.New("irregular copy not supported") } diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go index e64f9e73d304..3cd4eee6fbf3 100644 --- a/vendor/github.com/containerd/continuity/fs/diff.go +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -22,9 +22,8 @@ import ( "path/filepath" "strings" - "golang.org/x/sync/errgroup" - "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" ) // ChangeKind is the type of modification that diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go index ddd6c79375b2..a8eab1db8ac2 100644 --- a/vendor/github.com/containerd/continuity/fs/dtype_linux.go +++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -35,7 +35,7 @@ func locateDummyIfEmpty(path string) (string, error) { if len(children) != 0 { return "", nil } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + dummyFile, err := os.CreateTemp(path, "fsutils-dummy") if err != nil { return "", err } diff --git a/vendor/github.com/containerd/continuity/fs/fstest/compare.go b/vendor/github.com/containerd/continuity/fs/fstest/compare.go index 5ae167e5d9b2..98a9abcda91a 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/compare.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/compare.go @@ -18,7 
+18,6 @@ package fstest import ( "fmt" - "io/ioutil" "os" "github.com/containerd/continuity" @@ -57,7 +56,7 @@ func CheckDirectoryEqual(d1, d2 string) error { // CheckDirectoryEqualWithApplier compares directory against applier func CheckDirectoryEqualWithApplier(root string, a Applier) error { - applied, err := ioutil.TempDir("", "fstest") + applied, err := os.MkdirTemp("", "fstest") if err != nil { return err } diff --git a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go b/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go index 360ef5526662..420126e6c302 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go @@ -18,7 +18,6 @@ package fstest import ( "context" - "io/ioutil" "os" "testing" ) @@ -49,7 +48,7 @@ func makeTest(t *testing.T, ta TestApplier, as []Applier) func(t *testing.T) { } defer cleanup() - applyDir, err := ioutil.TempDir("", "test-expected-") + applyDir, err := os.MkdirTemp("", "test-expected-") if err != nil { t.Fatalf("Unable to make temp directory: %+v", err) } diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go index e72c0e72c8e8..1df07f54a678 100644 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ b/vendor/github.com/containerd/continuity/hardlinks.go @@ -63,7 +63,7 @@ func (hlm *hardlinkManager) Merge() ([]Resource, error) { merged, err := Merge(linked...) if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) + return nil, fmt.Errorf("error merging hardlink: %w", err) } resources = append(resources, merged) diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go index 503640ebfc86..392c407faf29 100644 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ b/vendor/github.com/containerd/continuity/ioutils.go @@ -19,7 +19,6 @@ package continuity import ( "bytes" "io" - "io/ioutil" "os" "path/filepath" ) @@ -34,7 +33,7 @@ func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { // atomicWriteFile writes data to a file by first writing to a temp // file and calling rename. func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) if err != nil { return err } diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go index 8e83317ee754..659a4015316c 100644 --- a/vendor/github.com/containerd/continuity/manifest.go +++ b/vendor/github.com/containerd/continuity/manifest.go @@ -23,7 +23,8 @@ import ( "sort" pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" ) // Manifest provides the contents of a manifest. 
Users of this struct should @@ -68,7 +69,12 @@ func MarshalText(w io.Writer, m *Manifest) error { bm.Resource = append(bm.Resource, toProto(resource)) } - return proto.MarshalText(w, &bm) + b, err := prototext.Marshal(&bm) + if err != nil { + return err + } + _, err = w.Write(b) + return err } // BuildManifest creates the manifest for the given context @@ -78,7 +84,7 @@ func BuildManifest(ctx Context) (*Manifest, error) { if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) + return fmt.Errorf("error walking %s: %w", p, err) } if p == string(os.PathSeparator) { @@ -101,7 +107,7 @@ func BuildManifest(ctx Context) (*Manifest, error) { return nil } else if err != errNotAHardLink { // handle any other case where we have a proper error. - return fmt.Errorf("adding hardlink %s: %v", p, err) + return fmt.Errorf("adding hardlink %s: %w", p, err) } resourcesByPath[p] = resource diff --git a/vendor/github.com/containerd/continuity/proto/gen.go b/vendor/github.com/containerd/continuity/proto/gen.go index 63ce10fb5312..bc9972867f85 100644 --- a/vendor/github.com/containerd/continuity/proto/gen.go +++ b/vendor/github.com/containerd/continuity/proto/gen.go @@ -17,3 +17,5 @@ package proto //go:generate protoc --go_out=. manifest.proto +//go:generate mv github.com/containerd/continuity/proto/manifest.pb.go . +//go:generate rmdir -p github.com/containerd/continuity/proto diff --git a/vendor/github.com/containerd/continuity/proto/manifest.pb.go b/vendor/github.com/containerd/continuity/proto/manifest.pb.go index c83a9e775278..9dbc2bf221d7 100644 --- a/vendor/github.com/containerd/continuity/proto/manifest.pb.go +++ b/vendor/github.com/containerd/continuity/proto/manifest.pb.go @@ -1,223 +1,315 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.4 // source: manifest.proto -/* -Package proto is a generated protocol buffer package. - -It is generated from these files: - manifest.proto - -It has these top-level messages: - Manifest - Resource - XAttr - ADSEntry -*/ package proto -import proto1 "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto1.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Manifest specifies the entries in a container bundle, keyed and sorted by // path. 
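The manifest.go change above tracks the github.com/golang/protobuf to google.golang.org/protobuf migration: the new module has no `MarshalText(w, m)`, so the code marshals to bytes with `prototext` and writes them explicitly. A standalone sketch of the same call shape, using `durationpb` purely as a stand-in message:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(90 * time.Second) // any proto.Message works here

	// prototext.Marshal replaces the removed proto.MarshalText(w, m):
	// it returns bytes, and the caller does the writing itself.
	b, err := prototext.Marshal(m)
	if err != nil {
		fmt.Println(err)
		return
	}
	if _, err := os.Stdout.Write(b); err != nil {
		fmt.Println(err)
	}
}
```

Note that prototext deliberately does not guarantee byte-stable output across protobuf-go releases, so it suits human-readable manifests that are reparsed, not digest computation.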
type Manifest struct { - Resource []*Resource `protobuf:"bytes,1,rep,name=resource" json:"resource,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource []*Resource `protobuf:"bytes,1,rep,name=resource,proto3" json:"resource,omitempty"` } -func (m *Manifest) Reset() { *m = Manifest{} } -func (m *Manifest) String() string { return proto1.CompactTextString(m) } -func (*Manifest) ProtoMessage() {} -func (*Manifest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *Manifest) Reset() { + *x = Manifest{} + if protoimpl.UnsafeEnabled { + mi := &file_manifest_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Manifest) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *Manifest) GetResource() []*Resource { - if m != nil { - return m.Resource +func (*Manifest) ProtoMessage() {} + +func (x *Manifest) ProtoReflect() protoreflect.Message { + mi := &file_manifest_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Manifest.ProtoReflect.Descriptor instead. +func (*Manifest) Descriptor() ([]byte, []int) { + return file_manifest_proto_rawDescGZIP(), []int{0} +} + +func (x *Manifest) GetResource() []*Resource { + if x != nil { + return x.Resource } return nil } type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Path specifies the path from the bundle root. If more than one // path is present, the entry may represent a hardlink, rather than using // a link target. The path format is operating system specific. - Path []string `protobuf:"bytes,1,rep,name=path" json:"path,omitempty"` + Path []string `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` // Uid specifies the user id for the resource. - Uid int64 `protobuf:"varint,2,opt,name=uid" json:"uid,omitempty"` + Uid int64 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` // Gid specifies the group id for the resource. - Gid int64 `protobuf:"varint,3,opt,name=gid" json:"gid,omitempty"` + Gid int64 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` // user and group are not currently used but their field numbers have been // reserved for future use. As such, they are marked as deprecated. - User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"` - Group string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` + // + // Deprecated: Do not use. + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` // "deprecated" stands for "reserved" here + // Deprecated: Do not use. + Group string `protobuf:"bytes,5,opt,name=group,proto3" json:"group,omitempty"` // "deprecated" stands for "reserved" here // Mode defines the file mode and permissions. We've used the same // bit-packing from Go's os package, // http://golang.org/pkg/os/#FileMode, since they've done the work of // creating a cross-platform layout. - Mode uint32 `protobuf:"varint,6,opt,name=mode" json:"mode,omitempty"` + Mode uint32 `protobuf:"varint,6,opt,name=mode,proto3" json:"mode,omitempty"` // Size specifies the size in bytes of the resource. This is only valid // for regular files. 
- Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` + Size uint64 `protobuf:"varint,7,opt,name=size,proto3" json:"size,omitempty"` // Digest specifies the content digest of the target file. Only valid for // regular files. The strings are formatted in OCI style, i.e. :. // For detailed information about the format, please refer to OCI Image Spec: // https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification // The digests are sorted in lexical order and implementations may choose // which algorithms they prefer. - Digest []string `protobuf:"bytes,8,rep,name=digest" json:"digest,omitempty"` + Digest []string `protobuf:"bytes,8,rep,name=digest,proto3" json:"digest,omitempty"` // Target defines the target of a hard or soft link. Absolute links start // with a slash and specify the resource relative to the bundle root. // Relative links do not start with a slash and are relative to the // resource path. - Target string `protobuf:"bytes,9,opt,name=target" json:"target,omitempty"` + Target string `protobuf:"bytes,9,opt,name=target,proto3" json:"target,omitempty"` // Major specifies the major device number for character and block devices. - Major uint64 `protobuf:"varint,10,opt,name=major" json:"major,omitempty"` + Major uint64 `protobuf:"varint,10,opt,name=major,proto3" json:"major,omitempty"` // Minor specifies the minor device number for character and block devices. - Minor uint64 `protobuf:"varint,11,opt,name=minor" json:"minor,omitempty"` + Minor uint64 `protobuf:"varint,11,opt,name=minor,proto3" json:"minor,omitempty"` // Xattr provides storage for extended attributes for the target resource. - Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr" json:"xattr,omitempty"` + Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr,proto3" json:"xattr,omitempty"` // Ads stores one or more alternate data streams for the target resource. - Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads" json:"ads,omitempty"` + Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads,proto3" json:"ads,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_manifest_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_manifest_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto1.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_manifest_proto_rawDescGZIP(), []int{1} +} -func (m *Resource) GetPath() []string { - if m != nil { - return m.Path +func (x *Resource) GetPath() []string { + if x != nil { + return x.Path } return nil } -func (m *Resource) GetUid() int64 { - if m != nil { - return m.Uid +func (x *Resource) GetUid() int64 { + if x != nil { + return x.Uid } return 0 } -func (m *Resource) GetGid() int64 { - if m != nil { - return m.Gid +func (x *Resource) GetGid() int64 { + if x != nil { + return x.Gid } return 0 } -func (m *Resource) GetUser() string { - if m != nil { - return m.User +// Deprecated: Do not use. +func (x *Resource) GetUser() string { + if x != nil { + return x.User } return "" } -func (m *Resource) GetGroup() string { - if m != nil { - return m.Group +// Deprecated: Do not use. +func (x *Resource) GetGroup() string { + if x != nil { + return x.Group } return "" } -func (m *Resource) GetMode() uint32 { - if m != nil { - return m.Mode +func (x *Resource) GetMode() uint32 { + if x != nil { + return x.Mode } return 0 } -func (m *Resource) GetSize() uint64 { - if m != nil { - return m.Size +func (x *Resource) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *Resource) GetDigest() []string { - if m != nil { - return m.Digest +func (x *Resource) GetDigest() []string { + if x != nil { + return x.Digest } return nil } -func (m *Resource) GetTarget() string { - if m != nil { - return m.Target +func (x *Resource) GetTarget() string { + if x != nil { + return x.Target } return "" } -func (m *Resource) GetMajor() uint64 { - if m != nil { - return m.Major +func (x *Resource) GetMajor() uint64 { + if x != nil { + return x.Major } return 0 } -func (m *Resource) GetMinor() uint64 { - if m != nil { - return m.Minor +func (x *Resource) GetMinor() uint64 { + if x != nil { + return x.Minor } return 0 } -func (m *Resource) GetXattr() []*XAttr { - if m != nil { - return m.Xattr +func (x *Resource) GetXattr() []*XAttr { + if x != nil { + return x.Xattr } return nil } -func (m *Resource) GetAds() []*ADSEntry { - if m != nil { - return m.Ads +func (x *Resource) GetAds() []*ADSEntry { + if x != nil { + return x.Ads } return nil } // XAttr encodes extended attributes for a resource. type XAttr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Name specifies the attribute name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Data specifies the associated data for the attribute. 
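One practical property of the regenerated accessors above, and a reason call sites prefer `GetX` over direct field access: every generated getter is nil-receiver safe. A tiny illustration against the vendored package:

```go
package main

import (
	"fmt"

	pb "github.com/containerd/continuity/proto"
)

func main() {
	// Calling a generated getter on a nil message returns the zero
	// value instead of panicking, so manifest walkers can chain
	// accessors without nil checks.
	var r *pb.Resource
	fmt.Println(r.GetPath(), r.GetUid(), r.GetMode()) // [] 0 0
}
```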
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (m *XAttr) Reset() { *m = XAttr{} } -func (m *XAttr) String() string { return proto1.CompactTextString(m) } -func (*XAttr) ProtoMessage() {} -func (*XAttr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *XAttr) Reset() { + *x = XAttr{} + if protoimpl.UnsafeEnabled { + mi := &file_manifest_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XAttr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XAttr) ProtoMessage() {} + +func (x *XAttr) ProtoReflect() protoreflect.Message { + mi := &file_manifest_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XAttr.ProtoReflect.Descriptor instead. +func (*XAttr) Descriptor() ([]byte, []int) { + return file_manifest_proto_rawDescGZIP(), []int{2} +} -func (m *XAttr) GetName() string { - if m != nil { - return m.Name +func (x *XAttr) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *XAttr) GetData() []byte { - if m != nil { - return m.Data +func (x *XAttr) GetData() []byte { + if x != nil { + return x.Data } return nil } // ADSEntry encodes information for a Windows Alternate Data Stream. type ADSEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Name specifices the stream name. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Data specifies the stream data. // See also the description about the digest below. Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -229,64 +321,205 @@ type ADSEntry struct { // How to access the actual data using the digest is implementation-specific, // and implementations can choose not to implement digest. // So, digest SHOULD be used only when the stream data is large. - Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"` + Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *ADSEntry) Reset() { *m = ADSEntry{} } -func (m *ADSEntry) String() string { return proto1.CompactTextString(m) } -func (*ADSEntry) ProtoMessage() {} -func (*ADSEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *ADSEntry) Reset() { + *x = ADSEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_manifest_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *ADSEntry) GetName() string { - if m != nil { - return m.Name +func (x *ADSEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ADSEntry) ProtoMessage() {} + +func (x *ADSEntry) ProtoReflect() protoreflect.Message { + mi := &file_manifest_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ADSEntry.ProtoReflect.Descriptor instead. 
+func (*ADSEntry) Descriptor() ([]byte, []int) { + return file_manifest_proto_rawDescGZIP(), []int{3} +} + +func (x *ADSEntry) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *ADSEntry) GetData() []byte { - if m != nil { - return m.Data +func (x *ADSEntry) GetData() []byte { + if x != nil { + return x.Data } return nil } -func (m *ADSEntry) GetDigest() string { - if m != nil { - return m.Digest +func (x *ADSEntry) GetDigest() string { + if x != nil { + return x.Digest } return "" } -func init() { - proto1.RegisterType((*Manifest)(nil), "proto.Manifest") - proto1.RegisterType((*Resource)(nil), "proto.Resource") - proto1.RegisterType((*XAttr)(nil), "proto.XAttr") - proto1.RegisterType((*ADSEntry)(nil), "proto.ADSEntry") -} - -func init() { proto1.RegisterFile("manifest.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0x49, 0x93, 0xf4, 0x4d, 0xa7, 0xed, 0xab, 0x2c, 0x52, 0xe6, 0x18, 0x73, 0x0a, 0x08, - 0x15, 0xf4, 0xe0, 0xb9, 0xa2, 0x17, 0xc1, 0xcb, 0x7a, 0xf1, 0xba, 0xba, 0x6b, 0x5c, 0x21, 0xd9, - 0xb0, 0xd9, 0x80, 0xfa, 0xe5, 0xfc, 0x6a, 0x32, 0xb3, 0x69, 0xd1, 0x9b, 0xa7, 0x3c, 0xcf, 0x6f, - 0xfe, 0x64, 0xf6, 0x81, 0xff, 0xad, 0xea, 0xec, 0x8b, 0x19, 0xc2, 0xb6, 0xf7, 0x2e, 0x38, 0x91, - 0xf3, 0xa7, 0xba, 0x82, 0xe2, 0x7e, 0x2a, 0x88, 0x33, 0x28, 0xbc, 0x19, 0xdc, 0xe8, 0x9f, 0x0d, - 0x26, 0x65, 0x5a, 0x2f, 0x2f, 0x8e, 0x62, 0xf3, 0x56, 0x4e, 0x58, 0x1e, 0x1a, 0xaa, 0xaf, 0x19, - 0x14, 0x7b, 0x2c, 0x04, 0x64, 0xbd, 0x0a, 0xaf, 0x3c, 0xb5, 0x90, 0xac, 0xc5, 0x31, 0xa4, 0xa3, - 0xd5, 0x38, 0x2b, 0x93, 0x3a, 0x95, 0x24, 0x89, 0x34, 0x56, 0x63, 0x1a, 0x49, 0x63, 0xb5, 0xd8, - 0x40, 0x36, 0x0e, 0xc6, 0x63, 0x56, 0x26, 0xf5, 0xe2, 0x7a, 0x86, 0x89, 0x64, 0x2f, 0x10, 0xf2, - 0xc6, 0xbb, 0xb1, 0xc7, 0xfc, 0x50, 0x88, 0x80, 0xfe, 0xd4, 0x3a, 0x6d, 0x70, 0x5e, 0x26, 0xf5, - 0x5a, 0xb2, 0x26, 0x36, 0xd8, 0x4f, 0x83, 0xff, 0xca, 0xa4, 0xce, 0x24, 0x6b, 0xb1, 0x81, 0xb9, - 0xb6, 0x8d, 0x19, 0x02, 0x16, 0x7c, 0xd3, 0xe4, 0x88, 0x07, 0xe5, 0x1b, 0x13, 0x70, 0x41, 0xab, - 0xe5, 0xe4, 0xc4, 0x09, 0xe4, 0xad, 0x7a, 0x73, 0x1e, 0x81, 0x97, 0x44, 0xc3, 0xd4, 0x76, 0xce, - 0xe3, 0x72, 0xa2, 0x64, 0x44, 0x05, 0xf9, 0xbb, 0x0a, 0xc1, 0xe3, 0x8a, 0x43, 0x5a, 0x4d, 0x21, - 0x3d, 0xee, 0x42, 0xf0, 0x32, 0x96, 0xc4, 0x29, 0xa4, 0x4a, 0x0f, 0xb8, 0xfe, 0x15, 0xe3, 0xee, - 0xe6, 0xe1, 0xb6, 0x0b, 0xfe, 0x43, 0x52, 0xad, 0x3a, 0x87, 0x9c, 0x47, 0xe8, 0xfe, 0x4e, 0xb5, - 0x94, 0x39, 0x5d, 0xc4, 0x9a, 0x98, 0x56, 0x41, 0x71, 0x7c, 0x2b, 0xc9, 0xba, 0xba, 0x83, 0x62, - 0xbf, 0xe1, 0xaf, 0x33, 0x3f, 0x72, 0x48, 0xe3, 0x7b, 0xa3, 0x7b, 0x9a, 0xf3, 0x45, 0x97, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xef, 0x27, 0x99, 0xf7, 0x17, 0x02, 0x00, 0x00, +var File_manifest_proto protoreflect.FileDescriptor + +var file_manifest_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x37, 0x0a, 0x08, 0x4d, 0x61, 0x6e, 0x69, 0x66, + 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x22, 0xbf, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x18, 0x0a, + 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x22, 0x0a, 0x05, 0x78, + 0x61, 0x74, 0x74, 0x72, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x58, 0x41, 0x74, 0x74, 0x72, 0x52, 0x05, 0x78, 0x61, 0x74, 0x74, 0x72, 0x12, + 0x21, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x44, 0x53, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x61, + 0x64, 0x73, 0x22, 0x2f, 0x0a, 0x05, 0x58, 0x41, 0x74, 0x74, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x4a, 0x0a, 0x08, 0x41, 0x44, 0x53, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, + 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_manifest_proto_rawDescOnce sync.Once + file_manifest_proto_rawDescData = file_manifest_proto_rawDesc +) + +func file_manifest_proto_rawDescGZIP() []byte { + file_manifest_proto_rawDescOnce.Do(func() { + file_manifest_proto_rawDescData = protoimpl.X.CompressGZIP(file_manifest_proto_rawDescData) + }) + return file_manifest_proto_rawDescData +} + +var file_manifest_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_manifest_proto_goTypes = []interface{}{ + (*Manifest)(nil), // 0: proto.Manifest + (*Resource)(nil), // 1: proto.Resource + (*XAttr)(nil), 
// 2: proto.XAttr + (*ADSEntry)(nil), // 3: proto.ADSEntry +} +var file_manifest_proto_depIdxs = []int32{ + 1, // 0: proto.Manifest.resource:type_name -> proto.Resource + 2, // 1: proto.Resource.xattr:type_name -> proto.XAttr + 3, // 2: proto.Resource.ads:type_name -> proto.ADSEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_manifest_proto_init() } +func file_manifest_proto_init() { + if File_manifest_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_manifest_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Manifest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_manifest_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_manifest_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XAttr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_manifest_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ADSEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_manifest_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_manifest_proto_goTypes, + DependencyIndexes: file_manifest_proto_depIdxs, + MessageInfos: file_manifest_proto_msgTypes, + }.Build() + File_manifest_proto = out.File + file_manifest_proto_rawDesc = nil + file_manifest_proto_goTypes = nil + file_manifest_proto_depIdxs = nil } diff --git a/vendor/github.com/containerd/continuity/proto/manifest.proto b/vendor/github.com/containerd/continuity/proto/manifest.proto index 66ef80f054ed..35df41ffab69 100644 --- a/vendor/github.com/containerd/continuity/proto/manifest.proto +++ b/vendor/github.com/containerd/continuity/proto/manifest.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package proto; +option go_package = "github.com/containerd/continuity/proto;proto"; // Manifest specifies the entries in a container bundle, keyed and sorted by // path. diff --git a/vendor/github.com/containerd/continuity/sysx/generate.sh b/vendor/github.com/containerd/continuity/sysx/generate.sh deleted file mode 100644 index 87d708d7ae77..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/generate.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -# Copyright The containerd Authors. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -set -e - -mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl" - -fix() { - sed 's,^package syscall$,package sysx,' \ - | sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \ - | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \ - | gofmt -r='Syscall6 -> syscall.Syscall6' \ - | gofmt -r='Syscall -> syscall.Syscall' \ - | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \ - | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \ - | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \ - | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \ - | gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \ - | gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \ - | gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \ - | gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR' -} - -if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then - echo "Must specify \$GOARCH and \$GOOS" - exit 1 -fi - -mkargs="" - -if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then - mkargs="-l32" -fi - -for f in "$@"; do - $mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go" -done - diff --git a/vendor/github.com/containerd/go-cni/.gitignore b/vendor/github.com/containerd/go-cni/.gitignore new file mode 100644 index 000000000000..04249514ed31 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/.gitignore @@ -0,0 +1,3 @@ +/bin/ +coverage.txt +profile.out diff --git a/vendor/github.com/containerd/go-cni/.golangci.yml b/vendor/github.com/containerd/go-cni/.golangci.yml index 75f462ee0b64..673fd33a2f98 100644 --- a/vendor/github.com/containerd/go-cni/.golangci.yml +++ b/vendor/github.com/containerd/go-cni/.golangci.yml @@ -6,7 +6,7 @@ linters: - unconvert - gofmt - goimports - - golint + - revive - ineffassign - vet - unused diff --git a/vendor/github.com/containerd/go-cni/Makefile b/vendor/github.com/containerd/go-cni/Makefile new file mode 100644 index 000000000000..0b2edf770761 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/Makefile @@ -0,0 +1,41 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TESTFLAGS_PARALLEL ?= 8 + +EXTRA_TESTFLAGS ?= + +# quiet or not +ifeq ($(V),1) + Q = +else + Q = @ +endif + +.PHONY: test integration clean help + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort + +test: ## run tests, except integration tests and tests that require root + $(Q)go test -v -race $(EXTRA_TESTFLAGS) -count=1 ./... + +integration: bin/integration.test ## run integration test + $(Q)bin/integration.test -test.v -test.count=1 -test.root $(EXTRA_TESTFLAGS) -test.parallel $(TESTFLAGS_PARALLEL) + +bin/integration.test: ## build integration test binary into bin + $(Q)cd ./integration && go test -race -c . 
-o ../bin/integration.test + +clean: ## clean up binaries + $(Q)rm -rf bin/ diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go index 44dceaea4611..b10af47ab602 100644 --- a/vendor/github.com/containerd/go-cni/cni.go +++ b/vendor/github.com/containerd/go-cni/cni.go @@ -33,6 +33,8 @@ import ( type CNI interface { // Setup setup the network for the namespace Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) + // SetupSerially sets up each of the network interfaces for the namespace in serial + SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) // Remove tears down the network of the namespace. Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error // Check checks if the network is still in desired state @@ -165,6 +167,34 @@ func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...Name return c.createResult(result) } +// SetupSerially setups the network in the namespace and returns a Result +func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { + if err := c.Status(); err != nil { + return nil, err + } + ns, err := newNamespace(id, path, opts...) + if err != nil { + return nil, err + } + result, err := c.attachNetworksSerially(ctx, ns) + if err != nil { + return nil, err + } + return c.createResult(result) +} + +func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) { + var results []*types100.Result + for _, network := range c.Networks() { + r, err := network.Attach(ctx, ns) + if err != nil { + return nil, err + } + results = append(results, r) + } + return results, nil +} + type asynchAttachResult struct { index int res *types100.Result diff --git a/vendor/github.com/containerd/go-cni/deprecated.go b/vendor/github.com/containerd/go-cni/deprecated.go index d823651898c9..06afd15432de 100644 --- a/vendor/github.com/containerd/go-cni/deprecated.go +++ b/vendor/github.com/containerd/go-cni/deprecated.go @@ -19,10 +19,10 @@ package cni import types100 "github.com/containernetworking/cni/pkg/types/100" // Deprecated: use cni.Opt instead -type CNIOpt = Opt //nolint: golint // type name will be used as cni.CNIOpt by other packages, and that stutters +type CNIOpt = Opt //revive:disable // type name will be used as cni.CNIOpt by other packages, and that stutters // Deprecated: use cni.Result instead -type CNIResult = Result //nolint: golint // type name will be used as cni.CNIResult by other packages, and that stutters +type CNIResult = Result //revive:disable // type name will be used as cni.CNIResult by other packages, and that stutters // GetCNIResultFromResults creates a Result from the given slice of types100.Result, // adding structured data containing the interface configuration for each of the diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go index 1fad5f69a50d..3387f6fd1311 100644 --- a/vendor/github.com/containerd/go-cni/namespace_opts.go +++ b/vendor/github.com/containerd/go-cni/namespace_opts.go @@ -18,7 +18,7 @@ package cni type NamespaceOpts func(s *Namespace) error -// Capabilities +// WithCapabilityPortMap adds support for port mappings func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts { return func(c *Namespace) error { c.capabilityArgs["portMappings"] = portMapping @@ -26,6 +26,7 @@ func 
WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts { } } +// WithCapabilityIPRanges adds support for ip ranges func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts { return func(c *Namespace) error { c.capabilityArgs["ipRanges"] = ipRanges @@ -33,8 +34,7 @@ func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts { } } -// WithCapabilityBandWitdh adds support for traffic shaping: -// https://github.com/heptio/cni-plugins/tree/master/plugins/meta/bandwidth +// WithCapabilityBandWitdh adds support for bandwidth limits func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts { return func(c *Namespace) error { c.capabilityArgs["bandwidth"] = bandWidth @@ -50,6 +50,8 @@ func WithCapabilityDNS(dns DNS) NamespaceOpts { } } +// WithCapability support well-known capabilities +// https://www.cni.dev/docs/conventions/#well-known-capabilities func WithCapability(name string, capability interface{}) NamespaceOpts { return func(c *Namespace) error { c.capabilityArgs[name] = capability diff --git a/vendor/github.com/containerd/nydus-snapshotter/LICENSE b/vendor/github.com/containerd/nydus-snapshotter/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
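The go-cni hunks above add `SetupSerially` alongside the existing (concurrent) `Setup`; attaching interfaces strictly in declaration order matters when a later network depends on an earlier one. A hedged usage sketch, with placeholder conf/bin paths, container ID, and netns path; the option and type names (`New`, `Load`, `WithDefaultConf`, `PortMapping`) are go-cni's existing API:

```go
package main

import (
	"context"
	"fmt"

	gocni "github.com/containerd/go-cni"
)

func main() {
	ctx := context.Background()

	l, err := gocni.New(
		gocni.WithMinNetworkCount(2),
		gocni.WithPluginConfDir("/etc/cni/net.d"), // placeholder paths
		gocni.WithPluginDir([]string{"/opt/cni/bin"}),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
		fmt.Println(err)
		return
	}

	// SetupSerially attaches each configured network one at a time,
	// in order, instead of launching the attachments concurrently.
	result, err := l.SetupSerially(ctx, "container-id", "/var/run/netns/example",
		gocni.WithCapabilityPortMap([]gocni.PortMapping{{
			HostPort:      8080,
			ContainerPort: 80,
			Protocol:      "tcp",
		}}))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("attached %d interfaces\n", len(result.Interfaces))
}
```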
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go new file mode 100644 index 000000000000..b7b9f2a2b7bd --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +const ( + ManifestOSFeatureNydus = "nydus.remoteimage.v1" + MediaTypeNydusBlob = "application/vnd.oci.image.layer.nydus.blob.v1" + BootstrapFileNameInLayer = "image/image.boot" + + ManifestNydusCache = "containerd.io/snapshot/nydus-cache" + + LayerAnnotationFSVersion = "containerd.io/snapshot/nydus-fs-version" + LayerAnnotationNydusBlob = "containerd.io/snapshot/nydus-blob" + LayerAnnotationNydusBlobDigest = "containerd.io/snapshot/nydus-blob-digest" + LayerAnnotationNydusBlobSize = "containerd.io/snapshot/nydus-blob-size" + LayerAnnotationNydusBlobIDs = "containerd.io/snapshot/nydus-blob-ids" + LayerAnnotationNydusBootstrap = "containerd.io/snapshot/nydus-bootstrap" + LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid" + + LayerAnnotationNydusReferenceBlobIDs = "containerd.io/snapshot/nydus-reference-blob-ids" + + LayerAnnotationUncompressed = "containerd.io/uncompressed" +) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go new file mode 100644 index 000000000000..dc0130aefece --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -0,0 +1,839 @@ +//go:build !windows +// +build !windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/labels" + "github.com/containerd/fifo" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/containerd/nydus-snapshotter/pkg/converter/tool" + "github.com/containerd/nydus-snapshotter/pkg/errdefs" +) + +const bootstrapNameInTar = "image.boot" +const blobNameInTar = "image.blob" + +const envNydusBuilder = "NYDUS_BUILDER" +const envNydusWorkDir = "NYDUS_WORKDIR" + +const configGCLabelKey = "containerd.io/gc.ref.content.config" + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +func getBuilder(specifiedPath string) string { + if specifiedPath != "" { + return specifiedPath + } + + builderPath := os.Getenv(envNydusBuilder) + if builderPath != "" { + return builderPath + } + + return "nydus-image" +} + +func ensureWorkDir(specifiedBasePath string) (string, error) { + var baseWorkDir string + + if specifiedBasePath != "" { + baseWorkDir = specifiedBasePath + } else { + baseWorkDir = os.Getenv(envNydusWorkDir) + } + if baseWorkDir == "" { + baseWorkDir = os.TempDir() + } + + if err := os.MkdirAll(baseWorkDir, 0750); err != nil { + return "", errors.Wrapf(err, "create base directory %s", baseWorkDir) + } + + workDirPath, err := os.MkdirTemp(baseWorkDir, "nydus-converter-") + if err != nil { + return "", errors.Wrap(err, "create work directory") + } + + return workDirPath, nil +} + +// Unpack a OCI formatted tar stream into a directory. 
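A note on the converter's `bufPool` above: it pools `*[]byte` rather than `[]byte` so that `Get`/`Put` avoid allocating a fresh slice header on every use, and the 1 MiB buffers feed `io.CopyBuffer` on the hot copy paths. The same pattern in isolation (`copyPooled` is an illustrative helper):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
)

// Pool pointers to slices, as convert_unix.go does, so the interface
// boxing in sync.Pool does not allocate per call.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 1<<20)
		return &b
	},
}

func copyPooled(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)
	return io.CopyBuffer(dst, src, *buf)
}

func main() {
	n, err := copyPooled(os.Stdout, strings.NewReader("pooled copy\n"))
	fmt.Println(n, err)
}
```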
+func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { + ds, err := compression.DecompressStream(reader) + if err != nil { + return errors.Wrap(err, "unpack stream") + } + defer ds.Close() + + if _, err := archive.Apply( + ctx, + dst, + ds, + archive.WithConvertWhiteout(func(hdr *tar.Header, file string) (bool, error) { + // Keep to extract all whiteout files. + return true, nil + }), + ); err != nil { + return errors.Wrap(err, "apply with convert whiteout") + } + + return nil +} + +// Unpack a Nydus formatted tar stream into a directory. +func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error { + boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to bootstrap %s", bootDst) + } + defer boot.Close() + + if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil { + return errors.Wrap(err, "unpack bootstrap from nydus") + } + + blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to blob %s", blobDst) + } + defer blob.Close() + + if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil { + return errors.Wrap(err, "unpack blob from nydus") + } + + return nil +} + +// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). +// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { + cur := ra.Size() + reader := newSeekReader(ra) + + const headerSize = 512 + + // Seek from tail to head of nydus formatted tar stream to find nydus + // bootstrap data. + for { + if headerSize > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + + // Try to seek to the part of tar header. + var err error + cur, err = reader.Seek(cur-headerSize, io.SeekCurrent) + if err != nil { + return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + } + + tr := tar.NewReader(reader) + // Parse tar header. + hdr, err := tr.Next() + if err != nil { + return errors.Wrap(err, "parse tar header") + } + + if hdr.Name == bootstrapNameInTar { + // Try to seek to the part of tar data (bootstrap_data). + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + bootstrapOffset := cur - hdr.Size + _, err = reader.Seek(bootstrapOffset, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to bootstrap data offset") + } + + // Copy tar data (bootstrap_data) to provided target writer. + if _, err := io.CopyN(target, reader, hdr.Size); err != nil { + return errors.Wrap(err, "copy bootstrap data to reader") + } + + return nil + } + + if cur == hdr.Size { + break + } + } + + return fmt.Errorf("can't find bootstrap in nydus tar") +} + +// Unpack the blob from nydus formatted tar stream (blob + bootstrap). +// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { + cur := ra.Size() + reader := newSeekReader(ra) + + const headerSize = 512 + + // Seek from tail to head of nydus formatted tar stream to find nydus + // bootstrap data. + for { + if headerSize > cur { + break + } + + // Try to seek to the part of tar header. 
+ var err error + cur, err = reader.Seek(cur-headerSize, io.SeekStart) + if err != nil { + return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + } + + tr := tar.NewReader(reader) + // Parse tar header. + hdr, err := tr.Next() + if err != nil { + return errors.Wrap(err, "parse tar header") + } + + if hdr.Name == bootstrapNameInTar { + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + cur, err = reader.Seek(cur-hdr.Size, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to bootstrap data offset") + } + } else if hdr.Name == blobNameInTar { + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + _, err = reader.Seek(cur-hdr.Size, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to blob data offset") + } + if _, err := io.CopyN(target, reader, hdr.Size); err != nil { + return errors.Wrap(err, "copy blob data to reader") + } + return nil + } + } + + return nil +} + +// Pack converts an OCI tar stream to nydus formatted stream with a tar-like +// structure that arranges the data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +// +// The caller should write OCI tar stream into the returned `io.WriteCloser`, +// then the Pack method will write the nydus formatted stream to `dest` +// provided by the caller. +// +// Important: the caller must check `io.WriteCloser.Close() == nil` to ensure +// the conversion workflow is finished. +func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + workDir, err := ensureWorkDir(opt.WorkDir) + if err != nil { + return nil, errors.Wrap(err, "ensure work directory") + } + defer func() { + if err != nil { + os.RemoveAll(workDir) + } + }() + + sourceDir := filepath.Join(workDir, "source") + if err := os.MkdirAll(sourceDir, 0755); err != nil { + return nil, errors.Wrap(err, "create source directory") + } + + pr, pw := io.Pipe() + + unpackDone := make(chan bool, 1) + go func() { + if err := unpackOciTar(ctx, sourceDir, pr); err != nil { + pr.CloseWithError(errors.Wrapf(err, "unpack to %s", sourceDir)) + close(unpackDone) + return + } + unpackDone <- true + }() + + wc := newWriteCloser(pw, func() error { + defer func() { + os.RemoveAll(workDir) + }() + + // Because PipeWriter#Close is called does not mean that the PipeReader + // has finished reading all the data, and unpack may not be complete yet, + // so we need to wait for that here. + <-unpackDone + + blobPath := filepath.Join(workDir, "blob") + blobFifo, err := fifo.OpenFifo(ctx, blobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644) + if err != nil { + return errors.Wrapf(err, "create fifo file") + } + defer blobFifo.Close() + + go func() { + err := tool.Pack(tool.PackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + + BlobPath: blobPath, + FsVersion: opt.FsVersion, + SourcePath: sourceDir, + ChunkDictPath: opt.ChunkDictPath, + PrefetchPatterns: opt.PrefetchPatterns, + Compressor: opt.Compressor, + Timeout: opt.Timeout, + }) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir)) + blobFifo.Close() + } + }() + + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil { + return errors.Wrap(err, "pack nydus tar") + } + + return nil + }) + + return wc, nil +} + +// Merge multiple nydus bootstraps (from each layer of image) to a final +// bootstrap. 
Because enabling the `ChunkDictPath`
+// option may deduplicate data across layers, it also returns the actual blob
+// digests referenced by the final bootstrap.
+func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) ([]digest.Digest, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	eg, ctx := errgroup.WithContext(ctx)
+	sourceBootstrapPaths := []string{}
+	for idx := range layers {
+		sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex()))
+		eg.Go(func(idx int) func() error {
+			return func() error {
+				layer := layers[idx]
+
+				// Use the hex hash string of whole tar blob as the bootstrap name.
+				bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex()))
+				if err != nil {
+					return errors.Wrap(err, "create source bootstrap")
+				}
+				defer bootstrap.Close()
+
+				if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil {
+					return errors.Wrap(err, "unpack nydus tar")
+				}
+
+				return nil
+			}
+		}(idx))
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, errors.Wrap(err, "unpack all bootstraps")
+	}
+
+	targetBootstrapPath := filepath.Join(workDir, "bootstrap")
+
+	blobDigests, err := tool.Merge(tool.MergeOption{
+		BuilderPath: getBuilder(opt.BuilderPath),
+
+		SourceBootstrapPaths: sourceBootstrapPaths,
+		TargetBootstrapPath:  targetBootstrapPath,
+		ChunkDictPath:        opt.ChunkDictPath,
+		PrefetchPatterns:     opt.PrefetchPatterns,
+		OutputJSONPath:       filepath.Join(workDir, "merge-output.json"),
+		Timeout:              opt.Timeout,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "merge bootstrap")
+	}
+
+	var rc io.ReadCloser
+
+	if opt.WithTar {
+		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false)
+		if err != nil {
+			return nil, errors.Wrap(err, "pack bootstrap to tar")
+		}
+	} else {
+		rc, err = os.Open(targetBootstrapPath)
+		if err != nil {
+			return nil, errors.Wrap(err, "open target bootstrap")
+		}
+	}
+	defer rc.Close()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err = io.CopyBuffer(dest, rc, *buffer); err != nil {
+		return nil, errors.Wrap(err, "copy merged bootstrap")
+	}
+
+	return blobDigests, nil
+}
+
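For orientation, a usage sketch for the function defined next: it streams one nydus blob layer out of a containerd content store and reconstructs the original OCI tar. The store, descriptor, and output path are assumptions for illustration.

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/containerd/content"
	"github.com/containerd/nydus-snapshotter/pkg/converter"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// unpackLayer writes the OCI tar reconstructed from a nydus blob layer to
// a local file. cs and desc are assumed to come from a containerd content
// store and an image manifest respectively.
func unpackLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) error {
	ra, err := cs.ReaderAt(ctx, desc)
	if err != nil {
		return err
	}
	defer ra.Close()

	out, err := os.Create("layer.tar") // placeholder destination
	if err != nil {
		return err
	}
	defer out.Close()

	// An empty UnpackOption falls back to $NYDUS_BUILDER (or PATH) for the
	// builder binary and to $NYDUS_WORKDIR (or the temp dir) for scratch space.
	return converter.Unpack(ctx, ra, out, converter.UnpackOption{})
}
```

+// Unpack converts a nydus blob layer to an OCI formatted tar stream.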
+func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt UnpackOption) error {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar)
+	if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil {
+		return errors.Wrap(err, "unpack nydus tar")
+	}
+
+	tarPath := filepath.Join(workDir, "oci.tar")
+	blobFifo, err := fifo.OpenFifo(ctx, tarPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+	if err != nil {
+		return errors.Wrapf(err, "create fifo file")
+	}
+	defer blobFifo.Close()
+
+	unpackErrChan := make(chan error)
+	go func() {
+		defer close(unpackErrChan)
+		err := tool.Unpack(tool.UnpackOption{
+			BuilderPath:   getBuilder(opt.BuilderPath),
+			BootstrapPath: bootPath,
+			BlobPath:      blobPath,
+			TarPath:       tarPath,
+			Timeout:       opt.Timeout,
+		})
+		if err != nil {
+			blobFifo.Close()
+			unpackErrChan <- err
+		}
+	}()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
+		if unpackErr := <-unpackErrChan; unpackErr != nil {
+			return errors.Wrap(unpackErr, "unpack")
+		}
+		return errors.Wrap(err, "copy oci tar")
+	}
+
+	return nil
+}
+
+// IsNydusBlobAndExists returns true when the specified digest of content exists in
+// the content store and is in nydus blob format.
+func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool {
+	_, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return false
+	}
+
+	return IsNydusBlob(ctx, desc)
+}
+
+// IsNydusBlob returns true when the specified descriptor is in nydus blob format.
+func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool {
+	if desc.Annotations == nil {
+		return false
+	}
+
+	_, hasAnno := desc.Annotations[LayerAnnotationNydusBlob]
+	return hasAnno
+}
+
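The annotation probes above are how other tooling recognizes nydus layers. As a small, hedged example, a helper that counts the nydus blobs in an already-decoded manifest (the names here are illustrative):

```go
package example

import (
	"context"

	"github.com/containerd/nydus-snapshotter/pkg/converter"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// countNydusBlobs reports how many layers of a manifest carry the
// LayerAnnotationNydusBlob marker checked by IsNydusBlob.
func countNydusBlobs(ctx context.Context, manifest ocispec.Manifest) int {
	n := 0
	for _, layer := range manifest.Layers {
		if converter.IsNydusBlob(ctx, layer) {
			n++
		}
	}
	return n
}
```

+// LayerConvertFunc returns a function which converts an OCI image layer to
+// a nydus blob layer, and sets the media type to "application/vnd.oci.image.layer.nydus.blob.v1".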
+func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + if !images.IsLayerType(desc.MediaType) { + return nil, nil + } + + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, errors.Wrap(err, "get source blob reader") + } + defer ra.Close() + rdr := io.NewSectionReader(ra, 0, ra.Size()) + + ref := fmt.Sprintf("convert-nydus-from-%s", desc.Digest) + dst, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, errors.Wrap(err, "open blob writer") + } + defer dst.Close() + + tr, err := compression.DecompressStream(rdr) + if err != nil { + return nil, errors.Wrap(err, "decompress blob stream") + } + + digester := digest.SHA256.Digester() + pr, pw := io.Pipe() + tw, err := Pack(ctx, io.MultiWriter(pw, digester.Hash()), opt) + if err != nil { + return nil, errors.Wrap(err, "pack tar to nydus") + } + + go func() { + defer pw.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(tw, tr, *buffer); err != nil { + pw.CloseWithError(err) + return + } + if err := tr.Close(); err != nil { + pw.CloseWithError(err) + return + } + if err := tw.Close(); err != nil { + pw.CloseWithError(err) + return + } + }() + + if err := content.Copy(ctx, dst, pr, 0, ""); err != nil { + return nil, errors.Wrap(err, "copy nydus blob to content store") + } + + blobDigest := digester.Digest() + info, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, errors.Wrapf(err, "get blob info %s", blobDigest) + } + if info.Labels == nil { + info.Labels = map[string]string{} + } + // Write a diff id label of layer in content store for simplifying + // diff id calculation to speed up the conversion. + // See: https://github.com/containerd/containerd/blob/e4fefea5544d259177abb85b64e428702ac49c97/images/diffid.go#L49 + info.Labels[labels.LabelUncompressed] = blobDigest.String() + _, err = cs.Update(ctx, info) + if err != nil { + return nil, errors.Wrap(err, "update layer label") + } + + newDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: info.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + // Use `containerd.io/uncompressed` to generate DiffID of + // layer defined in OCI spec. + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + + if opt.Backend != nil { + blobRa, err := cs.ReaderAt(ctx, newDesc) + if err != nil { + return nil, errors.Wrap(err, "get nydus blob reader") + } + defer blobRa.Close() + + if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil { + return nil, errors.Wrap(err, "push to storage backend") + } + } + + return &newDesc, nil + } +} + +// ConvertHookFunc returns a function which will be used as a callback +// called for each blob after conversion is done. The function only hooks +// the index conversion and the manifest conversion. 
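Before ConvertHookFunc's definition just below, here is how the two entry points are typically combined with containerd's image converter. This is a hedged sketch: it assumes a containerd client and a containerd version whose `images/converter` package exposes `ConvertHooks` and `IndexConvertFuncWithHook` (which the `converter.ConvertHookFunc` return type below implies); the image references and options are placeholders.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	ctrdconverter "github.com/containerd/containerd/images/converter"
	"github.com/containerd/containerd/platforms"
	nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
)

// convertToNydus rewrites srcRef into a nydus image stored under dstRef.
// Layer payloads are converted by LayerConvertFunc; ConvertHookFunc then
// merges the per-layer bootstraps and patches the manifest and index.
func convertToNydus(ctx context.Context, client *containerd.Client, srcRef, dstRef string) (*images.Image, error) {
	packOpt := nydusify.PackOption{FsVersion: "6"}
	mergeOpt := nydusify.MergeOption{FsVersion: "6"}

	convertFunc := ctrdconverter.IndexConvertFuncWithHook(
		nydusify.LayerConvertFunc(packOpt),
		true, // docker2oci: normalize Docker media types to OCI
		platforms.DefaultStrict(),
		ctrdconverter.ConvertHooks{PostConvertHook: nydusify.ConvertHookFunc(mergeOpt)},
	)
	return ctrdconverter.Convert(ctx, client, dstRef, srcRef,
		ctrdconverter.WithIndexConvertFunc(convertFunc))
}
```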
+func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { + return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + switch { + case images.IsIndexType(newDesc.MediaType): + return convertIndex(ctx, cs, orgDesc, newDesc) + case images.IsManifestType(newDesc.MediaType): + return convertManifest(ctx, cs, newDesc, opt) + default: + return newDesc, nil + } + } +} + +// convertIndex modifies the original index by appending "nydus.remoteimage.v1" +// to the Platform.OSFeatures of each modified manifest descriptors. +func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + var orgIndex ocispec.Index + if _, err := readJSON(ctx, cs, &orgIndex, orgDesc); err != nil { + return nil, errors.Wrap(err, "read target image index json") + } + // isManifestModified is a function to check whether the manifest is modified. + isManifestModified := func(manifest ocispec.Descriptor) bool { + for _, oldManifest := range orgIndex.Manifests { + if manifest.Digest == oldManifest.Digest { + return false + } + } + return true + } + + var index ocispec.Index + indexLabels, err := readJSON(ctx, cs, &index, *newDesc) + if err != nil { + return nil, errors.Wrap(err, "read index json") + } + for i, manifest := range index.Manifests { + if !isManifestModified(manifest) { + // Skip the manifest which is not modified. + continue + } + manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus) + index.Manifests[i] = manifest + } + // Update image index in content store. + newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels) + if err != nil { + return nil, errors.Wrap(err, "write index json") + } + return newIndexDesc, nil +} + +// convertManifest merges all the nydus blob layers into a +// nydus bootstrap layer, update the image config, +// and modify the image manifest. +func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { + var manifest ocispec.Manifest + manifestDesc := *newDesc + manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc) + if err != nil { + return nil, errors.Wrap(err, "read manifest json") + } + + // Append bootstrap layer to manifest. + bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{ + BuilderPath: opt.BuilderPath, + WorkDir: opt.WorkDir, + ChunkDictPath: opt.ChunkDictPath, + FsVersion: opt.FsVersion, + WithTar: true, + }) + if err != nil { + return nil, errors.Wrap(err, "merge nydus layers") + } + if opt.Backend != nil { + // Only append nydus bootstrap layer into manifest, and do not put nydus + // blob layer into manifest if blob storage backend is specified. + manifest.Layers = []ocispec.Descriptor{*bootstrapDesc} + } else { + for idx, blobDesc := range blobDescs { + blobGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", idx) + manifestLabels[blobGCLabelKey] = blobDesc.Digest.String() + } + // Affected by chunk dict, the blob list referenced by final bootstrap + // are from different layers, part of them are from original layers, part + // from chunk dict bootstrap, so we need to rewrite manifest's layers here. 
+ manifest.Layers = append(blobDescs, *bootstrapDesc) + } + + // Update the gc label of bootstrap layer + bootstrapGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", len(manifest.Layers)-1) + manifestLabels[bootstrapGCLabelKey] = bootstrapDesc.Digest.String() + + // Rewrite diff ids and remove useless annotation. + var config ocispec.Image + configLabels, err := readJSON(ctx, cs, &config, manifest.Config) + if err != nil { + return nil, errors.Wrap(err, "read image config") + } + if opt.Backend != nil { + config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])} + } else { + config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers)) + for i, layer := range manifest.Layers { + config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, digest.Digest(layer.Annotations[LayerAnnotationUncompressed])) + // Remove useless annotation. + delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed) + } + } + // Update image config in content store. + newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels) + if err != nil { + return nil, errors.Wrap(err, "write image config") + } + manifest.Config = *newConfigDesc + // Update the config gc label + manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String() + + // Update image manifest in content store. + newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels) + if err != nil { + return nil, errors.Wrap(err, "write manifest") + } + + return newManifestDesc, nil +} + +// MergeLayers merges a list of nydus blob layer into a nydus bootstrap layer. +// The media type of the nydus bootstrap layer is "application/vnd.oci.image.layer.v1.tar+gzip". +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, []ocispec.Descriptor, error) { + // Extracts nydus bootstrap from nydus format for each layer. + layers := []Layer{} + + var chainID digest.Digest + for _, blobDesc := range descs { + ra, err := cs.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + } + defer ra.Close() + layers = append(layers, Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + if chainID == "" { + chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + } else { + chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + } + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + blobDigestChan := make(chan []digest.Digest, 1) + go func() { + defer pw.Close() + blobDigests, err := Merge(ctx, layers, pw, opt) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + blobDigestChan <- blobDigests + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. 
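+	// The MultiWriter below tees the uncompressed bootstrap bytes into a
+	// digester, so the layer DiffID (recorded through the
+	// containerd.io/uncompressed label and annotation) is computed in the
+	// same pass as the gzip compression.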
+ cw, err := content.OpenWriter(ctx, cs, content.WithRef("nydus-merge-"+chainID.String())) + if err != nil { + return nil, nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(compressed, pr, *buffer); err != nil { + return nil, nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close content store writer") + } + + bootstrapInfo, err := cs.Info(ctx, compressedDgst) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + + blobDigests := <-blobDigestChan + blobDescs := []ocispec.Descriptor{} + blobIDs := []string{} + for _, blobDigest := range blobDigests { + blobInfo, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + blobDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: blobInfo.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + blobDescs = append(blobDescs, blobDesc) + blobIDs = append(blobIDs, blobDigest.Hex()) + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, nil, errors.Wrap(err, "marshal blob ids") + } + + if opt.FsVersion == "" { + opt.FsVersion = "5" + } + + bootstrapDesc := ocispec.Descriptor{ + Digest: compressedDgst, + Size: bootstrapInfo.Size, + MediaType: ocispec.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + LayerAnnotationFSVersion: opt.FsVersion, + // Use this annotation to identify nydus bootstrap layer. + LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. + LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &bootstrapDesc, blobDescs, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go new file mode 100644 index 000000000000..12cb53ed5373 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go @@ -0,0 +1,51 @@ +//go:build windows +// +build windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images/converter" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + panic("not implemented") +} + +func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) error { + panic("not implemented") +} + +func Unpack(ctx context.Context, ia content.ReaderAt, dest io.Writer, opt UnpackOption) error { + panic("not implemented") +} + +func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + panic("not implemented") +} + +func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { + panic("not implemented") +} + +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { + panic("not implemented") +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go new file mode 100644 index 000000000000..55e98cc09704 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package tool + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/containerd/nydus-snapshotter/pkg/errdefs" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var logger = logrus.WithField("module", "builder") + +type PackOption struct { + BuilderPath string + + BootstrapPath string + BlobPath string + FsVersion string + SourcePath string + ChunkDictPath string + PrefetchPatterns string + Compressor string + Timeout *time.Duration +} + +type MergeOption struct { + BuilderPath string + + SourceBootstrapPaths []string + TargetBootstrapPath string + ChunkDictPath string + PrefetchPatterns string + OutputJSONPath string + Timeout *time.Duration +} + +type UnpackOption struct { + BuilderPath string + BootstrapPath string + BlobPath string + TarPath string + Timeout *time.Duration +} + +type outputJSON struct { + Blobs []string +} + +func Pack(option PackOption) error { + if option.FsVersion == "" { + option.FsVersion = "5" + } + + args := []string{ + "create", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--blob", + option.BlobPath, + "--source-type", + "directory", + "--whiteout-spec", + "none", + "--fs-version", + option.FsVersion, + "--inline-bootstrap", + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + if option.Compressor != "" { + args = append(args, "--compressor", option.Compressor) + } + args = append(args, option.SourcePath) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, 
strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} + +func Merge(option MergeOption) ([]digest.Digest, error) { + args := []string{ + "merge", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--output-json", + option.OutputJSONPath, + "--bootstrap", + option.TargetBootstrapPath, + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + args = append(args, option.SourceBootstrapPaths...) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return nil, errors.Wrap(err, "run merge command") + } + + outputBytes, err := ioutil.ReadFile(option.OutputJSONPath) + if err != nil { + return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath) + } + var output outputJSON + err = json.Unmarshal(outputBytes, &output) + if err != nil { + return nil, errors.Wrapf(err, "unmarshal output json file %s", option.OutputJSONPath) + } + + blobDigests := []digest.Digest{} + for _, blobID := range output.Blobs { + blobDigests = append(blobDigests, digest.NewDigestFromHex(string(digest.SHA256), blobID)) + } + + return blobDigests, nil +} + +func Unpack(option UnpackOption) error { + args := []string{ + "unpack", + "--log-level", + "warn", + "--bootstrap", + option.BootstrapPath, + "--output", + option.TarPath, + } + if option.BlobPath != "" { + args = append(args, "--blob", option.BlobPath) + } + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) 
+ cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go new file mode 100644 index 000000000000..9d0590a0c964 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "context" + "time" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" +) + +type Layer struct { + // Digest represents the hash of whole tar blob. + Digest digest.Digest + // ReaderAt holds the reader of whole tar blob. + ReaderAt content.ReaderAt +} + +// Backend uploads blobs generated by nydus-image builder to a backend storage such as: +// - oss: A object storage backend, which uses its SDK to upload blob file. +type Backend interface { + // Push pushes specified blob file to remote storage backend. + Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error + // Check checks whether a blob exists in remote storage backend, + // blob exists -> return (blobPath, nil) + // blob not exists -> return ("", err) + Check(blobDigest digest.Digest) (string, error) + // Type returns backend type name. + Type() string +} + +type PackOption struct { + // WorkDir is used as the work directory during layer pack. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. + BuilderPath string + // FsVersion specifies nydus RAFS format version, possible + // values: `5`, `6` (EROFS-compatible), default is `5`. + FsVersion string + // ChunkDictPath holds the bootstrap path of chunk dict image. + ChunkDictPath string + // PrefetchPatterns holds file path pattern list want to prefetch. + PrefetchPatterns string + // Compressor specifies nydus blob compression algorithm. + Compressor string + // Backend uploads blobs generated by nydus-image builder to a backend storage. + Backend Backend + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} + +type MergeOption struct { + // WorkDir is used as the work directory during layer merge. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. + BuilderPath string + // FsVersion specifies nydus RAFS format version, possible + // values: `5`, `6` (EROFS-compatible), default is `5`. + FsVersion string + // ChunkDictPath holds the bootstrap path of chunk dict image. + ChunkDictPath string + // PrefetchPatterns holds file path pattern list want to prefetch. + PrefetchPatterns string + // WithTar puts bootstrap into a tar stream (no gzip). + WithTar bool + // Backend uploads blobs generated by nydus-image builder to a backend storage. + Backend Backend + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} + +type UnpackOption struct { + // WorkDir is used as the work directory during layer unpack. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. 
+ BuilderPath string + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go new file mode 100644 index 000000000000..849d870b3409 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type writeCloser struct { + closed bool + io.WriteCloser + action func() error +} + +func (c *writeCloser) Close() error { + if c.closed { + return nil + } + + if err := c.WriteCloser.Close(); err != nil { + return err + } + c.closed = true + + if err := c.action(); err != nil { + return err + } + + return nil +} + +func newWriteCloser(wc io.WriteCloser, action func() error) *writeCloser { + return &writeCloser{ + WriteCloser: wc, + action: action, + } +} + +type seekReader struct { + io.ReaderAt + pos int64 +} + +func (ra *seekReader) Read(p []byte) (int, error) { + n, err := ra.ReaderAt.ReadAt(p, ra.pos) + ra.pos += int64(len(p)) + return n, err +} + +func (ra *seekReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + ra.pos += offset + } else if whence == io.SeekStart { + ra.pos = offset + } else { + return 0, fmt.Errorf("unsupported whence %d", whence) + } + return ra.pos, nil +} + +func newSeekReader(ra io.ReaderAt) *seekReader { + return &seekReader{ + ReaderAt: ra, + pos: 0, + } +} + +// packToTar makes .tar(.gz) stream of file named `name` and return reader. +func packToTar(src string, name string, compress bool) (io.ReadCloser, error) { + fi, err := os.Stat(src) + if err != nil { + return nil, err + } + + dirHdr := &tar.Header{ + Name: filepath.Dir(name), + Mode: 0755, + Typeflag: tar.TypeDir, + } + + hdr := &tar.Header{ + Name: name, + Mode: 0444, + Size: fi.Size(), + } + + reader, writer := io.Pipe() + + go func() { + // Prepare targz writer + var tw *tar.Writer + var gw *gzip.Writer + var err error + var file *os.File + + if compress { + gw = gzip.NewWriter(writer) + tw = tar.NewWriter(gw) + } else { + tw = tar.NewWriter(writer) + } + + defer func() { + err1 := tw.Close() + var err2 error + if gw != nil { + err2 = gw.Close() + } + + var finalErr error + + // Return the first error encountered to the other end and ignore others. + if err != nil { + finalErr = err + } else if err1 != nil { + finalErr = err1 + } else if err2 != nil { + finalErr = err2 + } + + writer.CloseWithError(finalErr) + }() + + file, err = os.Open(src) + if err != nil { + return + } + defer file.Close() + + // Write targz stream + if err = tw.WriteHeader(dirHdr); err != nil { + return + } + + if err = tw.WriteHeader(hdr); err != nil { + return + } + + if _, err = io.Copy(tw, file); err != nil { + return + } + }() + + return reader, nil +} + +// Copied from containerd/containerd project, copyright The containerd Authors. 
+// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L385 +func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) { + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + labels := info.Labels + b, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, x); err != nil { + return nil, err + } + return labels, nil +} + +// Copied from containerd/containerd project, copyright The containerd Authors. +// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L401 +func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) { + b, err := json.Marshal(x) + if err != nil { + return nil, err + } + dgst := digest.SHA256.FromBytes(b) + ref := fmt.Sprintf("converter-write-json-%s", dgst.String()) + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, err + } + if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { + return nil, err + } + if err := w.Close(); err != nil { + return nil, err + } + newDesc := oldDesc + newDesc.Size = int64(len(b)) + newDesc.Digest = dgst + return &newDesc, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go new file mode 100644 index 000000000000..3bdf74cb9ddf --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020. Ant Group. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package errdefs + +import ( + stderrors "errors" + "net" + "strings" + "syscall" + + "github.com/pkg/errors" +) + +const signalKilled = "signal: killed" + +var ( + ErrAlreadyExists = errors.New("already exists") + ErrNotFound = errors.New("not found") +) + +// IsAlreadyExists returns true if the error is due to already exists +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsSignalKilled returns true if the error is signal killed +func IsSignalKilled(err error) bool { + return strings.Contains(err.Error(), signalKilled) +} + +// IsConnectionClosed returns true if error is due to connection closed +// this is used when snapshotter closed by sig term +func IsConnectionClosed(err error) bool { + switch err := err.(type) { + case *net.OpError: + return err.Err.Error() == "use of closed network connection" + default: + return false + } +} + +func IsErofsMounted(err error) bool { + return stderrors.Is(err, syscall.EBUSY) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go index d438be381590..417ae4b2b822 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go +++ b/vendor/github.com/containerd/stargz-snapshotter/cache/cache.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -365,7 +364,7 @@ func (dc *directoryCache) cachePath(key string) string { } func (dc *directoryCache) wipFile(key string) (*os.File, error) { - return ioutil.TempFile(dc.wipDirectory, key+"-*") + return os.CreateTemp(dc.wipDirectory, key+"-*") } func NewMemoryCache() BlobCache { diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 9ee97fc91105..b071cea51dde 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,10 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -48,6 +48,8 @@ type options struct { prioritizedFiles []string missedPrioritizedFiles *[]string compression Compression + ctx context.Context + minChunkSize int } type Option func(o *options) error @@ -62,6 +64,7 @@ func WithChunkSize(chunkSize int) Option { // WithCompressionLevel option specifies the gzip compression level. // The default is gzip.BestCompression. +// This option will be ignored if WithCompression option is used. // See also: https://godoc.org/compress/gzip#pkg-constants func WithCompressionLevel(level int) Option { return func(o *options) error { @@ -104,6 +107,26 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + +// WithMinChunkSize option specifies the minimal number of bytes of data +// must be written in one gzip stream. +// By increasing this number, one gzip stream can contain multiple files +// and it hopefully leads to smaller result blob. +// NOTE: This adds a TOC property that old reader doesn't understand. 
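Before the option's definition just below, a hedged sketch of how the new knobs compose in a Build call; the input tar is assumed to be available as an `*io.SectionReader`:

```go
package example

import (
	"context"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// buildEStargz converts an uncompressed tar into an eStargz blob,
// splitting large files at 4 MiB and packing small entries together
// until each gzip stream holds at least 64 KiB.
func buildEStargz(ctx context.Context, tarBlob *io.SectionReader) (*estargz.Blob, error) {
	return estargz.Build(tarBlob,
		estargz.WithContext(ctx),         // clean cancellation of temp files
		estargz.WithChunkSize(4<<20),     // upper bound per chunk
		estargz.WithMinChunkSize(64<<10), // lower bound per gzip stream (new)
	)
}
```

As the Build changes elsewhere in this diff note, a non-zero minimum routes all entries into a single part, trading parallel compression for a smaller result blob.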
+func WithMinChunkSize(minChunkSize int) Option { + return func(o *options) error { + o.minChunkSize = minChunkSize + return nil + } +} + // Blob is an eStargz blob. type Blob struct { io.ReadCloser @@ -139,12 +162,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -154,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { if err != nil { return nil, err } - tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. + tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } writers := make([]*Writer, len(tarParts)) payloads := make([]*os.File, len(tarParts)) var mu sync.Mutex @@ -169,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } sw := NewWriterWithCompressor(esgzFile, opts.compression) sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { return err } @@ -183,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = err return nil, err } - tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + tocAndFooter, tocDgst, err := closeWithCombine(writers...) if err != nil { rErr = err return nil, err @@ -226,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { // Writers doesn't write TOC and footer to the underlying writers so they can be // combined into a single eStargz and tocAndFooter returned by this function can // be appended at the tail of that combined blob. 
-func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { +func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { if len(ws) == 0 { return nil, "", fmt.Errorf("at least one writer must be passed") } @@ -369,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader { func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} - pw, err := newCountReader(in) + pw, err := newCountReadSeeker(in) if err != nil { return nil, fmt.Errorf("failed to make position watcher: %w", err) } @@ -506,12 +560,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +576,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error @@ -537,19 +599,19 @@ func (tf *tempFiles) CleanupAll() error { return errorutil.Aggregate(allErr) } -func newCountReader(r io.ReaderAt) (*countReader, error) { +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { pos := int64(0) - return &countReader{r: r, cPos: &pos}, nil + return &countReadSeeker{r: r, cPos: &pos}, nil } -type countReader struct { +type countReadSeeker struct { r io.ReaderAt cPos *int64 mu sync.Mutex } -func (cr *countReader) Read(p []byte) (int, error) { +func (cr *countReadSeeker) Read(p []byte) (int, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -560,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) { return n, err } -func (cr *countReader) Seek(offset int64, whence int) (int64, error) { +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -581,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) { return offset, nil } -func (cr *countReader) currentPos() int64 { +func (cr *countReadSeeker) currentPos() int64 { cr.mu.Lock() defer cr.mu.Unlock() diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 4b655c14532f..f4d55465584e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -31,7 +31,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -151,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { allErr = append(allErr, err) continue } - if tocSize <= 0 { + if tocOffset >= 0 && tocSize <= 0 { tocSize = sr.Size() - tocOffset - fSize } - if tocSize < int64(len(maybeTocBytes)) { + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { maybeTocBytes = maybeTocBytes[:tocSize] } r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) @@ -208,8 +207,16 @@ func (r *Reader) initFields() error { uname := map[int]string{} gname := map[int]string{} var lastRegEnt *TOCEntry - for _, ent := range r.toc.Entries { + var chunkTopIndex int + for i, ent := range r.toc.Entries { ent.Name 
= cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } if ent.Type == "reg" { lastRegEnt = ent } @@ -295,7 +302,7 @@ func (r *Reader) initFields() error { if e.isDataType() { e.nextOffset = lastOffset } - if e.Offset != 0 { + if e.Offset != 0 && e.InnerOffset == 0 { lastOffset = e.Offset } } @@ -489,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { // // Name must be absolute path or one that is relative to root. func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { name = cleanEntryName(name) ent, ok := r.Lookup(name) if !ok { @@ -506,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { Err: errors.New("not a regular file"), } } - fr := &fileReader{ + return &fileReader{ r: r, size: ent.Size, ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err } + fr.preRead = preRead return io.NewSectionReader(fr, 0, fr.size), nil } @@ -522,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { } type fileReader struct { - r *Reader - size int64 - ents []*TOCEntry // 1 or more reg/chunk entries + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error } func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { @@ -579,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { - return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") } - return io.ReadFull(dr, p) + return retN, retErr } // A Writer writes stargz files. 
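The new `OpenFileWithPreReader` entry point lets a caller capture sibling chunks that share a gzip stream with the requested file, which matters once `MinChunkSize` packs several small files into one stream. A hedged sketch of a read path that caches those extra chunks; the cache is a placeholder map:

```go
package example

import (
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// readWithPreRead reads one file from an eStargz reader while collecting
// other chunks decompressed along the way, so a caller can cache them
// instead of re-fetching the shared gzip stream later. Only chunks that
// share a stream with the requested file are observed.
func readWithPreRead(r *estargz.Reader, name string) ([]byte, map[string][]byte, error) {
	extra := map[string][]byte{} // placeholder cache keyed by entry name
	sr, err := r.OpenFileWithPreReader(name, func(e *estargz.TOCEntry, cr io.Reader) error {
		b, err := io.ReadAll(cr)
		if err != nil {
			return err
		}
		extra[e.Name] = append(extra[e.Name], b...)
		return nil
	})
	if err != nil {
		return nil, nil, err
	}
	buf := make([]byte, sr.Size())
	if _, err := sr.ReadAt(buf, 0); err != nil && err != io.EOF {
		return nil, nil, err
	}
	return buf, extra, nil
}
```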
@@ -600,11 +662,20 @@ type Writer struct { lastGroupname map[int]string compressor Compressor + uncompressedCounter *countWriteFlusher + // ChunkSize optionally controls the maximum number of bytes // of data of a regular file that can be written in one gzip // stream before a new gzip stream is started. // Zero means to use a default, currently 4 MiB. ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} } // currentCompressionWriter writes to the current w.gz field, which can @@ -647,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { if err != nil { return nil, fmt.Errorf("failed to parse footer: %w", err) } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -673,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { bw := bufio.NewWriter(w) cw := &countWriter{w: bw} return &Writer{ - bw: bw, - cw: cw, - toc: &JTOC{Version: 1}, - diffHash: sha256.New(), - compressor: c, + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, } } @@ -718,6 +793,20 @@ func (w *Writer) closeGz() error { return nil } +func (w *Writer) flushGz() error { + if w.closed { + return errors.New("flush on closed Writer") + } + if w.gz != nil { + if f, ok := w.gz.(interface { + Flush() error + }); ok { + return f.Flush() + } + } + return nil +} + // nameIfChanged returns name, unless it was the already the value of (*mp)[id], // in which case it returns the empty string. func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { @@ -737,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { func (w *Writer) condOpenGz() (err error) { if w.gz == nil { w.gz, err = w.compressor.Writer(w.cw) + if w.gz != nil { + w.gz = w.uncompressedCounter.register(w.gz) + } } return } @@ -785,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { if lossless { tr.RawAccounting = true } + prevOffset := w.cw.n + var prevOffsetUncompressed int64 for { h, err := tr.Next() if err == io.EOF { @@ -884,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { totalSize := ent.Size // save it before we destroy ent tee := io.TeeReader(tr, payloadDigest.Hash()) for written < totalSize { - if err := w.closeGz(); err != nil { - return err - } - chunkSize := int64(w.chunkSize()) remain := totalSize - written if remain < chunkSize { @@ -895,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } else { ent.ChunkSize = chunkSize } - ent.Offset = w.cw.n + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". 
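+			// A fresh gzip stream is opened only when this entry must start one
+			// (the prefetch landmark files) or when the current stream already
+			// holds at least MinChunkSize bytes; otherwise the chunk stays in
+			// the open stream and is addressed by InnerOffset.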
+ if err := w.flushGz(); err != nil { + return err + } + if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) { + if err := w.closeGz(); err != nil { + return err + } + ent.Offset = w.cw.n + prevOffset = ent.Offset + prevOffsetUncompressed = w.uncompressedCounter.n + } else { + ent.Offset = prevOffset + ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed + } + ent.ChunkOffset = written chunkDigest := digest.Canonical.Digester() @@ -933,7 +1039,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } @@ -941,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { return err } +func (w *Writer) needsOpenGz(ent *TOCEntry) bool { + if ent.Type != "reg" { + return false + } + if w.needsOpenGzEntries == nil { + return false + } + _, ok := w.needsOpenGzEntries[ent.Name] + return ok +} + // DiffID returns the SHA-256 of the uncompressed tar bytes. // It is only valid to call DiffID after Close. func (w *Writer) DiffID() string { @@ -957,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { } func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if tocOff < 0 { + // This means that TOC isn't contained in the blob. + // We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from + // the external location. + start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } if len(tocBytes) > 0 { start := time.Now() toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) @@ -1022,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) { return } +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + // isGzip reports whether br is positioned right before an upcoming gzip stream. // It does not consume any bytes from br. 
func isGzip(br *bufio.Reader) bool { @@ -1040,3 +1210,14 @@ func positive(n int64) int64 { } return n } + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go new file mode 100644 index 000000000000..5f466177cbb9 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/externaltoc/externaltoc.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package externaltoc + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz" + digest "github.com/opencontainers/go-digest" +) + +type GzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func NewGzipCompressionWithLevel(provideTOC func() ([]byte, error), level int) estargz.Compression { + return &GzipCompression{ + NewGzipCompressorWithLevel(level), + NewGzipDecompressor(provideTOC), + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{compressionLevel: gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{compressionLevel: level} +} + +type GzipCompressor struct { + compressionLevel int + buf *bytes.Buffer +} + +func (gc *GzipCompressor) WriteTOCTo(w io.Writer) (int, error) { + if len(gc.buf.Bytes()) == 0 { + return 0, fmt.Errorf("TOC hasn't been registered") + } + return w.Write(gc.buf.Bytes()) +} + +func (gc *GzipCompressor) Writer(w io.Writer) (estargz.WriteFlushCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *estargz.JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + gz, _ := gzip.NewWriterLevel(buf, gc.compressionLevel) + // TOC isn't written to layer so no effect to diff ID + tw := tar.NewWriter(gz) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: estargz.TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + gc.buf = buf + footerBytes, err := gzipFooterBytes() + if err != nil { + return "", err + } + if _, err := w.Write(footerBytes); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// The footer is an empty gzip 
stream with no compression and an Extra header. +// +// 46 comes from: +// +// 10 bytes gzip header +// 2 bytes XLEN (length of Extra field) = 21 (4 bytes header + len("STARGZEXTERNALTOC")) +// 2 bytes Extra: SI1 = 'S', SI2 = 'G' +// 2 bytes Extra: LEN = 17 (len("STARGZEXTERNALTOC")) +// 17 bytes Extra: subfield = "STARGZEXTERNALTOC" +// 5 bytes flate header +// 8 bytes gzip footer +// (End of the eStargz blob) +const FooterSize = 46 + +// gzipFooterBytes returns the 46-byte footer. +func gzipFooterBytes() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) + gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep the footer at 46 bytes + + // Extra header indicating the offset of TOCJSON + // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 + header := make([]byte, 4) + header[0], header[1] = 'S', 'G' + subfield := "STARGZEXTERNALTOC" // len("STARGZEXTERNALTOC") = 17 + binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 + gz.Header.Extra = append(header, []byte(subfield)...) + if err := gz.Close(); err != nil { + return nil, err + } + if buf.Len() != FooterSize { + panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) + } + return buf.Bytes(), nil +} + +func NewGzipDecompressor(provideTOCFunc func() ([]byte, error)) *GzipDecompressor { + return &GzipDecompressor{provideTOCFunc: provideTOCFunc} +} + +type GzipDecompressor struct { + provideTOCFunc func() ([]byte, error) + rawTOC []byte // Do not access this field directly. Get this through getTOC() method. + getTOCOnce sync.Once +} + +func (gz *GzipDecompressor) getTOC() ([]byte, error) { + if len(gz.rawTOC) == 0 { + var retErr error + gz.getTOCOnce.Do(func() { + if gz.provideTOCFunc == nil { + retErr = fmt.Errorf("TOC hasn't been provided") + return + } + rawTOC, err := gz.provideTOCFunc() + if err != nil { + retErr = err + return + } + gz.rawTOC = rawTOC + }) + if retErr != nil { + return nil, retErr + } + if len(gz.rawTOC) == 0 { + return nil, fmt.Errorf("no TOC is provided") + } + } + return gz.rawTOC, nil +} + +func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { + return gzip.NewReader(r) +} + +func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) { + if r != nil { + return nil, "", fmt.Errorf("TOC must be provided externally but got internal one") + } + rawTOC, err := gz.getTOC() + if err != nil { + return nil, "", fmt.Errorf("failed to get TOC: %v", err) + } + return parseTOCEStargz(bytes.NewReader(rawTOC)) +} + +func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { + if len(p) != FooterSize { + return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p)) + } + zr, err := gzip.NewReader(bytes.NewReader(p)) + if err != nil { + return 0, 0, 0, err + } + defer zr.Close() + extra := zr.Header.Extra + si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] + if si1 != 'S' || si2 != 'G' { + return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2) + } + if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(len("STARGZEXTERNALTOC")) { + return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, len("STARGZEXTERNALTOC")) + } + if string(subfield) != "STARGZEXTERNALTOC" { + return 0, 0, 0, fmt.Errorf("STARGZEXTERNALTOC magic string must be included in the footer subfield") + } + // tocOffset < 0 indicates external TOC.
+ // blobPayloadSize < 0 indicates the entire blob size. + return -1, -1, 0, nil +} + +func (gz *GzipDecompressor) FooterSize() int64 { + return FooterSize +} + +func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + if r != nil { + return nil, fmt.Errorf("TOC must be provided externally but got internal one") + } + rawTOC, err := gz.getTOC() + if err != nil { + return nil, fmt.Errorf("failed to get TOC: %v", err) + } + return decompressTOCEStargz(bytes.NewReader(rawTOC)) +} + +func parseTOCEStargz(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(estargz.JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != estargz.TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, estargz.TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 591d7a62e117..f24afe32f450 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -60,7 +60,7 @@ type GzipCompressor struct { compressionLevel int } -func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { return gzip.NewWriterLevel(w, gc.compressionLevel) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 1de13a4705be..0ca6fd75f2ed 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,8 +31,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "math/rand" "os" + "path/filepath" "reflect" "sort" "strings" @@ -44,21 +45,27 @@ import ( digest "github.com/opencontainers/go-digest" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - CountStreams(*testing.T, []byte) int + TestStreams(t *testing.T, b []byte, streams []int64) DiffIDOf(*testing.T, []byte) string String() string } // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingController) { +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) 
}) t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } +type TestingControllerFactory func() TestingController + const ( uncompressedType int = iota gzipType @@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingController) { +func testBuild(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - chunkSize int - in []tarEntry + name string + chunkSize int + minChunkSize []int + in []tarEntry }{ { name: "regfiles and directories", @@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) { ), }, { - name: "various files", - chunkSize: 4, + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, in: tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), - file("foo.txt", "a"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), dir("dev/"), @@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) { }, } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, prefix := range allowedPrefix { prefix := prefix - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { - tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) - // Test divideEntries() - entries, err := sortEntries(tarBlob, nil, nil) // identical order - if err != nil { - t.Fatalf("failed to parse tar: %v", err) - } - var merged []*entry - for _, part := range divideEntries(entries, 4) { - merged = append(merged, part...) - } - if !reflect.DeepEqual(entries, merged) { - for _, e := range entries { - t.Logf("Original: %v", e.header) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) } - for _, e := range merged { - t.Logf("Merged: %v", e.header) + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return } - t.Errorf("divided entries couldn't be merged") - return - } - // Prepare sample data - wantBuf := new(bytes.Buffer) - sw := NewWriterWithCompressor(wantBuf, cl) - sw.ChunkSize = tt.chunkSize - if err := sw.AppendTar(tarBlob); err != nil { - t.Fatalf("failed to append tar to want stargz: %v", err) - } - if _, err := sw.Close(); err != nil { - t.Fatalf("failed to prepare want stargz: %v", err) - } - wantData := wantBuf.Bytes() - want, err := Open(io.NewSectionReader( - bytes.NewReader(wantData), 0, int64(len(wantData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the want stargz: %v", err) - } + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } - // Prepare testing data - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(tt.chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to build stargz: %v", err) - } - defer rc.Close() - gotBuf := new(bytes.Buffer) - if _, err := io.Copy(gotBuf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - gotData := gotBuf.Bytes() - got, err := Open(io.NewSectionReader( - bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the got stargz: %v", err) - } + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) 
+ if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } - // Check DiffID is properly calculated - rc.Close() - diffID := rc.DiffID() - wantDiffID := cl.DiffIDOf(t, gotData) - if diffID.String() != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } - // Compare as stargz - if !isSameVersion(t, cl, wantData, gotData) { - t.Errorf("built stargz hasn't same json") - return - } - if !isSameEntries(t, want, got) { - t.Errorf("built stargz isn't same as the original") - return - } + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } - // Compare as tar.gz - if !isSameTarGz(t, cl, wantData, gotData) { - t.Errorf("built stargz isn't same tar.gz") - return - } - }) + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } } } } @@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) { } } -func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { - aGz, err := controller.Reader(bytes.NewReader(a)) +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") } defer aGz.Close() - bGz, err := controller.Reader(bytes.NewReader(b)) + bGz, err := clb.Reader(bytes.NewReader(b)) if err != nil { t.Fatalf("failed to read B") } @@ -287,11 +311,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return true } -func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { - aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) } - bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) if err != nil { t.Fatalf("failed to parse B: %v", err) } @@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool { a.GID == b.GID && a.Uname == b.Uname && a.Gname == b.Gname && - (a.Offset > 0) 
== (b.Offset > 0) && + (a.Offset >= 0) == (b.Offset >= 0) && (a.NextOffset() > 0) == (b.NextOffset() > 0) && a.DevMajor == b.DevMajor && a.DevMinor == b.DevMinor && @@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingController) { +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) - checks []check + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int }{ { name: "no-regfile", @@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { regDigest(t, "test/bar.txt", "bbb", dgstMap), ) }, + minChunkSize: []int{0, 64000}, checks: []check{ checkStargzTOC, checkVerifyTOC, @@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { }, }, { - name: "with-non-regfiles", + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), + regDigest(t, "bar/foo2.txt", "b", dgstMap), + regDigest(t, "foo3.txt", "c", dgstMap), symlink("barlink", "test/bar.txt"), dir("test/"), regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), @@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { checkVerifyInvalidStargzFail(buildTar(t, tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), file("foo.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), file("test/bar.txt", "testbartestbar"), @@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { - // Get original tar file and chunk digests - dgstMap := make(map[string]digest.Digest) - tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) - - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to convert stargz: %v", err) - } - tocDigest := rc.TOCDigest() - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, rc); err != nil { - t.Fatalf("failed to copy 
built stargz blob: %v", err) - } - newStargz := buf.Bytes() - // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. - dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + cl := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Build`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) - for _, check := range tt.checks { - check(t, newStargz, tocDigest, dgstMap, cl) - } - }) + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl, newCL) + } + }) + } } } } @@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error.
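The `check` helpers below all revolve around TOC verification. For orientation, this is roughly the consumer-side flow they exercise, sketched in the style of this testutil file (same package, same imports); the `verifyFile` helper, its fallback for single-chunk files, and the assumption that `sgzData` and `tocDigest` come from a trusted source are ours, not part of this diff:

```go
// verifyFile opens an eStargz blob, verifies its TOC against a trusted
// digest, and then verifies the first chunk of one regular file.
func verifyFile(sgzData []byte, tocDigest digest.Digest, name string) error {
	r, err := Open(io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))))
	if err != nil {
		return err
	}
	v, err := r.VerifyTOC(tocDigest) // rejects a tampered TOC
	if err != nil {
		return err
	}
	ent, ok := r.Lookup(name)
	if !ok {
		return fmt.Errorf("%q not found in TOC", name)
	}
	verifier, err := v.Verifier(ent) // digest.Verifier for this chunk
	if err != nil {
		return err
	}
	f, err := r.OpenFile(name)
	if err != nil {
		return err
	}
	size := ent.ChunkSize
	if size == 0 {
		size = ent.Size // single-chunk file: ChunkSize may be omitted in the TOC
	}
	if _, err := io.Copy(verifier, io.NewSectionReader(f, ent.ChunkOffset, size)); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("chunk of %q does not match its TOC digest", name)
	}
	return nil
}
```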
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var found bool @@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { - rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + cl := newController() + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { t.Fatalf("failed to convert stargz: %v", err) } @@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { sgz, err := Open( io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), - WithDecompressors(controller), + WithDecompressors(cl), ) if err != nil { t.Fatalf("failed to parse converted stargz: %v", err) @@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
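Note how `parseStargz` above now passes a nil `tocReader` whenever `ParseFooter` reports `tocOffset < 0`; that is the hook the new `externaltoc` package relies on. Before moving on to the broken-content checks below, here is a hedged end-to-end sketch of the intended round trip, using only APIs introduced in this diff (the TOC is kept in an in-memory buffer for brevity; real callers would persist it as a separate blob, and `layer.tar` is an illustrative path):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
	"github.com/containerd/stargz-snapshotter/estargz/externaltoc"
)

func main() {
	tarFile, err := os.Open("layer.tar") // hypothetical input tar
	if err != nil {
		panic(err)
	}
	defer tarFile.Close()

	// Write: the compressor holds the TOC back instead of appending it to the blob.
	comp := externaltoc.NewGzipCompressorWithLevel(gzip.BestCompression)
	blob := new(bytes.Buffer)
	w := estargz.NewWriterWithCompressor(blob, comp)
	if err := w.AppendTar(tarFile); err != nil {
		panic(err)
	}
	if _, err := w.Close(); err != nil { // emits the special 46-byte footer
		panic(err)
	}
	tocBlob := new(bytes.Buffer)
	if _, err := comp.WriteTOCTo(tocBlob); err != nil { // persist the TOC separately
		panic(err)
	}

	// Read: ParseFooter returns tocOffset < 0, so estargz asks the
	// decompressor's callback for the TOC instead of reading it from the blob.
	dec := externaltoc.NewGzipDecompressor(func() ([]byte, error) {
		return tocBlob.Bytes(), nil
	})
	sr := io.NewSectionReader(bytes.NewReader(blob.Bytes()), 0, int64(blob.Len()))
	if _, err := estargz.Open(sr, estargz.WithDecompressors(dec)); err != nil {
		panic(err)
	}
}
```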
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT } // Decode the TOC JSON - tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) @@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingController) { +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} sampleOwner := owner{uid: 50, gid: 100} + data64KB := randomContents(64000) + tests := []struct { - name string - chunkSize int - in []tarEntry - want []stargzCheck - wantNumGz int // expected number of streams + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz wantFailOnLossLess bool + wantTOCVersion int // default = 1 }{ { - name: "empty", - in: tarOf(), - wantNumGz: 2, // empty tar + TOC + footer - wantNumGzLossLess: 3, // empty tar + TOC + footer + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer want: checks( numTOCEntries(0), ), @@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { dir("foo/"), file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), ), - wantNumGz: 9, + wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer want: checks( numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file hasDir("foo/"), @@ -1314,23 +1359,120 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { ), wantFailOnLossLess: true, }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", 
len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, } for _, tt := range tests { - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) var stargzBuf bytes.Buffer - w := NewWriterWithCompressor(&stargzBuf, cl) + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) 
w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize if lossless { err := w.AppendTarLossLess(tr) if tt.wantFailOnLossLess { @@ -1354,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { if lossless { // Check if the result blob reserves original tar metadata - rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) if err != nil { t.Errorf("failed to decompress blob: %v", err) return @@ -1373,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } diffID := w.DiffID() - wantDiffID := cl.DiffIDOf(t, b) + wantDiffID := cl1.DiffIDOf(t, b) if diffID != wantDiffID { t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) } - got := cl.CountStreams(t, b) - wantNumGz := tt.wantNumGz - if lossless && tt.wantNumGzLossLess > 0 { - wantNumGz = tt.wantNumGzLossLess - } - if got != wantNumGz { - t.Errorf("number of streams = %d; want %d", got, wantNumGz) - } - telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) r, err := Open( - io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), - WithDecompressors(cl), + sr, + WithDecompressors(cl1), WithTelemetry(telemetry), ) if err != nil { t.Fatalf("stargz.Open: %v", err) } - if err := checkCalled(); err != nil { + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { t.Errorf("telemetry failure: %v", err) } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + for _, want := range tt.want { want.check(t, r) } @@ -1410,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } } -func newCalledTelemetry() (telemetry *Telemetry, check func() error) { +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { var getFooterLatencyCalled bool var getTocLatencyCalled bool var deserializeTocLatencyCalled bool @@ -1418,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) { func(time.Time) { getFooterLatencyCalled = true }, func(time.Time) { getTocLatencyCalled = true }, func(time.Time) { deserializeTocLatencyCalled = true }, - }, func() error { + }, func(needsGetTOC bool) 
error { var allErr []error if !getFooterLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) } - if !getTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } } if !deserializeTocLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) @@ -1561,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck { }) } +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + func hasFileContentsRange(file string, offset int, want string) stargzCheck { return stargzCheckFn(func(t *testing.T, r *Reader) { f, err := r.OpenFile(file) @@ -1573,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) } if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) } }) } @@ -1731,6 +1966,67 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { }) } +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: 
%+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." + string(c[50:100]) +} + func tarOf(s ...tarEntry) []tarEntry { return s } type tarEntry interface { @@ -1990,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin }) } +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + func fileModeToTarMode(mode os.FileMode) (int64, error) { h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") if err != nil { @@ -2007,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go index 384ff7fd7f2f..57e0aa614e4a 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -149,6 +149,12 @@ type TOCEntry struct { // ChunkSize. Offset int64 `json:"offset,omitempty"` + // InnerOffset is an optional field that indicates the uncompressed offset + // of this "reg" or "chunk" payload in the stream that starts at Offset. + // This field makes it possible to put multiple "reg" or "chunk" payloads + // in one chunk, sharing the same Offset but with different InnerOffset values. + InnerOffset int64 `json:"innerOffset,omitempty"` + nextOffset int64 // the Offset of the next entry with a non-zero Offset // DevMajor is the major device number for "char" and "block" types. @@ -159,7 +165,8 @@ type TOCEntry struct { // NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry. - NumLink int + // This field is calculated during runtime and not recorded in TOC JSON. + NumLink int `json:"-"` // Xattrs are the extended attribute for the entry. Xattrs map[string][]byte `json:"xattrs,omitempty"` @@ -185,6 +192,9 @@ type TOCEntry struct { ChunkDigest string `json:"chunkDigest,omitempty"` children map[string]*TOCEntry + + // chunkTopIndex is the index of the entry at which Offset starts in the blob. + chunkTopIndex int } // ModTime returns the entry's modification time. @@ -278,7 +288,10 @@ type Compressor interface { // Writer returns WriteCloser to be used for writing a chunk to eStargz. // Everytime a chunk is written, the WriteCloser is closed and Writer is // called again for writing the next chunk. - Writer(w io.Writer) (io.WriteCloser, error) + // + // The returned writer must implement a "Flush() error" method that flushes + // any pending compressed data to the underlying writer. + Writer(w io.Writer) (WriteFlushCloser, error) // WriteTOCAndFooter is called to write JTOC to the passed Writer. // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob @@ -302,8 +315,12 @@ type Decompressor interface { // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between // the top until the TOC JSON). // - // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range - // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize). + // If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader + // to ParseTOC. We expect ParseTOC to acquire the TOC from the external location and return it. + // + // tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the + // footer (blob size - tocOff - FooterSize). + // If blobPayloadSize < 0, blobPayloadSize becomes the blob size. ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) // ParseTOC parses TOC from the passed reader. The reader provides the partial contents @@ -312,5 +329,14 @@ type Decompressor interface { // This function returns tocDgst that represents the digest of TOC that will be used // to verify this blob. This must match to the value returned from // Compressor.WriteTOCAndFooter that is used when creating this blob. + // + // If tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob. + // A nil reader is passed to ParseTOC, which is then expected to acquire the TOC from the external location + // and return it.
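Because `Compressor.Writer` now returns a `WriteFlushCloser` (declared just after this interface), external compressor implementations whose writers lack a `Flush` method need a small adapter. A sketch of one way to satisfy the new contract; `nopFlusher`, `myCompressor`, and `newWriteCloser` are our illustrative names, not part of this diff:

```go
// nopFlusher adapts an io.WriteCloser without Flush to WriteFlushCloser.
// A no-op Flush is only safe for compressors that do not buffer data
// across entry boundaries.
type nopFlusher struct {
	io.WriteCloser
}

func (nopFlusher) Flush() error { return nil }

func (c *myCompressor) Writer(w io.Writer) (estargz.WriteFlushCloser, error) {
	wc, err := c.newWriteCloser(w) // hypothetical underlying compressor
	if err != nil {
		return nil, err
	}
	return nopFlusher{wc}, nil
}
```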
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) } + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go index 778b91c5d25b..306038a74664 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/zstdchunked/zstdchunked.go @@ -121,7 +121,7 @@ type Compressor struct { pool sync.Pool } -func (zc *Compressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (zc *Compressor) Writer(w io.Writer) (estargz.WriteFlushCloser, error) { if wc := zc.pool.Get(); wc != nil { ec := wc.(*zstd.Encoder) ec.Reset(w) diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go index fa45065b7a98..d154ed2ab5d5 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/fs.go @@ -42,7 +42,6 @@ import ( "os/exec" "strconv" "sync" - "syscall" "time" "github.com/containerd/containerd/log" @@ -65,21 +64,25 @@ import ( digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) const ( defaultFuseTimeout = time.Second defaultMaxConcurrency = 2 - fusermountBin = "fusermount" ) +var fusermountBin = []string{"fusermount", "fusermount3"} + type Option func(*options) type options struct { - getSources source.GetSources - resolveHandlers map[string]remote.Handler - metadataStore metadata.Store - metricsLogLevel *logrus.Level + getSources source.GetSources + resolveHandlers map[string]remote.Handler + metadataStore metadata.Store + metricsLogLevel *logrus.Level + overlayOpaqueType layer.OverlayOpaqueType + additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } func WithGetSources(s source.GetSources) Option { @@ -109,6 +112,18 @@ func WithMetricsLogLevel(logLevel logrus.Level) Option { } } +func WithOverlayOpaqueType(overlayOpaqueType layer.OverlayOpaqueType) Option { + return func(opts *options) { + opts.overlayOpaqueType = overlayOpaqueType + } +} + +func WithAdditionalDecompressors(d func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) Option { + return func(opts *options) { + opts.additionalDecompressors = d + } +} + func NewFilesystem(root string, cfg config.Config, opts ...Option) (_ snapshot.FileSystem, err error) { var fsOpts options for _, o := range opts { @@ -141,7 +156,7 @@ func NewFilesystem(root string, cfg config.Config, opts ...Option) (_ snapshot.F }) } tm := task.NewBackgroundTaskManager(maxConcurrency, 5*time.Second) - r, err := layer.NewResolver(root, tm, cfg, fsOpts.resolveHandlers, metadataStore) + r, err := layer.NewResolver(root, tm, cfg, fsOpts.resolveHandlers, metadataStore, fsOpts.overlayOpaqueType, fsOpts.additionalDecompressors) if err != nil { return nil, fmt.Errorf("failed to setup resolver: %w", err) } @@ -331,7 +346,8 @@ func (fs *filesystem) Mount(ctx context.Context, mountpoint string, labels map[s FsName: "stargz", // name this filesystem as "stargz" Debug: fs.debug, } - if _, err := exec.LookPath(fusermountBin); err == nil { + if isFusermountBinExist() { + log.G(ctx).Infof("fusermount detected") mountOpts.Options = 
[]string{"suid"} // option for fusermount; allow setuid inside container } else { log.G(ctx).WithError(err).Infof("%s not installed; trying direct mount", fusermountBin) @@ -415,6 +431,9 @@ func (fs *filesystem) check(ctx context.Context, l layer.Layer, labels map[strin } func (fs *filesystem) Unmount(ctx context.Context, mountpoint string) error { + if mountpoint == "" { + return fmt.Errorf("mount point must be specified") + } fs.layerMu.Lock() l, ok := fs.layer[mountpoint] if !ok { @@ -425,12 +444,27 @@ func (fs *filesystem) Unmount(ctx context.Context, mountpoint string) error { l.Done() fs.layerMu.Unlock() fs.metricsController.Remove(mountpoint) - // The goroutine which serving the mountpoint possibly becomes not responding. - // In case of such situations, we use MNT_FORCE here and abort the connection. - // In the future, we might be able to consider to kill that specific hanging - // goroutine using channel, etc. - // See also: https://www.kernel.org/doc/html/latest/filesystems/fuse.html#aborting-a-filesystem-connection - return syscall.Unmount(mountpoint, syscall.MNT_FORCE) + + if err := unmount(mountpoint, 0); err != nil { + if err != unix.EBUSY { + return err + } + // Try force unmount + log.G(ctx).WithError(err).Debugf("trying force unmount %q", mountpoint) + if err := unmount(mountpoint, unix.MNT_FORCE); err != nil { + return err + } + } + + return nil +} + +func unmount(target string, flags int) error { + for { + if err := unix.Unmount(target, flags); err != unix.EINTR { + return err + } + } } func (fs *filesystem) prefetch(ctx context.Context, l layer.Layer, defaultPrefetchSize int64, start time.Time) { @@ -459,3 +493,12 @@ func neighboringLayers(manifest ocispec.Manifest, target ocispec.Descriptor) (de } return } + +func isFusermountBinExist() bool { + for _, b := range fusermountBin { + if _, err := exec.LookPath(b); err == nil { + return true + } + } + return false +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go index 455769529f68..e0844ab41a54 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/layer.go @@ -27,7 +27,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -113,21 +112,23 @@ type Info struct { // Resolver resolves the layer location and provieds the handler of that layer. type Resolver struct { - rootDir string - resolver *remote.Resolver - prefetchTimeout time.Duration - layerCache *cacheutil.TTLCache - layerCacheMu sync.Mutex - blobCache *cacheutil.TTLCache - blobCacheMu sync.Mutex - backgroundTaskManager *task.BackgroundTaskManager - resolveLock *namedmutex.NamedMutex - config config.Config - metadataStore metadata.Store + rootDir string + resolver *remote.Resolver + prefetchTimeout time.Duration + layerCache *cacheutil.TTLCache + layerCacheMu sync.Mutex + blobCache *cacheutil.TTLCache + blobCacheMu sync.Mutex + backgroundTaskManager *task.BackgroundTaskManager + resolveLock *namedmutex.NamedMutex + config config.Config + metadataStore metadata.Store + overlayOpaqueType OverlayOpaqueType + additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor } // NewResolver returns a new layer resolver. 
-func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store) (*Resolver, error) { +func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store, overlayOpaqueType OverlayOpaqueType, additionalDecompressors func(context.Context, source.RegistryHosts, reference.Spec, ocispec.Descriptor) []metadata.Decompressor) (*Resolver, error) { resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second if resolveResultEntryTTL == 0 { resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second @@ -165,15 +166,17 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, } return &Resolver{ - rootDir: root, - resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), - layerCache: layerCache, - blobCache: blobCache, - prefetchTimeout: prefetchTimeout, - backgroundTaskManager: backgroundTaskManager, - config: cfg, - resolveLock: new(namedmutex.NamedMutex), - metadataStore: metadataStore, + rootDir: root, + resolver: remote.NewResolver(cfg.BlobConfig, resolveHandlers), + layerCache: layerCache, + blobCache: blobCache, + prefetchTimeout: prefetchTimeout, + backgroundTaskManager: backgroundTaskManager, + config: cfg, + resolveLock: new(namedmutex.NamedMutex), + metadataStore: metadataStore, + overlayOpaqueType: overlayOpaqueType, + additionalDecompressors: additionalDecompressors, }, nil } @@ -209,7 +212,7 @@ func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache if err := os.MkdirAll(root, 0700); err != nil { return nil, err } - cachePath, err := ioutil.TempDir(root, "") + cachePath, err := os.MkdirTemp(root, "") if err != nil { return nil, fmt.Errorf("failed to initialize directory cache: %w", err) } @@ -296,8 +299,13 @@ func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refs commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.DeserializeTocJSON, desc.Digest, start) }, } + + additionalDecompressors := []metadata.Decompressor{new(zstdchunked.Decompressor)} + if r.additionalDecompressors != nil { + additionalDecompressors = append(additionalDecompressors, r.additionalDecompressors(ctx, hosts, refspec, desc)...) + } meta, err := r.metadataStore(sr, - append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(new(zstdchunked.Decompressor)))...) + append(esgzOpts, metadata.WithTelemetry(&telemetry), metadata.WithDecompressors(additionalDecompressors...))...) 
if err != nil { return nil, err } @@ -574,7 +582,7 @@ func (l *layer) RootNode(baseInode uint32) (fusefs.InodeEmbedder, error) { if l.r == nil { return nil, fmt.Errorf("layer hasn't been verified yet") } - return newNode(l.desc.Digest, l.r, l.blob, baseInode) + return newNode(l.desc.Digest, l.r, l.blob, baseInode, l.resolver.overlayOpaqueType) } func (l *layer) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go index dbe1c94502ae..d3144b81d2fb 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/node.go @@ -59,19 +59,36 @@ const ( stateDirMode = syscall.S_IFDIR | 0500 // dr-x------ ) -var opaqueXattrs = []string{"trusted.overlay.opaque", "user.overlay.opaque"} +type OverlayOpaqueType int -func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32) (fusefs.InodeEmbedder, error) { +const ( + OverlayOpaqueAll OverlayOpaqueType = iota + OverlayOpaqueTrusted + OverlayOpaqueUser +) + +var opaqueXattrs = map[OverlayOpaqueType][]string{ + OverlayOpaqueAll: {"trusted.overlay.opaque", "user.overlay.opaque"}, + OverlayOpaqueTrusted: {"trusted.overlay.opaque"}, + OverlayOpaqueUser: {"user.overlay.opaque"}, +} + +func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseInode uint32, opaque OverlayOpaqueType) (fusefs.InodeEmbedder, error) { rootID := r.Metadata().RootID() rootAttr, err := r.Metadata().GetAttr(rootID) if err != nil { return nil, err } + opq, ok := opaqueXattrs[opaque] + if !ok { + return nil, fmt.Errorf("Unknown overlay opaque type") + } ffs := &fs{ - r: r, - layerDigest: layerDgst, - baseInode: baseInode, - rootID: rootID, + r: r, + layerDigest: layerDgst, + baseInode: baseInode, + rootID: rootID, + opaqueXattrs: opq, } ffs.s = ffs.newState(layerDgst, blob) return &node{ @@ -83,11 +100,12 @@ func newNode(layerDgst digest.Digest, r reader.Reader, blob remote.Blob, baseIno // fs contains global metadata used by nodes type fs struct { - r reader.Reader - s *state - layerDigest digest.Digest - baseInode uint32 - rootID uint32 + r reader.Reader + s *state + layerDigest digest.Digest + baseInode uint32 + rootID uint32 + opaqueXattrs []string } func (fs *fs) inodeOfState() uint64 { @@ -335,7 +353,7 @@ var _ = (fusefs.NodeGetxattrer)((*node)(nil)) func (n *node) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) { ent := n.attr opq := n.isOpaque() - for _, opaqueXattr := range opaqueXattrs { + for _, opaqueXattr := range n.fs.opaqueXattrs { if attr == opaqueXattr && opq { // This node is an opaque directory so give overlayfs-compliant indicator. if len(dest) < len(opaqueXattrValue) { @@ -361,7 +379,7 @@ func (n *node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn var attrs []byte if opq { // This node is an opaque directory so add overlayfs-compliant indicator. - for _, opaqueXattr := range opaqueXattrs { + for _, opaqueXattr := range n.fs.opaqueXattrs { attrs = append(attrs, []byte(opaqueXattr+"\x00")...) 
} } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go index b849bb70f93f..38b97a68210c 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/layer/testutil.go @@ -24,12 +24,12 @@ package layer import ( "bytes" + "compress/gzip" "context" "crypto/sha256" "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net/http" "os" @@ -48,9 +48,10 @@ import ( "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" - "github.com/containerd/stargz-snapshotter/util/testutil" + tutil "github.com/containerd/stargz-snapshotter/util/testutil" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" + "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/unix" @@ -62,15 +63,22 @@ const ( sampleData2 = "abcdefghij" ) +var srcCompressions = map[string]tutil.CompressionFactory{ + "zstd-fastest": tutil.ZstdCompressionWithLevel(zstd.SpeedFastest), + "gzip-bestspeed": tutil.GzipCompressionWithLevel(gzip.BestSpeed), + "externaltoc-gzip-bestspeed": tutil.ExternalTOCGzipCompressionWithLevel(gzip.BestSpeed), +} + func TestSuiteLayer(t *testing.T, store metadata.Store) { testPrefetch(t, store) testNodeRead(t, store) - testExistence(t, store) + testNodes(t, store) } var testStateLayerDigest = digest.FromString("dummy") func testPrefetch(t *testing.T, factory metadata.Store) { + data64KB := string(tutil.RandomBytes(t, 64000)) defaultPrefetchSize := int64(10000) landmarkPosition := func(t *testing.T, l *layer) int64 { if l.r == nil { @@ -87,7 +95,9 @@ func testPrefetch(t *testing.T, factory metadata.Store) { } tests := []struct { name string - in []testutil.TarEntry + chunkSize int // default is "sampleChunkSize" + minChunkSize int + in []tutil.TarEntry wantNum int // number of chunks wanted in the cache wants []string // filenames to compare prefetchSize func(*testing.T, *layer) int64 @@ -95,17 +105,17 @@ func testPrefetch(t *testing.T, factory metadata.Store) { }{ { name: "no_prefetch", - in: []testutil.TarEntry{ - testutil.File("foo.txt", sampleData1), + in: []tutil.TarEntry{ + tutil.File("foo.txt", sampleData1), }, wantNum: 0, prioritizedFiles: nil, }, { name: "prefetch", - in: []testutil.TarEntry{ - testutil.File("foo.txt", sampleData1), - testutil.File("bar.txt", sampleData2), + in: []tutil.TarEntry{ + tutil.File("foo.txt", sampleData1), + tutil.File("bar.txt", sampleData2), }, wantNum: chunkNum(sampleData1), wants: []string{"foo.txt"}, @@ -114,101 +124,152 @@ func testPrefetch(t *testing.T, factory metadata.Store) { }, { name: "with_dir", - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/bar.txt", sampleData1), - testutil.Dir("buz/"), - testutil.File("buz/buzbuz.txt", sampleData2), + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/bar.txt", sampleData1), + tutil.Dir("buz/"), + tutil.File("buz/buzbuz.txt", sampleData2), }, wantNum: chunkNum(sampleData1), wants: []string{"foo/bar.txt"}, prefetchSize: landmarkPosition, prioritizedFiles: []string{"foo/", "foo/bar.txt"}, }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + chunkSize: 1000000000, // do not chunk + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + 
tutil.File("foo22", "ccc"), + tutil.Dir("bar/"), + tutil.File("bar/bar.txt", "aaa"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + wantNum: 5, // foo1 + foo2 + foo22 + bar.txt + foo3 + wants: []string{"foo/foo1", "foo2", "foo22", "bar/bar.txt", "foo3"}, + prefetchSize: landmarkPosition, + prioritizedFiles: []string{"foo/", "foo/foo1", "foo2", "foo22", "bar/", "bar/bar.txt", "foo3"}, + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + tutil.Dir("bar/"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + // landmark+dir+foo1(1), foo1(2), foo2, dir+foo3(1), foo3(2), TOC, footer + wantNum: 3, // foo1(2) + foo2 (foo3(1) shouldn't be in a separated stream) + wants: []string{"foo/foo1", "foo2"}, + prefetchSize: landmarkPosition, + prioritizedFiles: []string{"foo/", "foo/foo1", "foo2"}, + }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sr, dgst, err := testutil.BuildEStargz(tt.in, - testutil.WithEStargzOptions( - estargz.WithChunkSize(sampleChunkSize), - estargz.WithPrioritizedFiles(tt.prioritizedFiles), - )) - if err != nil { - t.Fatalf("failed to build eStargz: %v", err) - } - blob := newBlob(sr) - mcache := cache.NewMemoryCache() - mr, err := factory(sr) - if err != nil { - t.Fatalf("failed to create metadata reader: %v", err) - } - defer mr.Close() - vr, err := reader.NewReader(mr, mcache, digest.FromString("")) - if err != nil { - t.Fatalf("failed to create reader: %v", err) - } - l := newLayer( - &Resolver{ - prefetchTimeout: time.Second, - backgroundTaskManager: task.NewBackgroundTaskManager(10, 5*time.Second), - }, - ocispec.Descriptor{Digest: testStateLayerDigest}, - &blobRef{blob, func() {}}, - vr, - ) - if err := l.Verify(dgst); err != nil { - t.Errorf("failed to verify reader: %v", err) - return - } - prefetchSize := int64(0) - if tt.prefetchSize != nil { - prefetchSize = tt.prefetchSize(t, l) - } - if err := l.Prefetch(defaultPrefetchSize); err != nil { - t.Errorf("failed to prefetch: %v", err) - return - } - if blob.calledPrefetchOffset != 0 { - t.Errorf("invalid prefetch offset %d; want %d", - blob.calledPrefetchOffset, 0) - } - if blob.calledPrefetchSize != prefetchSize { - t.Errorf("invalid prefetch size %d; want %d", - blob.calledPrefetchSize, prefetchSize) - } - if cLen := len(mcache.(*cache.MemoryCache).Membuf); tt.wantNum != cLen { - t.Errorf("number of chunks in the cache %d; want %d: %v", cLen, tt.wantNum, err) - return - } - - lr := l.r - if lr == nil { - t.Fatalf("failed to get reader from layer: %v", err) - } - for _, file := range tt.wants { - id, err := lookup(lr.Metadata(), file) + for srcCompressionName, srcCompression := range srcCompressions { + cl := srcCompression() + t.Run("testPrefetch-"+tt.name+"-"+srcCompressionName, func(t *testing.T) { + chunkSize := sampleChunkSize + if tt.chunkSize > 0 { + chunkSize = tt.chunkSize + } + minChunkSize := 0 + if tt.minChunkSize > 0 { + minChunkSize = tt.minChunkSize + } + sr, dgst, err := tutil.BuildEStargz(tt.in, + tutil.WithEStargzOptions( + estargz.WithChunkSize(chunkSize), + estargz.WithMinChunkSize(minChunkSize), + estargz.WithPrioritizedFiles(tt.prioritizedFiles), + estargz.WithCompression(cl), + )) if err != nil { - t.Fatalf("failed to 
lookup %q: %v", file, err) + t.Fatalf("failed to build eStargz: %v", err) } - e, err := lr.Metadata().GetAttr(id) + blob := newBlob(t, sr) + mcache := cache.NewMemoryCache() + mr, err := factory(sr, metadata.WithDecompressors(cl)) if err != nil { - t.Fatalf("failed to get attr of %q: %v", file, err) + t.Fatalf("failed to create metadata reader: %v", err) } - wantFile, err := lr.OpenFile(id) + defer mr.Close() + vr, err := reader.NewReader(mr, mcache, digest.FromString("")) if err != nil { - t.Fatalf("failed to open file %q", file) + t.Fatalf("failed to create reader: %v", err) } - blob.readCalled = false - if _, err := io.Copy(ioutil.Discard, io.NewSectionReader(wantFile, 0, e.Size)); err != nil { - t.Fatalf("failed to read file %q", file) + l := newLayer( + &Resolver{ + prefetchTimeout: time.Second, + backgroundTaskManager: task.NewBackgroundTaskManager(10, 5*time.Second), + }, + ocispec.Descriptor{Digest: testStateLayerDigest}, + &blobRef{blob, func() {}}, + vr, + ) + if err := l.Verify(dgst); err != nil { + t.Errorf("failed to verify reader: %v", err) + return } - if blob.readCalled { - t.Errorf("chunks of file %q aren't cached", file) + prefetchSize := int64(0) + if tt.prefetchSize != nil { + prefetchSize = tt.prefetchSize(t, l) + } + if err := l.Prefetch(defaultPrefetchSize); err != nil { + t.Errorf("failed to prefetch: %v", err) return } - } - }) + if blob.calledPrefetchOffset != 0 { + t.Errorf("invalid prefetch offset %d; want %d", + blob.calledPrefetchOffset, 0) + } + if blob.calledPrefetchSize != prefetchSize { + t.Errorf("invalid prefetch size %d; want %d", + blob.calledPrefetchSize, prefetchSize) + } + if cLen := len(mcache.(*cache.MemoryCache).Membuf); tt.wantNum != cLen { + t.Errorf("number of chunks in the cache %d; want %d: %v", cLen, tt.wantNum, err) + return + } + + lr := l.r + if lr == nil { + t.Fatalf("failed to get reader from layer: %v", err) + } + for _, file := range tt.wants { + id, err := lookup(lr.Metadata(), file) + if err != nil { + t.Fatalf("failed to lookup %q: %v", file, err) + } + e, err := lr.Metadata().GetAttr(id) + if err != nil { + t.Fatalf("failed to get attr of %q: %v", file, err) + } + wantFile, err := lr.OpenFile(id) + if err != nil { + t.Fatalf("failed to open file %q", file) + } + blob.readCalled = false + if _, err := io.Copy(io.Discard, io.NewSectionReader(wantFile, 0, e.Size)); err != nil { + t.Fatalf("failed to read file %q", file) + } + if blob.readCalled { + t.Errorf("chunks of file %q aren't cached", file) + return + } + } + }) + } } } @@ -230,17 +291,34 @@ func chunkNum(data string) int { return (len(data)-1)/sampleChunkSize + 1 } -func newBlob(sr *io.SectionReader) *sampleBlob { +type region struct { + begin int64 + end int64 // inclusive +} + +func isDup(a, b region) bool { + if a.begin < b.begin { + return a.end >= b.begin + } + // b.begin <= a.begin + return b.end >= a.begin +} + +func newBlob(t *testing.T, sr *io.SectionReader) *sampleBlob { return &sampleBlob{ + t: t, r: sr, } } type sampleBlob struct { + t *testing.T + r *io.SectionReader readCalled bool calledPrefetchOffset int64 calledPrefetchSize int64 + calledRegions []region // sorted } func (sb *sampleBlob) Authn(tr http.RoundTripper) (http.RoundTripper, error) { return nil, nil } @@ -248,6 +326,39 @@ func (sb *sampleBlob) Check() error { r func (sb *sampleBlob) Size() int64 { return sb.r.Size() } func (sb *sampleBlob) FetchedSize() int64 { return 0 } func (sb *sampleBlob) ReadAt(p []byte, offset int64, opts ...remote.Option) (int, error) { + if len(p) > 0 { + target := 
region{offset, offset + int64(len(p)) - 1} + if len(sb.calledRegions) == 0 { + sb.calledRegions = []region{target} + } else { + pos := 0 + found := false + for i, r := range sb.calledRegions { + if target.begin < r.begin { + pos = i + found = true + break + } + } + if !found { + pos = len(sb.calledRegions) + } + if pos > 0 { + b := sb.calledRegions[pos-1] + if isDup(b, target) { + sb.t.Fatalf("reading on the previous region is duplicated: %+v and %+v", b, target) + } + } + if pos+1 < len(sb.calledRegions) { + a := sb.calledRegions[pos+1] + if isDup(a, target) { + sb.t.Fatalf("reading on the next region is duplicated: %+v and %+v", a, target) + } + } + sb.calledRegions = append(sb.calledRegions[:pos], append([]region{target}, sb.calledRegions[pos:]...)...) + } + } + sb.readCalled = true return sb.r.ReadAt(p, offset) } @@ -289,74 +400,77 @@ func testNodeRead(t *testing.T, factory metadata.Store) { for in, innero := range innerOffsetCond { for bo, baseo := range baseOffsetCond { for fn, filesize := range fileSizeCond { - t.Run(fmt.Sprintf("reading_%s_%s_%s_%s", sn, in, bo, fn), func(t *testing.T) { - if filesize > int64(len(sampleData1)) { - t.Fatal("sample file size is larger than sample data") - } - - wantN := size - offset := baseo + innero - if remain := filesize - offset; remain < wantN { - if wantN = remain; wantN < 0 { - wantN = 0 + for _, srcCompression := range srcCompressions { + cl := srcCompression() + t.Run(fmt.Sprintf("reading_%s_%s_%s_%s", sn, in, bo, fn), func(t *testing.T) { + if filesize > int64(len(sampleData1)) { + t.Fatal("sample file size is larger than sample data") + } + + wantN := size + offset := baseo + innero + if remain := filesize - offset; remain < wantN { + if wantN = remain; wantN < 0 { + wantN = 0 + } + } + + // use constant string value as a data source. + want := strings.NewReader(sampleData1) + + // data we want to get. + wantData := make([]byte, wantN) + _, err := want.ReadAt(wantData, offset) + if err != nil && err != io.EOF { + t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) + } + + // data we get from the file node. + f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, cl) + defer closeFn() + tmpbuf := make([]byte, size) // fuse library can request bigger than remain + rr, errno := f.Read(context.Background(), tmpbuf, offset) + if errno != 0 { + t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) + return + } + if rsize := rr.Size(); int64(rsize) != wantN { + t.Errorf("read size: %d; want: %d; passed %d", rsize, wantN, size) + return } - } - - // use constant string value as a data source. - want := strings.NewReader(sampleData1) - - // data we want to get. - wantData := make([]byte, wantN) - _, err := want.ReadAt(wantData, offset) - if err != nil && err != io.EOF { - t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) - } - - // data we get from the file node. 
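// Worked example for the inclusive-range overlap check isDup above,
// which treats region.end as the last byte read:
//
//	a := region{begin: 0, end: 9}   // bytes 0..9
//	b := region{begin: 10, end: 19} // bytes 10..19
//	isDup(a, b)                     // false: a.end (9) < b.begin (10)
//
//	c := region{begin: 9, end: 12}
//	isDup(a, c)                     // true: byte 9 is covered by both
//
// sampleBlob.ReadAt keeps calledRegions sorted by begin and fails the
// test when a new read overlaps a neighbouring region, so these tests
// assert that no byte range of the blob is fetched more than once.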
- f, closeFn := makeNodeReader(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory) - defer closeFn() - tmpbuf := make([]byte, size) // fuse library can request bigger than remain - rr, errno := f.Read(context.Background(), tmpbuf, offset) - if errno != 0 { - t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) - return - } - if rsize := rr.Size(); int64(rsize) != wantN { - t.Errorf("read size: %d; want: %d; passed %d", rsize, wantN, size) - return - } - tmpbuf = make([]byte, len(tmpbuf)) - respData, fs := rr.Bytes(tmpbuf) - if fs != fuse.OK { - t.Errorf("failed to read result data for off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) - } - - if !bytes.Equal(wantData, respData) { - t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", - offset, filesize, len(respData), string(respData), wantN, string(wantData)) - return - } - }) + tmpbuf = make([]byte, len(tmpbuf)) + respData, fs := rr.Bytes(tmpbuf) + if fs != fuse.OK { + t.Errorf("failed to read result data for off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) + } + + if !bytes.Equal(wantData, respData) { + t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", + offset, filesize, len(respData), string(respData), wantN, string(wantData)) + return + } + }) + } } } } } } -func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metadata.Store) (_ *file, closeFn func() error) { +func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, cl tutil.Compression) (_ *file, closeFn func() error) { testName := "test" - sr, _, err := testutil.BuildEStargz( - []testutil.TarEntry{testutil.File(testName, string(contents))}, - testutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize)), + sr, tocDgst, err := tutil.BuildEStargz( + []tutil.TarEntry{tutil.File(testName, string(contents))}, + tutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize), estargz.WithCompression(cl)), ) if err != nil { t.Fatalf("failed to build sample eStargz: %v", err) } - r, err := factory(sr) + r, err := factory(sr, metadata.WithDecompressors(cl)) if err != nil { t.Fatalf("failed to create reader: %v", err) } - rootNode := getRootNode(t, r) + rootNode := getRootNode(t, r, OverlayOpaqueAll, tocDgst, cache.NewMemoryCache()) var eo fuse.EntryOut inode, errno := rootNode.Lookup(context.Background(), testName, &eo) if errno != 0 { @@ -371,18 +485,34 @@ func makeNodeReader(t *testing.T, contents []byte, chunkSize int, factory metada return f.(*file), r.Close } -func testExistence(t *testing.T, factory metadata.Store) { +func testNodes(t *testing.T, factory metadata.Store) { + for _, o := range []OverlayOpaqueType{OverlayOpaqueAll, OverlayOpaqueTrusted, OverlayOpaqueUser} { + testNodesWithOpaque(t, factory, o) + } +} + +func testNodesWithOpaque(t *testing.T, factory metadata.Store, opaque OverlayOpaqueType) { + data64KB := string(tutil.RandomBytes(t, 64000)) + hasOpaque := func(entry string) check { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { + for _, k := range opaqueXattrs[opaque] { + hasNodeXattrs(entry, k, opaqueXattrValue)(t, root, cc, cr) + } + } + } tests := []struct { - name string - in []testutil.TarEntry - want []check + name string + chunkSize int + minChunkSize int + in []tutil.TarEntry + want []check }{ { name: "1_whiteout_with_sibling", - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/bar.txt", ""), - 
testutil.File("foo/.wh.foo.txt", ""), + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/bar.txt", ""), + tutil.File("foo/.wh.foo.txt", ""), }, want: []check{ hasValidWhiteout("foo/foo.txt"), @@ -391,10 +521,10 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "1_whiteout_with_duplicated_name", - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/bar.txt", "test"), - testutil.File("foo/.wh.bar.txt", ""), + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/bar.txt", "test"), + tutil.File("foo/.wh.bar.txt", ""), }, want: []check{ hasFileDigest("foo/bar.txt", digestFor("test")), @@ -403,49 +533,46 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "1_opaque", - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/.wh..wh..opq", ""), + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/.wh..wh..opq", ""), }, want: []check{ - hasNodeXattrs("foo/", opaqueXattrs[0], opaqueXattrValue), - hasNodeXattrs("foo/", opaqueXattrs[1], opaqueXattrValue), + hasOpaque("foo/"), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "1_opaque_with_sibling", - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/.wh..wh..opq", ""), - testutil.File("foo/bar.txt", "test"), + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/.wh..wh..opq", ""), + tutil.File("foo/bar.txt", "test"), }, want: []check{ - hasNodeXattrs("foo/", opaqueXattrs[0], opaqueXattrValue), - hasNodeXattrs("foo/", opaqueXattrs[1], opaqueXattrValue), + hasOpaque("foo/"), hasFileDigest("foo/bar.txt", digestFor("test")), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "1_opaque_with_xattr", - in: []testutil.TarEntry{ - testutil.Dir("foo/", testutil.WithDirXattrs(map[string]string{"foo": "bar"})), - testutil.File("foo/.wh..wh..opq", ""), + in: []tutil.TarEntry{ + tutil.Dir("foo/", tutil.WithDirXattrs(map[string]string{"foo": "bar"})), + tutil.File("foo/.wh..wh..opq", ""), }, want: []check{ - hasNodeXattrs("foo/", opaqueXattrs[0], opaqueXattrValue), - hasNodeXattrs("foo/", opaqueXattrs[1], opaqueXattrValue), + hasOpaque("foo/"), hasNodeXattrs("foo/", "foo", "bar"), fileNotExist("foo/.wh..wh..opq"), }, }, { name: "prefetch_landmark", - in: []testutil.TarEntry{ - testutil.File(estargz.PrefetchLandmark, "test"), - testutil.Dir("foo/"), - testutil.File(fmt.Sprintf("foo/%s", estargz.PrefetchLandmark), "test"), + in: []tutil.TarEntry{ + tutil.File(estargz.PrefetchLandmark, "test"), + tutil.Dir("foo/"), + tutil.File(fmt.Sprintf("foo/%s", estargz.PrefetchLandmark), "test"), }, want: []check{ fileNotExist(estargz.PrefetchLandmark), @@ -454,10 +581,10 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "no_prefetch_landmark", - in: []testutil.TarEntry{ - testutil.File(estargz.NoPrefetchLandmark, "test"), - testutil.Dir("foo/"), - testutil.File(fmt.Sprintf("foo/%s", estargz.NoPrefetchLandmark), "test"), + in: []tutil.TarEntry{ + tutil.File(estargz.NoPrefetchLandmark, "test"), + tutil.Dir("foo/"), + tutil.File(fmt.Sprintf("foo/%s", estargz.NoPrefetchLandmark), "test"), }, want: []check{ fileNotExist(estargz.NoPrefetchLandmark), @@ -466,8 +593,8 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "state_file", - in: []testutil.TarEntry{ - testutil.File("test", "test"), + in: []tutil.TarEntry{ + tutil.File("test", "test"), }, want: []check{ hasFileDigest("test", digestFor("test")), @@ -476,8 +603,8 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "file_suid", - in: 
[]testutil.TarEntry{ - testutil.File("test", "test", testutil.WithFileMode(0644|os.ModeSetuid)), + in: []tutil.TarEntry{ + tutil.File("test", "test", tutil.WithFileMode(0644|os.ModeSetuid)), }, want: []check{ hasExtraMode("test", os.ModeSetuid), @@ -485,8 +612,8 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "dir_sgid", - in: []testutil.TarEntry{ - testutil.Dir("test/", testutil.WithDirMode(0755|os.ModeSetgid)), + in: []tutil.TarEntry{ + tutil.Dir("test/", tutil.WithDirMode(0755|os.ModeSetgid)), }, want: []check{ hasExtraMode("test/", os.ModeSetgid), @@ -494,8 +621,8 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "file_sticky", - in: []testutil.TarEntry{ - testutil.File("test", "test", testutil.WithFileMode(0644|os.ModeSticky)), + in: []tutil.TarEntry{ + tutil.File("test", "test", tutil.WithFileMode(0644|os.ModeSticky)), }, want: []check{ hasExtraMode("test", os.ModeSticky), @@ -503,37 +630,117 @@ func testExistence(t *testing.T, factory metadata.Store) { }, { name: "symlink_size", - in: []testutil.TarEntry{ - testutil.Symlink("test", "target"), + in: []tutil.TarEntry{ + tutil.Symlink("test", "target"), }, want: []check{ hasSize("test", len("target")), }, }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + tutil.File("foo22", "ccc"), + tutil.Dir("bar/"), + tutil.File("bar/bar.txt", "aaa"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: []check{ + hasFileContentsWithPreCached("foo22", 0, "ccc", chunkInfo{"foo2", "bb", 0, 2}, chunkInfo{"bar/bar.txt", "aaa", 0, 3}, chunkInfo{"foo3", data64KB, 0, 64000}), + hasFileContentsOffset("foo2", 0, "bb", true), + hasFileContentsOffset("bar/bar.txt", 0, "aaa", true), + hasFileContentsOffset("bar/bar.txt", 1, "aa", true), + hasFileContentsOffset("bar/bar.txt", 2, "a", true), + hasFileContentsOffset("foo3", 0, data64KB, true), + hasFileContentsOffset("foo22", 0, "ccc", true), + hasFileContentsOffset("foo/foo1", 0, data64KB, false), + hasFileContentsOffset("foo/foo1", 0, data64KB, true), + hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true), + hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true), + hasFileContentsOffset("foo/foo1", 3, data64KB[3:], true), + }, + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + tutil.Dir("bar/"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + // landmark+dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: []check{ + hasFileContentsWithPreCached("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000], 0, 32000}), + hasFileContentsOffset("foo2", 0, "bb", true), + hasFileContentsOffset("foo2", 1, "b", true), + hasFileContentsOffset("foo3", 0, data64KB[:len(data64KB)/2], true), + hasFileContentsOffset("foo3", 1, data64KB[1:len(data64KB)/2], true), + hasFileContentsOffset("foo3", 2, data64KB[2:len(data64KB)/2], true), + hasFileContentsOffset("foo3", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], false), + hasFileContentsOffset("foo3", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true), + hasFileContentsOffset("foo/foo1", 0, data64KB, false), + 
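// In the checks around this point, the boolean tail of
// hasFileContentsOffset is the cache expectation: false means the read
// is allowed to hit the underlying blob (the calledReaderAt must record
// at least one offset), true means it must be served entirely from the
// cache. The core of the check, condensed from its definition later in
// this file:
//
//	cr.called = nil // reset the recorder
//	buf := readFile(t, root, name, int64(len(contents)), off)
//	if fromCache && len(cr.called) != 0 { t.Fatal("unexpected blob read") }
//	if !fromCache && len(cr.called) == 0 { t.Fatal("expected a blob read") }
//
// So the first read of foo/foo1 at offset 0 warms the cache, and every
// later read into the same file must come back without touching the blob.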
hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true), + hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true), + hasFileContentsOffset("foo/foo1", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], true), + hasFileContentsOffset("foo/foo1", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true), + }, + }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sgz, _, err := testutil.BuildEStargz(tt.in) - if err != nil { - t.Fatalf("failed to build sample eStargz: %v", err) - } + for _, srcCompression := range srcCompressions { + cl := srcCompression() + t.Run(tt.name, func(t *testing.T) { + opts := []tutil.BuildEStargzOption{ + tutil.WithEStargzOptions(estargz.WithCompression(cl)), + } + if tt.chunkSize > 0 { + opts = append(opts, tutil.WithEStargzOptions(estargz.WithChunkSize(tt.chunkSize))) + } + if tt.minChunkSize > 0 { + opts = append(opts, tutil.WithEStargzOptions(estargz.WithMinChunkSize(tt.minChunkSize))) + } + sgz, tocDgst, err := tutil.BuildEStargz(tt.in, opts...) + if err != nil { + t.Fatalf("failed to build sample eStargz: %v", err) + } - r, err := factory(sgz) - if err != nil { - t.Fatalf("failed to create reader: %v", err) - } - defer r.Close() - rootNode := getRootNode(t, r) - for _, want := range tt.want { - want(t, rootNode) - } - }) + testR := &calledReaderAt{sgz, nil} + r, err := factory(io.NewSectionReader(testR, 0, sgz.Size()), metadata.WithDecompressors(cl)) + if err != nil { + t.Fatalf("failed to create reader: %v", err) + } + defer r.Close() + mcache := cache.NewMemoryCache() + rootNode := getRootNode(t, r, opaque, tocDgst, mcache) + for _, want := range tt.want { + want(t, rootNode, mcache, testR) + } + }) + } } } -func getRootNode(t *testing.T, r metadata.Reader) *node { - rootNode, err := newNode(testStateLayerDigest, &testReader{r}, &testBlobState{10, 5}, 100) +func getRootNode(t *testing.T, r metadata.Reader, opaque OverlayOpaqueType, tocDgst digest.Digest, cc cache.BlobCache) *node { + vr, err := reader.NewReader(r, cc, digest.FromString("")) + if err != nil { + t.Fatalf("failed to create reader: %v", err) + } + rr, err := vr.VerifyTOC(tocDgst) + if err != nil { + t.Fatalf("failed to verify reader: %v", err) + } + rootNode, err := newNode(testStateLayerDigest, rr, &testBlobState{10, 5}, 100, opaque) if err != nil { t.Fatalf("failed to get root node: %v", err) } @@ -541,16 +748,6 @@ func getRootNode(t *testing.T, r metadata.Reader) *node { return rootNode.(*node) } -type testReader struct { - r metadata.Reader -} - -func (tr *testReader) OpenFile(id uint32) (io.ReaderAt, error) { return tr.r.OpenFile(id) } -func (tr *testReader) Metadata() metadata.Reader { return tr.r } -func (tr *testReader) Cache(opts ...reader.CacheOption) error { return nil } -func (tr *testReader) Close() error { return nil } -func (tr *testReader) LastOnDemandReadTime() time.Time { return time.Now() } - type testBlobState struct { size int64 fetchedSize int64 @@ -568,10 +765,10 @@ func (tb *testBlobState) Refresh(ctx context.Context, host source.RegistryHosts, } func (tb *testBlobState) Close() error { return nil } -type check func(*testing.T, *node) +type check func(*testing.T, *node, cache.BlobCache, *calledReaderAt) func fileNotExist(file string) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { if _, _, err := getDirentAndNode(t, root, file); err == nil { t.Errorf("Node %q exists", file) } @@ -579,7 +776,7 @@ func fileNotExist(file string) check { } func hasFileDigest(filename 
string, digest string) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { _, n, err := getDirentAndNode(t, root, filename) if err != nil { t.Fatalf("failed to get node %q: %v", filename, err) @@ -608,7 +805,7 @@ func hasFileDigest(filename string, digest string) check { } func hasSize(name string, size int) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { _, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) @@ -624,7 +821,7 @@ func hasSize(name string, size int) check { } func hasExtraMode(name string, mode os.FileMode) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { _, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) @@ -643,7 +840,7 @@ func hasExtraMode(name string, mode os.FileMode) check { } func hasValidWhiteout(name string) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { ent, n, err := getDirentAndNode(t, root, name) if err != nil { t.Fatalf("failed to get node %q: %v", name, err) @@ -679,7 +876,7 @@ func hasValidWhiteout(name string) check { } func hasNodeXattrs(entry, name, value string) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { _, n, err := getDirentAndNode(t, root, entry) if err != nil { t.Fatalf("failed to get node %q: %v", entry, err) @@ -734,7 +931,7 @@ func hasEntry(t *testing.T, name string, ents fusefs.DirStream) (fuse.DirEntry, } func hasStateFile(t *testing.T, id string) check { - return func(t *testing.T, root *node) { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { // Check the state dir is hidden on OpenDir for "/" ents, errno := root.Readdir(context.Background()) @@ -895,3 +1092,96 @@ func extraModeToTarMode(fm os.FileMode) (tm int64) { } return } + +type chunkInfo struct { + name string + data string + chunkOffset int64 + chunkSize int64 +} + +func hasFileContentsWithPreCached(name string, off int64, contents string, extra ...chunkInfo) check { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { + buf := readFile(t, root, name, int64(len(contents)), off) + if len(buf) != len(contents) { + t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf)) + } + if string(buf) != contents { + t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents))) + } + for _, e := range extra { + cr.called = nil // reset test + data := readFile(t, root, e.name, e.chunkSize, e.chunkOffset) + if string(data) != e.data { + t.Fatalf("unexpected contents of %q (%+v): %q; wanted %q", e.name, e, longBytesView(data), longBytesView([]byte(e.data))) + } + if len(cr.called) != 0 { + t.Fatalf("unexpected read on %q: offsets: %v", e.name, cr.called) + } + } + } +} + +func hasFileContentsOffset(name string, off int64, contents string, fromCache bool) check { + return func(t *testing.T, root *node, cc cache.BlobCache, cr *calledReaderAt) { + cr.called = nil // reset test + buf := readFile(t, root, name, int64(len(contents)), off) + if len(buf) != len(contents) { + t.Fatalf("failed to read contents 
%q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf)) + } + if string(buf) != contents { + t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents))) + } + t.Logf("reader calls for %q: offsets: %+v", name, cr.called) + if fromCache { + if len(cr.called) != 0 { + t.Fatalf("unexpected read on %q: offsets: %v", name, cr.called) + } + } else { + if len(cr.called) == 0 { + t.Fatalf("no call happened to reader for %q", name) + } + } + } +} + +func readFile(t *testing.T, root *node, filename string, size, off int64) []byte { + _, n, err := getDirentAndNode(t, root, filename) + if err != nil { + t.Fatalf("failed to get node %q: %v", filename, err) + } + ni := n.Operations().(*node) + fh, _, errno := ni.Open(context.Background(), 0) + if errno != 0 { + t.Fatalf("failed to open node %q: %v", filename, errno) + } + rr, errno := fh.(*file).Read(context.Background(), make([]byte, size), off) + if errno != 0 { + t.Fatalf("failed to read node %q: %v", filename, errno) + } + buf, status := rr.Bytes(make([]byte, size)) + if status != fuse.OK { + t.Fatalf("failed to get read result of node %q: %v", filename, status) + } + return buf +} + +type calledReaderAt struct { + io.ReaderAt + called []int64 +} + +func (r *calledReaderAt) ReadAt(p []byte, off int64) (int, error) { + r.called = append(r.called, off) + return r.ReaderAt.ReadAt(p, off) +} + +// longBytesView is an alias of []byte suitable for printing a long data as an omitted string to avoid long data being printed. +type longBytesView []byte + +func (b longBytesView) String() string { + if len(b) < 100 { + return string(b) + } + return string(b[:50]) + "...(omit)..." + string(b[len(b)-50:]) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go index 4e9e93b23c66..c860b87dec4e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/reader.go @@ -29,7 +29,6 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "os" "runtime" "sync" @@ -149,7 +148,6 @@ func (vr *VerifiableReader) cacheWithReader(ctx context.Context, currentDepth in if currentDepth > maxWalkDepth { return fmt.Errorf("tree is too deep (depth:%d)", currentDepth) } - gr := vr.r rootID := r.RootID() r.ForeachChild(dirID, func(name string, id uint32, mode os.FileMode) bool { e, err := r.GetAttr(id) @@ -189,7 +187,9 @@ func (vr *VerifiableReader) cacheWithReader(ctx context.Context, currentDepth in return true } - fr, err := r.OpenFile(id) + fr, err := r.OpenFileWithPreReader(id, func(nid uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) (retErr error) { + return vr.readAndCache(nid, r, chunkOffset, chunkSize, chunkDigest, opts...) 
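// OpenFileWithPreReader replaces plain OpenFile here: when several small
// entries share one compressed stream (the new minChunkSize layout),
// positioning the stream on the requested file forces the neighbouring
// entries' chunks to be decompressed too, and the callback receives each
// of them so they can be cached instead of thrown away. A sketch of the
// contract, with cacheChunk as a hypothetical helper:
//
//	fr, err := r.OpenFileWithPreReader(id,
//		func(nid uint32, off, size int64, dgst string, cr io.Reader) error {
//			// nid is usually a different entry than id; cache its chunk
//			// now so a later read of that entry needs no extra fetch.
//			return cacheChunk(nid, off, size, dgst, cr)
//		})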
+ }) if err != nil { rErr = err return false @@ -208,61 +208,13 @@ func (vr *VerifiableReader) cacheWithReader(ctx context.Context, currentDepth in return false } - eg.Go(func() (retErr error) { + eg.Go(func() error { defer sem.Release(1) - defer func() { - if retErr != nil { - vr.storeLastVerifyErr(retErr) - } - }() - - // Check if the target chunks exists in the cache - cacheID := genID(id, chunkOffset, chunkSize) - if r, err := gr.cache.Get(cacheID, opts...); err == nil { - return r.Close() - } - - // missed cache, needs to fetch and add it to the cache - br := bufio.NewReaderSize(io.NewSectionReader(fr, chunkOffset, chunkSize), int(chunkSize)) - if _, err := br.Peek(int(chunkSize)); err != nil { - return fmt.Errorf("cacheWithReader.peek: %v", err) - } - w, err := gr.cache.Add(cacheID, opts...) + err := vr.readAndCache(id, io.NewSectionReader(fr, chunkOffset, chunkSize), chunkOffset, chunkSize, chunkDigestStr, opts...) if err != nil { - return err - } - defer w.Close() - v, err := vr.verifier(id, chunkDigestStr) - if err != nil { - vr.prohibitVerifyFailureMu.RLock() - if vr.prohibitVerifyFailure { - vr.prohibitVerifyFailureMu.RUnlock() - return fmt.Errorf("verifier not found %q(off:%d,size:%d): %w", name, chunkOffset, chunkSize, err) - } - vr.storeLastVerifyErr(err) - vr.prohibitVerifyFailureMu.RUnlock() - } - tee := ioutil.Discard - if v != nil { - tee = io.Writer(v) // verification is required - } - if _, err := io.CopyN(w, io.TeeReader(br, tee), chunkSize); err != nil { - w.Abort() - return fmt.Errorf("failed to cache file payload of %q (offset:%d,size:%d): %w", name, chunkOffset, chunkSize, err) + return fmt.Errorf("failed to read %q (off:%d,size:%d): %w", name, chunkOffset, chunkSize, err) } - if v != nil && !v.Verified() { - err := fmt.Errorf("invalid chunk %q (offset:%d,size:%d)", name, chunkOffset, chunkSize) - vr.prohibitVerifyFailureMu.RLock() - if vr.prohibitVerifyFailure { - vr.prohibitVerifyFailureMu.RUnlock() - w.Abort() - return err - } - vr.storeLastVerifyErr(err) - vr.prohibitVerifyFailureMu.RUnlock() - } - - return w.Commit() + return nil }) } @@ -272,6 +224,63 @@ func (vr *VerifiableReader) cacheWithReader(ctx context.Context, currentDepth in return } +func (vr *VerifiableReader) readAndCache(id uint32, fr io.Reader, chunkOffset, chunkSize int64, chunkDigest string, opts ...cache.Option) (retErr error) { + gr := vr.r + + if retErr != nil { + vr.storeLastVerifyErr(retErr) + } + + // Check if it already exists in the cache + cacheID := genID(id, chunkOffset, chunkSize) + if r, err := gr.cache.Get(cacheID); err == nil { + r.Close() + return nil + } + + // missed cache, needs to fetch and add it to the cache + br := bufio.NewReaderSize(fr, int(chunkSize)) + if _, err := br.Peek(int(chunkSize)); err != nil { + return fmt.Errorf("cacheWithReader.peek: %v", err) + } + w, err := gr.cache.Add(cacheID, opts...) 
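// The body below streams each chunk exactly once while both caching and
// verifying it: the bytes go to the cache writer, and an io.TeeReader
// mirrors them into a digest verifier. The same pattern in isolation,
// using only the go-digest API (error handling trimmed):
//
//	v := dgst.Verifier() // dgst is an opencontainers go-digest Digest
//	if _, err := io.Copy(w, io.TeeReader(src, v)); err != nil {
//		w.Abort() // never commit a partially written chunk
//		return err
//	}
//	if !v.Verified() {
//		w.Abort()
//		return fmt.Errorf("chunk digest mismatch")
//	}
//	return w.Commit()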
+ if err != nil { + return err + } + defer w.Close() + v, err := vr.verifier(id, chunkDigest) + if err != nil { + vr.prohibitVerifyFailureMu.RLock() + if vr.prohibitVerifyFailure { + vr.prohibitVerifyFailureMu.RUnlock() + return fmt.Errorf("verifier not found: %w", err) + } + vr.storeLastVerifyErr(err) + vr.prohibitVerifyFailureMu.RUnlock() + } + tee := io.Discard + if v != nil { + tee = io.Writer(v) // verification is required + } + if _, err := io.CopyN(w, io.TeeReader(br, tee), chunkSize); err != nil { + w.Abort() + return fmt.Errorf("failed to cache file payload: %w", err) + } + if v != nil && !v.Verified() { + err := fmt.Errorf("invalid chunk") + vr.prohibitVerifyFailureMu.RLock() + if vr.prohibitVerifyFailure { + vr.prohibitVerifyFailureMu.RUnlock() + w.Abort() + return err + } + vr.storeLastVerifyErr(err) + vr.prohibitVerifyFailureMu.RUnlock() + } + + return w.Commit() +} + func (vr *VerifiableReader) Close() error { vr.closedMu.Lock() defer vr.closedMu.Unlock() @@ -346,7 +355,27 @@ func (gr *reader) OpenFile(id uint32) (io.ReaderAt, error) { return nil, fmt.Errorf("reader is already closed") } var fr metadata.File - fr, err := gr.r.OpenFile(id) + fr, err := gr.r.OpenFileWithPreReader(id, func(nid uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error { + // Check if it already exists in the cache + cacheID := genID(nid, chunkOffset, chunkSize) + if r, err := gr.cache.Get(cacheID); err == nil { + r.Close() + return nil + } + + // Read and cache + b := gr.bufPool.Get().(*bytes.Buffer) + b.Reset() + b.Grow(int(chunkSize)) + ip := b.Bytes()[:chunkSize] + if _, err := io.ReadFull(r, ip); err != nil { + gr.putBuffer(b) + return err + } + err := gr.verifyAndCache(nid, ip, chunkDigest, cacheID) + gr.putBuffer(b) + return err + }) if err != nil { return nil, fmt.Errorf("failed to open file %d: %w", id, err) } @@ -428,24 +457,8 @@ func (sf *file) ReadAt(p []byte, offset int64) (int, error) { if err != nil && err != io.EOF { return 0, fmt.Errorf("failed to read data: %w", err) } - - commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, sf.gr.layerSha) // increment the number of on demand file fetches from remote registry - commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, sf.gr.layerSha, int64(n)) // record total bytes fetched - sf.gr.setLastReadTime(time.Now()) - - // Verify this chunk - if err := sf.verify(sf.id, ip, chunkDigestStr); err != nil { - return 0, fmt.Errorf("invalid chunk: %w", err) - } - - // Cache this chunk - if w, err := sf.gr.cache.Add(id); err == nil { - if cn, err := w.Write(ip); err != nil || cn != len(ip) { - w.Abort() - } else { - w.Commit() - } - w.Close() + if err := sf.gr.verifyAndCache(sf.id, ip, chunkDigestStr, id); err != nil { + return 0, err } nr += n continue @@ -460,26 +473,9 @@ func (sf *file) ReadAt(p []byte, offset int64) (int, error) { sf.gr.putBuffer(b) return 0, fmt.Errorf("failed to read data: %w", err) } - - // We can end up doing on demand registry fetch when aligning the chunk - commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, sf.gr.layerSha) // increment the number of on demand file fetches from remote registry - commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, sf.gr.layerSha, int64(len(ip))) // record total bytes fetched - sf.gr.setLastReadTime(time.Now()) - - // Verify this chunk - if err := sf.verify(sf.id, ip, chunkDigestStr); err != nil { + if err := sf.gr.verifyAndCache(sf.id, ip, chunkDigestStr, id); err != nil { sf.gr.putBuffer(b) - 
return 0, fmt.Errorf("invalid chunk: %w", err) - } - - // Cache this chunk - if w, err := sf.gr.cache.Add(id); err == nil { - if cn, err := w.Write(ip); err != nil || cn != len(ip) { - w.Abort() - } else { - w.Commit() - } - w.Close() + return 0, err } n := copy(p[nr:], ip[lowerDiscard:chunkSize-upperDiscard]) sf.gr.putBuffer(b) @@ -494,11 +490,35 @@ func (sf *file) ReadAt(p []byte, offset int64) (int, error) { return nr, nil } -func (sf *file) verify(id uint32, p []byte, chunkDigestStr string) error { - if !sf.gr.verify { +func (gr *reader) verifyAndCache(entryID uint32, ip []byte, chunkDigestStr string, cacheID string) error { + // We can end up doing on demand registry fetch when aligning the chunk + commonmetrics.IncOperationCount(commonmetrics.OnDemandRemoteRegistryFetchCount, gr.layerSha) // increment the number of on demand file fetches from remote registry + commonmetrics.AddBytesCount(commonmetrics.OnDemandBytesFetched, gr.layerSha, int64(len(ip))) // record total bytes fetched + gr.setLastReadTime(time.Now()) + + // Verify this chunk + if err := gr.verifyChunk(entryID, ip, chunkDigestStr); err != nil { + return fmt.Errorf("invalid chunk: %w", err) + } + + // Cache this chunk + if w, err := gr.cache.Add(cacheID); err == nil { + if cn, err := w.Write(ip); err != nil || cn != len(ip) { + w.Abort() + } else { + w.Commit() + } + w.Close() + } + + return nil +} + +func (gr *reader) verifyChunk(id uint32, p []byte, chunkDigestStr string) error { + if !gr.verify { return nil // verification is not required } - v, err := sf.gr.verifier(id, chunkDigestStr) + v, err := gr.verifier(id, chunkDigestStr) if err != nil { return fmt.Errorf("invalid chunk: %w", err) } @@ -553,7 +573,7 @@ func WithReader(sr *io.SectionReader) CacheOption { func digestVerifier(id uint32, chunkDigestStr string) (digest.Verifier, error) { chunkDigest, err := digest.Parse(chunkDigestStr) if err != nil { - return nil, fmt.Errorf("invalid chunk: no digset is recorded: %w", err) + return nil, fmt.Errorf("invalid chunk: no digest is recorded(len=%d): %w", len(chunkDigestStr), err) } return chunkDigest.Verifier(), nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go index 67b1b91a1624..527ca96c695a 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/reader/testutil.go @@ -24,10 +24,12 @@ package reader import ( "bytes" + "compress/gzip" "fmt" "io" "os" "path" + "path/filepath" "strings" "sync" "testing" @@ -37,6 +39,8 @@ import ( "github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/util/testutil" + tutil "github.com/containerd/stargz-snapshotter/util/testutil" + "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" "golang.org/x/sync/errgroup" ) @@ -50,10 +54,17 @@ const ( lastChunkOffset1 = sampleChunkSize * (int64(len(sampleData1)) / sampleChunkSize) ) +var srcCompressions = map[string]tutil.CompressionFactory{ + "zstd-fastest": tutil.ZstdCompressionWithLevel(zstd.SpeedFastest), + "gzip-bestspeed": tutil.GzipCompressionWithLevel(gzip.BestSpeed), + "externaltoc-gzip-bestspeed": tutil.ExternalTOCGzipCompressionWithLevel(gzip.BestSpeed), +} + func TestSuiteReader(t *testing.T, store metadata.Store) { testFileReadAt(t, store) testCacheVerify(t, store) testFailReader(t, store) + testPreReader(t, store) } func 
testFileReadAt(t *testing.T, factory metadata.Store) { @@ -91,88 +102,91 @@ func testFileReadAt(t *testing.T, factory metadata.Store) { for bo, baseo := range baseOffsetCond { for fn, filesize := range fileSizeCond { for cc, cacheExcept := range cacheCond { - t.Run(fmt.Sprintf("reading_%s_%s_%s_%s_%s", sn, in, bo, fn, cc), func(t *testing.T) { - if filesize > int64(len(sampleData1)) { - t.Fatal("sample file size is larger than sample data") - } - - wantN := size - offset := baseo + innero - if remain := filesize - offset; remain < wantN { - if wantN = remain; wantN < 0 { - wantN = 0 + for srcCompressionName, srcCompression := range srcCompressions { + srcCompression := srcCompression() + t.Run(fmt.Sprintf("reading_%s_%s_%s_%s_%s_%s", sn, in, bo, fn, cc, srcCompressionName), func(t *testing.T) { + if filesize > int64(len(sampleData1)) { + t.Fatal("sample file size is larger than sample data") } - } - // use constant string value as a data source. - want := strings.NewReader(sampleData1) + wantN := size + offset := baseo + innero + if remain := filesize - offset; remain < wantN { + if wantN = remain; wantN < 0 { + wantN = 0 + } + } - // data we want to get. - wantData := make([]byte, wantN) - _, err := want.ReadAt(wantData, offset) - if err != nil && err != io.EOF { - t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) - } + // use constant string value as a data source. + want := strings.NewReader(sampleData1) - // data we get through a file. - f, closeFn := makeFile(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory) - defer closeFn() - f.fr = newExceptFile(t, f.fr, cacheExcept...) - for _, reg := range cacheExcept { - id := genID(f.id, reg.b, reg.e-reg.b+1) - w, err := f.gr.cache.Add(id) - if err != nil { - w.Close() - t.Fatalf("failed to add cache %v: %v", id, err) - } - if _, err := w.Write([]byte(sampleData1[reg.b : reg.e+1])); err != nil { - w.Close() - t.Fatalf("failed to write cache %v: %v", id, err) + // data we want to get. + wantData := make([]byte, wantN) + _, err := want.ReadAt(wantData, offset) + if err != nil && err != io.EOF { + t.Fatalf("want.ReadAt (offset=%d,size=%d): %v", offset, wantN, err) } - if err := w.Commit(); err != nil { - w.Close() - t.Fatalf("failed to commit cache %v: %v", id, err) - } - w.Close() - } - respData := make([]byte, size) - n, err := f.ReadAt(respData, offset) - if err != nil { - t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) - return - } - respData = respData[:n] - - if !bytes.Equal(wantData, respData) { - t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", - offset, filesize, len(respData), string(respData), wantN, string(wantData)) - return - } - // check cache has valid contents. - cn := 0 - nr := 0 - for int64(nr) < wantN { - chunkOffset, chunkSize, _, ok := f.fr.ChunkEntryForOffset(offset + int64(nr)) - if !ok { - break + // data we get through a file. + f, closeFn := makeFile(t, []byte(sampleData1)[:filesize], sampleChunkSize, factory, srcCompression) + defer closeFn() + f.fr = newExceptFile(t, f.fr, cacheExcept...) 
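// The loop below seeds the cache through the same writer interface the
// reader uses at runtime, so the "cacheExcept" chunks are served from
// memory and must never reach the underlying blob. The round trip in
// isolation (a sketch; error handling elided):
//
//	id := genID(fileID, chunkOffset, chunkSize)
//	w, _ := mcache.Add(id) // cache.BlobCache from cache.NewMemoryCache()
//	w.Write(chunkData)     // stage the bytes
//	w.Commit()             // publish them; uncommitted writes stay invisible
//	w.Close()
//	r, _ := mcache.Get(id) // subsequent reads hit memory, not the registry
//	defer r.Close()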
+ for _, reg := range cacheExcept { + id := genID(f.id, reg.b, reg.e-reg.b+1) + w, err := f.gr.cache.Add(id) + if err != nil { + w.Close() + t.Fatalf("failed to add cache %v: %v", id, err) + } + if _, err := w.Write([]byte(sampleData1[reg.b : reg.e+1])); err != nil { + w.Close() + t.Fatalf("failed to write cache %v: %v", id, err) + } + if err := w.Commit(); err != nil { + w.Close() + t.Fatalf("failed to commit cache %v: %v", id, err) + } + w.Close() } - data := make([]byte, chunkSize) - id := genID(f.id, chunkOffset, chunkSize) - r, err := f.gr.cache.Get(id) + respData := make([]byte, size) + n, err := f.ReadAt(respData, offset) if err != nil { - t.Errorf("missed cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n) + t.Errorf("failed to read off=%d, size=%d, filesize=%d: %v", offset, size, filesize, err) return } - defer r.Close() - if n, err := r.ReadAt(data, 0); (err != nil && err != io.EOF) || n != int(chunkSize) { - t.Errorf("failed to read cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n) + respData = respData[:n] + + if !bytes.Equal(wantData, respData) { + t.Errorf("off=%d, filesize=%d; read data{size=%d,data=%q}; want (size=%d,data=%q)", + offset, filesize, len(respData), string(respData), wantN, string(wantData)) return } - nr += n - cn++ - } - }) + + // check cache has valid contents. + cn := 0 + nr := 0 + for int64(nr) < wantN { + chunkOffset, chunkSize, _, ok := f.fr.ChunkEntryForOffset(offset + int64(nr)) + if !ok { + break + } + data := make([]byte, chunkSize) + id := genID(f.id, chunkOffset, chunkSize) + r, err := f.gr.cache.Get(id) + if err != nil { + t.Errorf("missed cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n) + return + } + defer r.Close() + if n, err := r.ReadAt(data, 0); (err != nil && err != io.EOF) || n != int(chunkSize) { + t.Errorf("failed to read cache of offset=%d, size=%d: %v(got size=%d)", chunkOffset, chunkSize, err, n) + return + } + nr += n + cn++ + } + }) + } } } } @@ -206,15 +220,15 @@ func (er *exceptFile) ChunkEntryForOffset(offset int64) (off int64, size int64, return er.fr.ChunkEntryForOffset(offset) } -func makeFile(t *testing.T, contents []byte, chunkSize int, factory metadata.Store) (*file, func() error) { +func makeFile(t *testing.T, contents []byte, chunkSize int, factory metadata.Store, comp tutil.Compression) (*file, func() error) { testName := "test" sr, dgst, err := testutil.BuildEStargz([]testutil.TarEntry{ testutil.File(testName, string(contents)), - }, testutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize))) + }, testutil.WithEStargzOptions(estargz.WithChunkSize(chunkSize), estargz.WithCompression(comp))) if err != nil { t.Fatalf("failed to build sample estargz") } - mr, err := factory(sr) + mr, err := factory(sr, metadata.WithDecompressors(comp)) if err != nil { t.Fatalf("failed to create reader: %v", err) } @@ -247,120 +261,139 @@ func makeFile(t *testing.T, contents []byte, chunkSize int, factory metadata.Sto } func testCacheVerify(t *testing.T, factory metadata.Store) { - sr, tocDgst, err := testutil.BuildEStargz([]testutil.TarEntry{ - testutil.File("a", sampleData1+"a"), - testutil.File("b", sampleData1+"b"), - }, testutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize))) - if err != nil { - t.Fatalf("failed to build sample estargz") - } for _, skipVerify := range [2]bool{true, false} { for _, invalidChunkBeforeVerify := range [2]bool{true, false} { for _, invalidChunkAfterVerify := range [2]bool{true, false} { - name := 
fmt.Sprintf("test_cache_verify_%v_%v_%v", - skipVerify, invalidChunkBeforeVerify, invalidChunkAfterVerify) - t.Run(name, func(t *testing.T) { - - // Determine the expected behaviour - var wantVerifyFail, wantCacheFail, wantCacheFail2 bool - if skipVerify { - // always no error if verification is disabled - wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false - } else if invalidChunkBeforeVerify { - // errors occurred before verifying TOC must be reported via VerifyTOC() - wantVerifyFail = true - } else if invalidChunkAfterVerify { - // errors occurred after verifying TOC must be reported via Cache() - wantVerifyFail, wantCacheFail, wantCacheFail2 = false, true, true - } else { - // otherwise no verification error - wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false - } - - // Prepare reader - verifier := &failIDVerifier{} - mr, err := factory(sr) - if err != nil { - t.Fatalf("failed to prepare reader %v", err) - } - defer mr.Close() - vr, err := NewReader(mr, cache.NewMemoryCache(), digest.FromString("")) - if err != nil { - t.Fatalf("failed to make new reader: %v", err) - } - if verifier != nil { - vr.verifier = verifier.verifier - vr.r.verifier = verifier.verifier - } - - off2id, id2path, err := prepareMap(vr.Metadata(), vr.Metadata().RootID(), "") - if err != nil || off2id == nil || id2path == nil { - t.Fatalf("failed to prepare offset map %v, off2id = %+v, id2path = %+v", err, off2id, id2path) - } - - // Perform Cache() before verification - // 1. Either of "a" or "b" is read and verified - // 2. VerifyTOC/SkipVerify is called - // 3. Another entry ("a" or "b") is called - verifyDone := make(chan struct{}) - var firstEntryCalled bool - var eg errgroup.Group - eg.Go(func() error { - return vr.Cache(WithFilter(func(off int64) bool { - id, ok := off2id[off] - if !ok { - t.Fatalf("no ID is assigned to offset %d", off) - } - name, ok := id2path[id] - if !ok { - t.Fatalf("no name is assigned to id %d", id) - } - if name == "a" || name == "b" { - if !firstEntryCalled { - firstEntryCalled = true - if invalidChunkBeforeVerify { + for srcCompressionName, srcCompression := range srcCompressions { + srcCompression := srcCompression() + name := fmt.Sprintf("test_cache_verify_%v_%v_%v_%v", + skipVerify, invalidChunkBeforeVerify, invalidChunkAfterVerify, srcCompressionName) + t.Run(name, func(t *testing.T) { + sr, tocDgst, err := testutil.BuildEStargz([]testutil.TarEntry{ + testutil.File("a", sampleData1+"a"), + testutil.File("b", sampleData1+"b"), + }, testutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize), estargz.WithCompression(srcCompression))) + if err != nil { + t.Fatalf("failed to build sample estargz") + } + + // Determine the expected behaviour + var wantVerifyFail, wantCacheFail, wantCacheFail2 bool + if skipVerify { + // always no error if verification is disabled + wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false + } else if invalidChunkBeforeVerify { + // errors occurred before verifying TOC must be reported via VerifyTOC() + wantVerifyFail = true + } else if invalidChunkAfterVerify { + // errors occurred after verifying TOC must be reported via Cache() + wantVerifyFail, wantCacheFail, wantCacheFail2 = false, true, true + } else { + // otherwise no verification error + wantVerifyFail, wantCacheFail, wantCacheFail2 = false, false, false + } + + // Prepare reader + verifier := &failIDVerifier{} + mr, err := factory(sr, metadata.WithDecompressors(srcCompression)) + if err != nil { + t.Fatalf("failed to prepare reader %v", err) 
+ } + defer mr.Close() + vr, err := NewReader(mr, cache.NewMemoryCache(), digest.FromString("")) + if err != nil { + t.Fatalf("failed to make new reader: %v", err) + } + if verifier != nil { + vr.verifier = verifier.verifier + vr.r.verifier = verifier.verifier + } + + off2id, id2path, err := prepareMap(vr.Metadata(), vr.Metadata().RootID(), "") + if err != nil || off2id == nil || id2path == nil { + t.Fatalf("failed to prepare offset map %v, off2id = %+v, id2path = %+v", err, off2id, id2path) + } + + // Perform Cache() before verification + // 1. Either of "a" or "b" is read and verified + // 2. VerifyTOC/SkipVerify is called + // 3. Another entry ("a" or "b") is called + verifyDone := make(chan struct{}) + var firstEntryCalled bool + var eg errgroup.Group + var mu sync.Mutex + eg.Go(func() error { + return vr.Cache(WithFilter(func(off int64) bool { + id, ok := off2id[off] + if !ok { + t.Fatalf("no ID is assigned to offset %d", off) + } + name, ok := id2path[id] + if !ok { + t.Fatalf("no name is assigned to id %d", id) + } + if name == "a" || name == "b" { + mu.Lock() + if !firstEntryCalled { + firstEntryCalled = true + if invalidChunkBeforeVerify { + verifier.registerFails([]uint32{id}) + } + mu.Unlock() + return true + } + mu.Unlock() + <-verifyDone + if invalidChunkAfterVerify { verifier.registerFails([]uint32{id}) } return true } - <-verifyDone - if invalidChunkAfterVerify { - verifier.registerFails([]uint32{id}) + return false + })) + }) + if invalidChunkBeforeVerify { + // wait for encountering the error of the first chunk read + start := time.Now() + for { + if err := vr.loadLastVerifyErr(); err != nil { + break + } + if time.Since(start) > time.Second { + t.Fatalf("timeout(1s): failed to wait for read error is registered") } - return true + time.Sleep(10 * time.Millisecond) } - return false - })) + } + + // Perform verification + if skipVerify { + vr.SkipVerify() + } else { + _, err = vr.VerifyTOC(tocDgst) + } + if checkErr := checkError(wantVerifyFail, err); checkErr != nil { + t.Errorf("verify: %v", checkErr) + return + } + if err != nil { + return + } + close(verifyDone) + + // Check the result of Cache() + if checkErr := checkError(wantCacheFail, eg.Wait()); checkErr != nil { + t.Errorf("cache: %v", checkErr) + return + } + + // Call Cache() again and check the result + if checkErr := checkError(wantCacheFail2, vr.Cache()); checkErr != nil { + t.Errorf("cache(2): %v", checkErr) + return + } }) - time.Sleep(10 * time.Millisecond) - - // Perform verification - if skipVerify { - vr.SkipVerify() - } else { - _, err = vr.VerifyTOC(tocDgst) - } - if checkErr := checkError(wantVerifyFail, err); checkErr != nil { - t.Errorf("verify: %v", checkErr) - return - } - if err != nil { - return - } - close(verifyDone) - - // Check the result of Cache() - if checkErr := checkError(wantCacheFail, eg.Wait()); checkErr != nil { - t.Errorf("cache: %v", checkErr) - return - } - - // Call Cache() again and check the result - if checkErr := checkError(wantCacheFail2, vr.Cache()); checkErr != nil { - t.Errorf("cache(2): %v", checkErr) - return - } - }) + } } } } @@ -449,88 +482,93 @@ func prepareMap(mr metadata.Reader, id uint32, p string) (off2id map[int64]uint3 func testFailReader(t *testing.T, factory metadata.Store) { testFileName := "test" - stargzFile, tocDigest, err := testutil.BuildEStargz([]testutil.TarEntry{ - testutil.File(testFileName, sampleData1), - }, testutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize))) - if err != nil { - t.Fatalf("failed to build sample estargz") - } + 
for srcCompressionName, srcCompression := range srcCompressions { + srcCompression := srcCompression() + t.Run(fmt.Sprintf("%v", srcCompressionName), func(t *testing.T) { + for _, rs := range []bool{true, false} { + for _, vs := range []bool{true, false} { + stargzFile, tocDigest, err := testutil.BuildEStargz([]testutil.TarEntry{ + testutil.File(testFileName, sampleData1), + }, testutil.WithEStargzOptions(estargz.WithChunkSize(sampleChunkSize), estargz.WithCompression(srcCompression))) + if err != nil { + t.Fatalf("failed to build sample estargz") + } - for _, rs := range []bool{true, false} { - for _, vs := range []bool{true, false} { - br := &breakReaderAt{ - ReaderAt: stargzFile, - success: true, - } - bev := &testChunkVerifier{true} - mcache := cache.NewMemoryCache() - mr, err := factory(io.NewSectionReader(br, 0, stargzFile.Size())) - if err != nil { - t.Fatalf("failed to prepare metadata reader") - } - defer mr.Close() - vr, err := NewReader(mr, mcache, digest.FromString("")) - if err != nil { - t.Fatalf("failed to make new reader: %v", err) - } - defer vr.Close() - vr.verifier = bev.verifier - vr.r.verifier = bev.verifier - gr, err := vr.VerifyTOC(tocDigest) - if err != nil { - t.Fatalf("failed to verify TOC: %v", err) - } + br := &breakReaderAt{ + ReaderAt: stargzFile, + success: true, + } + bev := &testChunkVerifier{true} + mcache := cache.NewMemoryCache() + mr, err := factory(io.NewSectionReader(br, 0, stargzFile.Size()), metadata.WithDecompressors(srcCompression)) + if err != nil { + t.Fatalf("failed to prepare metadata reader") + } + defer mr.Close() + vr, err := NewReader(mr, mcache, digest.FromString("")) + if err != nil { + t.Fatalf("failed to make new reader: %v", err) + } + defer vr.Close() + vr.verifier = bev.verifier + vr.r.verifier = bev.verifier + gr, err := vr.VerifyTOC(tocDigest) + if err != nil { + t.Fatalf("failed to verify TOC: %v", err) + } - notexist := uint32(0) - found := false - for i := uint32(0); i < 1000000; i++ { - if _, err := gr.Metadata().GetAttr(i); err != nil { - notexist, found = i, true - break - } - } - if !found { - t.Fatalf("free ID not found") - } + notexist := uint32(0) + found := false + for i := uint32(0); i < 1000000; i++ { + if _, err := gr.Metadata().GetAttr(i); err != nil { + notexist, found = i, true + break + } + } + if !found { + t.Fatalf("free ID not found") + } - // tests for opening non-existing file - _, err = gr.OpenFile(notexist) - if err == nil { - t.Errorf("succeeded to open file but wanted to fail") - return - } + // tests for opening non-existing file + _, err = gr.OpenFile(notexist) + if err == nil { + t.Errorf("succeeded to open file but wanted to fail") + return + } - // tests failure behaviour of a file read - tid, _, err := gr.Metadata().GetChild(gr.Metadata().RootID(), testFileName) - if err != nil { - t.Errorf("failed to get %q: %v", testFileName, err) - return - } - fr, err := gr.OpenFile(tid) - if err != nil { - t.Errorf("failed to open file but wanted to succeed: %v", err) - return - } + // tests failure behaviour of a file read + tid, _, err := gr.Metadata().GetChild(gr.Metadata().RootID(), testFileName) + if err != nil { + t.Errorf("failed to get %q: %v", testFileName, err) + return + } + fr, err := gr.OpenFile(tid) + if err != nil { + t.Errorf("failed to open file but wanted to succeed: %v", err) + return + } - mcache.(*cache.MemoryCache).Membuf = map[string]*bytes.Buffer{} - br.success = rs - bev.success = vs - - // tests for reading file - p := make([]byte, len(sampleData1)) - n, err := fr.ReadAt(p, 0) - if rs 
&& vs { - if err != nil || n != len(sampleData1) || !bytes.Equal([]byte(sampleData1), p) { - t.Errorf("failed to read data but wanted to succeed: %v", err) - return - } - } else { - if err == nil { - t.Errorf("succeeded to read data but wanted to fail (reader:%v,verify:%v)", rs, vs) - return + mcache.(*cache.MemoryCache).Membuf = map[string]*bytes.Buffer{} + br.success = rs + bev.success = vs + + // tests for reading file + p := make([]byte, len(sampleData1)) + n, err := fr.ReadAt(p, 0) + if rs && vs { + if err != nil || n != len(sampleData1) || !bytes.Equal([]byte(sampleData1), p) { + t.Errorf("failed to read data but wanted to succeed: %v", err) + return + } + } else { + if err == nil { + t.Errorf("succeeded to read data but wanted to fail (reader:%v,verify:%v)", rs, vs) + return + } + } } } - } + }) } } @@ -553,3 +591,234 @@ type testChunkVerifier struct { func (bev *testChunkVerifier) verifier(id uint32, chunkDigest string) (digest.Verifier, error) { return &testVerifier{bev.success}, nil } + +func testPreReader(t *testing.T, factory metadata.Store) { + data64KB := string(tutil.RandomBytes(t, 64000)) + tests := []struct { + name string + chunkSize int + minChunkSize int + in []tutil.TarEntry + want []check + }{ + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + tutil.File("foo22", "ccc"), + tutil.Dir("bar/"), + tutil.File("bar/bar.txt", "aaa"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + // landmark+dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: []check{ + hasFileContentsWithPreCached("foo22", 0, "ccc", chunkInfo{"foo2", "bb", 0, 2}, chunkInfo{"bar/bar.txt", "aaa", 0, 3}, chunkInfo{"foo3", data64KB, 0, 64000}), + hasFileContentsOffset("foo2", 0, "bb", true), + hasFileContentsOffset("bar/bar.txt", 0, "aaa", true), + hasFileContentsOffset("bar/bar.txt", 1, "aa", true), + hasFileContentsOffset("bar/bar.txt", 2, "a", true), + hasFileContentsOffset("foo3", 0, data64KB, true), + hasFileContentsOffset("foo22", 0, "ccc", true), + hasFileContentsOffset("foo/foo1", 0, data64KB, false), + hasFileContentsOffset("foo/foo1", 0, data64KB, true), + hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true), + hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true), + hasFileContentsOffset("foo/foo1", 3, data64KB[3:], true), + }, + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: []tutil.TarEntry{ + tutil.Dir("foo/"), + tutil.File("foo/foo1", data64KB), + tutil.File("foo2", "bb"), + tutil.Dir("bar/"), + tutil.File("foo3", data64KB), + }, + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + // landmark+dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: []check{ + hasFileContentsWithPreCached("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000], 0, 32000}), + hasFileContentsOffset("foo2", 0, "bb", true), + hasFileContentsOffset("foo2", 1, "b", true), + hasFileContentsOffset("foo3", 0, data64KB[:len(data64KB)/2], true), + hasFileContentsOffset("foo3", 1, data64KB[1:len(data64KB)/2], true), + hasFileContentsOffset("foo3", 2, data64KB[2:len(data64KB)/2], true), + hasFileContentsOffset("foo3", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], false), + hasFileContentsOffset("foo3", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true), + hasFileContentsOffset("foo/foo1", 0, data64KB, false), + 
hasFileContentsOffset("foo/foo1", 1, data64KB[1:], true), + hasFileContentsOffset("foo/foo1", 2, data64KB[2:], true), + hasFileContentsOffset("foo/foo1", int64(len(data64KB)/2), data64KB[len(data64KB)/2:], true), + hasFileContentsOffset("foo/foo1", int64(len(data64KB)-1), data64KB[len(data64KB)-1:], true), + }, + }, + } + for _, tt := range tests { + for srcCompresionName, srcCompression := range srcCompressions { + srcCompression := srcCompression() + t.Run(tt.name+"-"+srcCompresionName, func(t *testing.T) { + opts := []tutil.BuildEStargzOption{ + tutil.WithEStargzOptions(estargz.WithCompression(srcCompression)), + } + if tt.chunkSize > 0 { + opts = append(opts, tutil.WithEStargzOptions(estargz.WithChunkSize(tt.chunkSize))) + } + if tt.minChunkSize > 0 { + t.Logf("minChunkSize = %d", tt.minChunkSize) + opts = append(opts, tutil.WithEStargzOptions(estargz.WithMinChunkSize(tt.minChunkSize))) + } + esgz, tocDgst, err := tutil.BuildEStargz(tt.in, opts...) + if err != nil { + t.Fatalf("failed to build sample eStargz: %v", err) + } + testR := &calledReaderAt{esgz, nil} + mr, err := factory(io.NewSectionReader(testR, 0, esgz.Size()), metadata.WithDecompressors(srcCompression)) + if err != nil { + t.Fatalf("failed to create new reader: %v", err) + } + defer mr.Close() + memcache := cache.NewMemoryCache() + vr, err := NewReader(mr, memcache, digest.FromString("")) + if err != nil { + t.Fatalf("failed to make new reader: %v", err) + } + rr, err := vr.VerifyTOC(tocDgst) + if err != nil { + t.Fatalf("failed to verify TOC: %v", err) + } + r := rr.(*reader) + for _, want := range tt.want { + want(t, r, testR) + } + }) + } + } +} + +type check func(*testing.T, *reader, *calledReaderAt) + +type chunkInfo struct { + name string + data string + chunkOffset int64 + chunkSize int64 +} + +func hasFileContentsOffset(name string, off int64, contents string, fromCache bool) check { + return func(t *testing.T, r *reader, cr *calledReaderAt) { + tid, err := lookup(r, name) + if err != nil { + t.Fatalf("failed to lookup %q", name) + } + ra, err := r.OpenFile(tid) + if err != nil { + t.Fatalf("Failed to open testing file: %v", err) + } + cr.called = nil // reset test + buf := make([]byte, len(contents)) + n, err := ra.ReadAt(buf, off) + if err != nil { + t.Fatalf("failed to readat %q: %v", name, err) + } + if n != len(contents) { + t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), longBytesView(buf)) + } + if string(buf) != contents { + t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents))) + } + t.Logf("reader calls for %q: offsets: %+v", name, cr.called) + if fromCache { + if len(cr.called) != 0 { + t.Fatalf("unexpected read on %q: offsets: %v", name, cr.called) + } + } else { + if len(cr.called) == 0 { + t.Fatalf("no call happened to reader for %q", name) + } + } + } +} + +func hasFileContentsWithPreCached(name string, off int64, contents string, extra ...chunkInfo) check { + return func(t *testing.T, r *reader, cr *calledReaderAt) { + tid, err := lookup(r, name) + if err != nil { + t.Fatalf("failed to lookup %q", name) + } + ra, err := r.OpenFile(tid) + if err != nil { + t.Fatalf("Failed to open testing file: %v", err) + } + buf := make([]byte, len(contents)) + n, err := ra.ReadAt(buf, off) + if err != nil { + t.Fatalf("failed to readat %q: %v", name, err) + } + if n != len(contents) { + t.Fatalf("failed to read contents %q (off:%d, want:%q) got %q", name, off, longBytesView([]byte(contents)), 
longBytesView(buf)) + } + if string(buf) != contents { + t.Fatalf("unexpected content of %q: %q want %q", name, longBytesView(buf), longBytesView([]byte(contents))) + } + for _, e := range extra { + eid, err := lookup(r, e.name) + if err != nil { + t.Fatalf("failed to lookup %q", e.name) + } + cacheID := genID(eid, e.chunkOffset, e.chunkSize) + er, err := r.cache.Get(cacheID) + if err != nil { + t.Fatalf("failed to get cache %q: %+v", cacheID, e) + } + data, err := io.ReadAll(io.NewSectionReader(er, 0, e.chunkSize)) + er.Close() + if err != nil { + t.Fatalf("failed to read cache %q: %+v", cacheID, e) + } + if string(data) != e.data { + t.Fatalf("unexpected contents of cache %q (%+v): %q; wanted %q", cacheID, e, longBytesView(data), longBytesView([]byte(e.data))) + } + } + } +} + +func lookup(r *reader, name string) (uint32, error) { + name = strings.TrimPrefix(path.Clean("/"+name), "/") + if name == "" { + return r.Metadata().RootID(), nil + } + dir, base := filepath.Split(name) + pid, err := lookup(r, dir) + if err != nil { + return 0, err + } + id, _, err := r.Metadata().GetChild(pid, base) + return id, err +} + +type calledReaderAt struct { + io.ReaderAt + called []int64 +} + +func (r *calledReaderAt) ReadAt(p []byte, off int64) (int, error) { + r.called = append(r.called, off) + return r.ReaderAt.ReadAt(p, off) +} + +// longBytesView is an alias of []byte suitable for printing a long data as an omitted string to avoid long data being printed. +type longBytesView []byte + +func (b longBytesView) String() string { + if len(b) < 100 { + return string(b) + } + return string(b[:50]) + "...(omit)..." + string(b[len(b)-50:]) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go index 9329765b468e..c7e649406851 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/blob.go @@ -26,7 +26,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "regexp" "sort" "strings" @@ -193,7 +192,7 @@ func (b *blob) cacheAt(offset int64, size int64, fr fetcher, cacheOpts *options) if r, err := b.cache.Get(fr.genID(reg), cacheOpts.cacheOpts...); err == nil { return r.Close() // nop if the cache hits } - discard[reg] = ioutil.Discard + discard[reg] = io.Discard return nil }) if err != nil { diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go index 74cd7b557ba3..8efb4eb4781a 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/remote/resolver.go @@ -27,12 +27,10 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "math/rand" "mime" "mime/multipart" "net/http" - "net/url" "path" "strconv" "strings" @@ -200,7 +198,7 @@ func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, int64 return nil, 0, fmt.Errorf("Digest is mandatory in layer descriptor") } digest := desc.Digest - pullScope, err := repositoryScope(fc.refspec, false) + pullScope, err := docker.RepositoryScope(fc.refspec, false) if err != nil { return nil, 0, err } @@ -240,7 +238,7 @@ func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, int64 path.Join(host.Host, host.Path), strings.TrimPrefix(fc.refspec.Locator, fc.refspec.Hostname()+"/"), digest) - url, err := redirect(ctx, blobURL, tr, timeout) + url, header, err := redirect(ctx, blobURL, tr, timeout, 
host.Header) if err != nil { rErr = fmt.Errorf("failed to redirect (host %q, ref:%q, digest:%q): %v: %w", host.Host, fc.refspec, digest, err, rErr) continue // Try another @@ -249,7 +247,7 @@ func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, int64 // Get size information // TODO: we should try to use the Size field in the descriptor here. start := time.Now() // start time before getting layer header - size, err := getSize(ctx, url, tr, timeout) + size, err := getSize(ctx, url, tr, timeout, header) commonmetrics.MeasureLatencyInMilliseconds(commonmetrics.StargzHeaderGet, digest, start) // time to get layer header if err != nil { rErr = fmt.Errorf("failed to get size (host %q, ref:%q, digest:%q): %v: %w", host.Host, fc.refspec, digest, err, rErr) @@ -258,11 +256,13 @@ func newHTTPFetcher(ctx context.Context, fc *fetcherConfig) (*httpFetcher, int64 // Hit one destination return &httpFetcher{ - url: url, - tr: tr, - blobURL: blobURL, - digest: digest, - timeout: timeout, + url: url, + tr: tr, + blobURL: blobURL, + digest: digest, + timeout: timeout, + header: header, + orgHeader: host.Header, }, size, nil } @@ -311,7 +311,7 @@ func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) { return resp, nil } -func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout time.Duration) (url string, err error) { +func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout time.Duration, header http.Header) (url string, withHeader http.Header, err error) { if timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, timeout) @@ -322,32 +322,38 @@ func redirect(ctx context.Context, blobURL string, tr http.RoundTripper, timeout // ghcr.io returns 200 on HEAD without Location header (2020). req, err := http.NewRequestWithContext(ctx, "GET", blobURL, nil) if err != nil { - return "", fmt.Errorf("failed to make request to the registry: %w", err) + return "", nil, fmt.Errorf("failed to make request to the registry: %w", err) + } + req.Header = http.Header{} + for k, v := range header { + req.Header[k] = v } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := tr.RoundTrip(req) if err != nil { - return "", fmt.Errorf("failed to request: %w", err) + return "", nil, fmt.Errorf("failed to request: %w", err) } defer func() { - io.Copy(ioutil.Discard, res.Body) + io.Copy(io.Discard, res.Body) res.Body.Close() }() if res.StatusCode/100 == 2 { url = blobURL + withHeader = header } else if redir := res.Header.Get("Location"); redir != "" && res.StatusCode/100 == 3 { // TODO: Support nested redirection url = redir + // Do not pass headers to the redirected location. 
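These resolver changes thread `host.Header` (per-host headers from the config, possibly carrying credentials) into every request the fetcher builds, but deliberately stop forwarding them once the registry answers with a 3xx: the redirect target is typically a pre-signed storage URL on another origin, where registry headers could leak credentials or invalidate the signed request. A hedged sketch of the copy pattern repeated at each call site; `http.Header.Clone` is a deep-copy alternative:

```go
package main

import "net/http"

// applyHeader sets the given headers on req, mirroring the inline copies in
// the hunks above. The copy is shallow: value slices stay shared, which is
// fine here because the fetcher never mutates them. Use header.Clone() when
// a deep copy is required.
func applyHeader(req *http.Request, header http.Header) {
	req.Header = http.Header{}
	for k, v := range header {
		req.Header[k] = v
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://registry.example.com/v2/", nil)
	applyHeader(req, http.Header{"Authorization": {"Bearer <token>"}})
	_ = req
}
```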
} else { - return "", fmt.Errorf("failed to access to the registry with code %v", res.StatusCode) + return "", nil, fmt.Errorf("failed to access to the registry with code %v", res.StatusCode) } return } -func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time.Duration) (int64, error) { +func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time.Duration, header http.Header) (int64, error) { if timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, timeout) @@ -357,6 +363,10 @@ func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time if err != nil { return 0, err } + req.Header = http.Header{} + for k, v := range header { + req.Header[k] = v + } req.Close = false res, err := tr.RoundTrip(req) if err != nil { @@ -375,6 +385,10 @@ func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time if err != nil { return 0, fmt.Errorf("failed to make request to the registry: %w", err) } + req.Header = http.Header{} + for k, v := range header { + req.Header[k] = v + } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err = tr.RoundTrip(req) @@ -382,7 +396,7 @@ func getSize(ctx context.Context, url string, tr http.RoundTripper, timeout time return 0, fmt.Errorf("failed to request: %w", err) } defer func() { - io.Copy(ioutil.Discard, res.Body) + io.Copy(io.Discard, res.Body) res.Body.Close() }() @@ -406,6 +420,8 @@ type httpFetcher struct { singleRange bool singleRangeMu sync.Mutex timeout time.Duration + header http.Header + orgHeader http.Header } type multipartReadCloser interface { @@ -445,6 +461,10 @@ func (f *httpFetcher) fetch(ctx context.Context, rs []region, retry bool) (multi if err != nil { return nil, err } + req.Header = http.Header{} + for k, v := range f.header { + req.Header[k] = v + } var ranges string for _, reg := range requests { ranges += fmt.Sprintf("%d-%d,", reg.b, reg.e) @@ -516,6 +536,10 @@ func (f *httpFetcher) check() error { if err != nil { return fmt.Errorf("check failed: failed to make request: %w", err) } + req.Header = http.Header{} + for k, v := range f.header { + req.Header[k] = v + } req.Close = false req.Header.Set("Range", "bytes=0-1") res, err := f.tr.RoundTrip(req) @@ -523,7 +547,7 @@ func (f *httpFetcher) check() error { return fmt.Errorf("check failed: failed to request to registry: %w", err) } defer func() { - io.Copy(ioutil.Discard, res.Body) + io.Copy(io.Discard, res.Body) res.Body.Close() }() if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusPartialContent { @@ -546,12 +570,13 @@ func (f *httpFetcher) check() error { } func (f *httpFetcher) refreshURL(ctx context.Context) error { - newURL, err := redirect(ctx, f.blobURL, f.tr, f.timeout) + newURL, headers, err := redirect(ctx, f.blobURL, f.tr, f.timeout, f.orgHeader) if err != nil { return err } f.urlMu.Lock() f.url = newURL + f.header = headers f.urlMu.Unlock() return nil } @@ -661,24 +686,6 @@ func WithCacheOpts(cacheOpts ...cache.Option) Option { } } -// NOTE: ported from https://github.com/containerd/containerd/blob/v1.5.2/remotes/docker/scope.go#L29-L42 -// TODO: import this from containerd package once we drop support to continerd v1.4.x -// -// repositoryScope returns a repository scope string such as "repository:foo/bar:pull" -// for "host/foo/bar:baz". -// When push is true, both pull and push are added to the scope. 
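The helper deleted in the next hunk built the token-auth scope string for pulls; the diff replaces it with containerd's exported `docker.RepositoryScope` (which is also why the `net/url` import disappears above). For reference, a standalone sketch equivalent to the deleted implementation, taking the locator as a plain string instead of a `reference.Spec`:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// scopeFor reproduces the deleted repositoryScope helper: for a locator like
// "registry.example.com/foo/bar" it yields "repository:foo/bar:pull", with
// ",push" appended when push is true. Sketch only; the vendored code now
// calls docker.RepositoryScope from containerd instead.
func scopeFor(locator string, push bool) (string, error) {
	u, err := url.Parse("dummy://" + locator)
	if err != nil {
		return "", err
	}
	s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
	if push {
		s += ",push"
	}
	return s, nil
}

func main() {
	s, _ := scopeFor("registry.example.com/foo/bar", false)
	fmt.Println(s) // repository:foo/bar:pull
}
```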
-func repositoryScope(refspec reference.Spec, push bool) (string, error) { - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - return "", err - } - s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" - if push { - s += ",push" - } - return s, nil -} - type remoteFetcher struct { r Fetcher } diff --git a/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go b/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go index b20790228d6e..6854be34ee91 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go +++ b/vendor/github.com/containerd/stargz-snapshotter/metadata/memory/reader.go @@ -21,7 +21,6 @@ import ( "io" "math" "os" - "sync" "time" "github.com/containerd/stargz-snapshotter/estargz" @@ -34,23 +33,13 @@ type reader struct { rootID uint32 idMap map[uint32]*estargz.TOCEntry - idOfEntry map[*estargz.TOCEntry]uint32 - mu sync.Mutex + idOfEntry map[string]uint32 - curID uint32 - curIDMu sync.Mutex - - opts *metadata.Options + estargzOpts []estargz.OpenOption } -func (r *reader) nextID() (uint32, error) { - r.curIDMu.Lock() - defer r.curIDMu.Unlock() - if r.curID == math.MaxUint32 { - return 0, fmt.Errorf("sequence id too large") - } - r.curID++ - return r.curID, nil +func newReader(er *estargz.Reader, rootID uint32, idMap map[uint32]*estargz.TOCEntry, idOfEntry map[string]uint32, estargzOpts []estargz.OpenOption) *reader { + return &reader{r: er, rootID: rootID, idMap: idMap, idOfEntry: idOfEntry, estargzOpts: estargzOpts} } func NewReader(sr *io.SectionReader, opts ...metadata.Option) (metadata.Reader, error) { @@ -71,54 +60,75 @@ func NewReader(sr *io.SectionReader, opts ...metadata.Option) (metadata.Reader, for _, d := range rOpts.Decompressors { decompressors = append(decompressors, d) } - er, err := estargz.Open(sr, + + erOpts := []estargz.OpenOption{ estargz.WithTOCOffset(rOpts.TOCOffset), estargz.WithTelemetry(telemetry), estargz.WithDecompressors(decompressors...), - ) + } + er, err := estargz.Open(sr, erOpts...) if err != nil { return nil, err } - root, ok := er.Lookup("") if !ok { return nil, fmt.Errorf("failed to get root node") } - r := &reader{r: er, idMap: make(map[uint32]*estargz.TOCEntry), idOfEntry: make(map[*estargz.TOCEntry]uint32), opts: &rOpts} - rootID, err := r.initID(root) + rootID, idMap, idOfEntry, err := assignIDs(er, root) if err != nil { return nil, err } - r.rootID = rootID + r := newReader(er, rootID, idMap, idOfEntry, erOpts) return r, nil } -func (r *reader) initID(e *estargz.TOCEntry) (id uint32, err error) { - var ok bool - r.mu.Lock() - id, ok = r.idOfEntry[e] - if !ok { - id, err = r.nextID() - if err != nil { - return 0, err +// assignIDs assigns an to each TOC item and returns a mapping from ID to entry and vice-versa. 
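In the `memory` metadata reader below, `idOfEntry` is now keyed by entry name rather than by `*estargz.TOCEntry`, and `assignIDs` rejects raw `hardlink` entries because estargz resolves a hardlink to its destination entry before the walk (making the deleted runtime `Lookup` fallbacks unnecessary). The net effect is that hardlinked paths collapse onto a single metadata ID. A toy, runnable model of that invariant, reusing the `barlink` fixture names from the deleted tests:

```go
package main

import "fmt"

func main() {
	// Toy model of idOfEntry after assignIDs: keys are resolved entry names,
	// so a file and its hardlink look up the same ID. The "barlink" ->
	// "bar/1/baz.txt" pair mirrors the fixture in the deleted TestReader.
	idOfEntry := map[string]uint32{"bar/1/baz.txt": 7}
	resolved := map[string]string{ // path -> destination entry name after estargz resolution
		"bar/1/baz.txt": "bar/1/baz.txt",
		"barlink":       "bar/1/baz.txt",
	}
	fmt.Println(idOfEntry[resolved["bar/1/baz.txt"]] == idOfEntry[resolved["barlink"]]) // true
}
```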
+func assignIDs(er *estargz.Reader, e *estargz.TOCEntry) (rootID uint32, idMap map[uint32]*estargz.TOCEntry, idOfEntry map[string]uint32, err error) { + idMap = make(map[uint32]*estargz.TOCEntry) + idOfEntry = make(map[string]uint32) + curID := uint32(0) + + nextID := func() (uint32, error) { + if curID == math.MaxUint32 { + return 0, fmt.Errorf("sequence id too large") } - r.idMap[id] = e - r.idOfEntry[e] = id + curID++ + return curID, nil } - r.mu.Unlock() - - e.ForeachChild(func(_ string, ent *estargz.TOCEntry) bool { - if ent.Type == "hardlink" { - var ok bool - ent, ok = r.r.Lookup(ent.Name) - if !ok { - return false + + var mapChildren func(e *estargz.TOCEntry) (uint32, error) + mapChildren = func(e *estargz.TOCEntry) (uint32, error) { + if e.Type == "hardlink" { + return 0, fmt.Errorf("unexpected type \"hardlink\": this should be replaced to the destination entry") + } + + var ok bool + id, ok := idOfEntry[e.Name] + if !ok { + id, err = nextID() + if err != nil { + return 0, err } + idMap[id] = e + idOfEntry[e.Name] = id } - _, err = r.initID(ent) - return err == nil - }) - return id, err + + e.ForeachChild(func(_ string, ent *estargz.TOCEntry) bool { + _, err = mapChildren(ent) + return err == nil + }) + if err != nil { + return 0, err + } + return id, nil + } + + rootID, err = mapChildren(e) + if err != nil { + return 0, nil, nil, err + } + + return rootID, idMap, idOfEntry, nil } func (r *reader) RootID() uint32 { @@ -130,8 +140,6 @@ func (r *reader) TOCDigest() digest.Digest { } func (r *reader) GetOffset(id uint32) (offset int64, err error) { - r.mu.Lock() - defer r.mu.Unlock() e, ok := r.idMap[id] if !ok { return 0, fmt.Errorf("entry %d not found", id) @@ -140,9 +148,7 @@ func (r *reader) GetOffset(id uint32) (offset int64, err error) { } func (r *reader) GetAttr(id uint32) (attr metadata.Attr, err error) { - r.mu.Lock() e, ok := r.idMap[id] - r.mu.Unlock() if !ok { err = fmt.Errorf("entry %d not found", id) return @@ -153,9 +159,7 @@ func (r *reader) GetAttr(id uint32) (attr metadata.Attr, err error) { } func (r *reader) GetChild(pid uint32, base string) (id uint32, attr metadata.Attr, err error) { - r.mu.Lock() e, ok := r.idMap[pid] - r.mu.Unlock() if !ok { err = fmt.Errorf("parent entry %d not found", pid) return @@ -165,14 +169,7 @@ func (r *reader) GetChild(pid uint32, base string) (id uint32, attr metadata.Att err = fmt.Errorf("child %q of entry %d not found", base, pid) return } - if child.Type == "hardlink" { - child, ok = r.r.Lookup(child.Name) - if !ok { - err = fmt.Errorf("child %q ()hardlink of entry %d not found", base, pid) - return - } - } - cid, ok := r.idOfEntry[child] + cid, ok := r.idOfEntry[child.Name] if !ok { err = fmt.Errorf("id of entry %q not found", base) return @@ -183,24 +180,13 @@ func (r *reader) GetChild(pid uint32, base string) (id uint32, attr metadata.Att } func (r *reader) ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error { - r.mu.Lock() e, ok := r.idMap[id] - r.mu.Unlock() if !ok { return fmt.Errorf("parent entry %d not found", id) } var err error e.ForeachChild(func(baseName string, ent *estargz.TOCEntry) bool { - if ent.Type == "hardlink" { - var ok bool - ent, ok = r.r.Lookup(ent.Name) - if !ok { - return false - } - } - r.mu.Lock() - id, ok := r.idOfEntry[ent] - r.mu.Unlock() + id, ok := r.idOfEntry[ent.Name] if !ok { err = fmt.Errorf("id of child entry %q not found", baseName) return false @@ -211,9 +197,7 @@ func (r *reader) ForeachChild(id uint32, f func(name string, id uint32, mode os. 
} func (r *reader) OpenFile(id uint32) (metadata.File, error) { - r.mu.Lock() e, ok := r.idMap[id] - r.mu.Unlock() if !ok { return nil, fmt.Errorf("entry %d not found", id) } @@ -224,12 +208,31 @@ func (r *reader) OpenFile(id uint32) (metadata.File, error) { return &file{r, e, sr}, nil } +func (r *reader) OpenFileWithPreReader(id uint32, preRead func(id uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error) (metadata.File, error) { + e, ok := r.idMap[id] + if !ok { + return nil, fmt.Errorf("entry %d not found", id) + } + sr, err := r.r.OpenFileWithPreReader(e.Name, func(e *estargz.TOCEntry, chunkR io.Reader) error { + cid, ok := r.idOfEntry[e.Name] + if !ok { + return fmt.Errorf("id of entry %q not found", e.Name) + } + return preRead(cid, e.ChunkOffset, e.ChunkSize, e.ChunkDigest, chunkR) + }) + if err != nil { + return nil, err + } + return &file{r, e, sr}, nil +} + func (r *reader) Clone(sr *io.SectionReader) (metadata.Reader, error) { - return NewReader(sr, - metadata.WithTOCOffset(r.opts.TOCOffset), - metadata.WithTelemetry(r.opts.Telemetry), - metadata.WithDecompressors(r.opts.Decompressors...), - ) + er, err := estargz.Open(sr, r.estargzOpts...) + if err != nil { + return nil, err + } + + return newReader(er, r.rootID, r.idMap, r.idOfEntry, r.estargzOpts), nil } func (r *reader) Close() error { diff --git a/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go b/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go index 1774d0818d6a..ee25f27d0016 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go +++ b/vendor/github.com/containerd/stargz-snapshotter/metadata/metadata.go @@ -71,6 +71,7 @@ type Reader interface { GetChild(pid uint32, base string) (id uint32, attr Attr, err error) ForeachChild(id uint32, f func(name string, id uint32, mode os.FileMode) bool) error OpenFile(id uint32) (File, error) + OpenFileWithPreReader(id uint32, preRead func(id uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error) (File, error) Clone(sr *io.SectionReader) (Reader, error) Close() error @@ -85,6 +86,10 @@ type Decompressor interface { estargz.Decompressor // DecompressTOC decompresses the passed blob and returns a reader of TOC JSON. + // + // If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob. + // Pass nil reader to DecompressTOC then we expect that DecompressTOC acquire TOC from the external + // location and return it. DecompressTOC(io.Reader) (tocJSON io.ReadCloser, err error) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/metadata/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/metadata/testutil.go deleted file mode 100644 index 40913c0024eb..000000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/metadata/testutil.go +++ /dev/null @@ -1,659 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
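`OpenFileWithPreReader`, added both to this reader and to the `metadata.Reader` interface below, exposes chunks that get decompressed as a byproduct of reading some other file, which is the mechanism exercised by `hasFileContentsWithPreCached` earlier in this diff. When several small entries share one compressed member (see `WithMinChunkSize`), reading one entry surfaces its neighbors to the `preRead` callback so a caller can cache them. A hedged sketch of such a caller (`putCache` is a hypothetical caching function):

```go
package sketch

import (
	"io"

	"github.com/containerd/stargz-snapshotter/metadata"
)

// preCacheSiblings opens id via mr and caches sibling chunks surfaced while
// the target file is decompressed. A sketch only: mr and putCache are
// assumptions supplied by the caller.
func preCacheSiblings(mr metadata.Reader, id uint32,
	putCache func(id uint32, off, size int64, dgst string, data []byte) error) (metadata.File, error) {
	return mr.OpenFileWithPreReader(id,
		func(cid uint32, chunkOffset, chunkSize int64, chunkDigest string, r io.Reader) error {
			data, err := io.ReadAll(r) // chunk decompressed as a byproduct of the main read
			if err != nil {
				return err
			}
			return putCache(cid, chunkOffset, chunkSize, chunkDigest, data)
		})
}
```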
-*/ - -package metadata - -import ( - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/containerd/stargz-snapshotter/estargz" - "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" - "github.com/containerd/stargz-snapshotter/util/testutil" - "github.com/hashicorp/go-multierror" - "github.com/klauspost/compress/zstd" -) - -var allowedPrefix = [4]string{"", "./", "/", "../"} - -type compression interface { - estargz.Compressor - Decompressor -} - -var srcCompressions = map[string]compression{ - "zstd-fastest": zstdCompressionWithLevel(zstd.SpeedFastest), - "zstd-default": zstdCompressionWithLevel(zstd.SpeedDefault), - "zstd-bettercompression": zstdCompressionWithLevel(zstd.SpeedBetterCompression), - "gzip-nocompression": gzipCompressionWithLevel(gzip.NoCompression), - "gzip-bestspeed": gzipCompressionWithLevel(gzip.BestSpeed), - "gzip-bestcompression": gzipCompressionWithLevel(gzip.BestCompression), - "gzip-defaultcompression": gzipCompressionWithLevel(gzip.DefaultCompression), - "gzip-huffmanonly": gzipCompressionWithLevel(gzip.HuffmanOnly), -} - -type zstdCompression struct { - *zstdchunked.Compressor - *zstdchunked.Decompressor -} - -func zstdCompressionWithLevel(compressionLevel zstd.EncoderLevel) compression { - return &zstdCompression{&zstdchunked.Compressor{CompressionLevel: compressionLevel}, &zstdchunked.Decompressor{}} -} - -type gzipCompression struct { - *estargz.GzipCompressor - *estargz.GzipDecompressor -} - -func gzipCompressionWithLevel(compressionLevel int) compression { - return gzipCompression{estargz.NewGzipCompressorWithLevel(compressionLevel), &estargz.GzipDecompressor{}} -} - -type ReaderFactory func(sr *io.SectionReader, opts ...Option) (r TestableReader, err error) - -type TestableReader interface { - Reader - NumOfNodes() (i int, _ error) -} - -// TestReader tests Reader returns correct file metadata. 
-func TestReader(t *testing.T, factory ReaderFactory) { - sampleTime := time.Now().Truncate(time.Second) - sampleText := "qwer" + "tyui" + "opas" + "dfgh" + "jk" - tests := []struct { - name string - chunkSize int - in []testutil.TarEntry - want []check - }{ - { - name: "empty", - in: []testutil.TarEntry{}, - want: []check{ - numOfNodes(2), // root dir + prefetch landmark - }, - }, - { - name: "files", - in: []testutil.TarEntry{ - testutil.File("foo", "foofoo", testutil.WithFileMode(0644|os.ModeSetuid)), - testutil.Dir("bar/"), - testutil.File("bar/baz.txt", "bazbazbaz", testutil.WithFileOwner(1000, 1000)), - testutil.File("xxx.txt", "xxxxx", testutil.WithFileModTime(sampleTime)), - testutil.File("y.txt", "", testutil.WithFileXattrs(map[string]string{"testkey": "testval"})), - }, - want: []check{ - numOfNodes(7), // root dir + prefetch landmark + 1 dir + 4 files - hasFile("foo", "foofoo", 6), - hasMode("foo", 0644|os.ModeSetuid), - hasFile("bar/baz.txt", "bazbazbaz", 9), - hasOwner("bar/baz.txt", 1000, 1000), - hasFile("xxx.txt", "xxxxx", 5), - hasModTime("xxx.txt", sampleTime), - hasFile("y.txt", "", 0), - hasXattrs("y.txt", map[string]string{"testkey": "testval"}), - }, - }, - { - name: "dirs", - in: []testutil.TarEntry{ - testutil.Dir("foo/", testutil.WithDirMode(os.ModeDir|0600|os.ModeSticky)), - testutil.Dir("foo/bar/", testutil.WithDirOwner(1000, 1000)), - testutil.File("foo/bar/baz.txt", "testtest"), - testutil.File("foo/bar/xxxx", "x"), - testutil.File("foo/bar/yyy", "yyy"), - testutil.Dir("foo/a/", testutil.WithDirModTime(sampleTime)), - testutil.Dir("foo/a/1/", testutil.WithDirXattrs(map[string]string{"testkey": "testval"})), - testutil.File("foo/a/1/2", "1111111111"), - }, - want: []check{ - numOfNodes(10), // root dir + prefetch landmark + 4 dirs + 4 files - hasDirChildren("foo", "bar", "a"), - hasDirChildren("foo/bar", "baz.txt", "xxxx", "yyy"), - hasDirChildren("foo/a", "1"), - hasDirChildren("foo/a/1", "2"), - hasMode("foo", os.ModeDir|0600|os.ModeSticky), - hasOwner("foo/bar", 1000, 1000), - hasModTime("foo/a", sampleTime), - hasXattrs("foo/a/1", map[string]string{"testkey": "testval"}), - hasFile("foo/bar/baz.txt", "testtest", 8), - hasFile("foo/bar/xxxx", "x", 1), - hasFile("foo/bar/yyy", "yyy", 3), - hasFile("foo/a/1/2", "1111111111", 10), - }, - }, - { - name: "hardlinks", - in: []testutil.TarEntry{ - testutil.File("foo", "foofoo", testutil.WithFileOwner(1000, 1000)), - testutil.Dir("bar/"), - testutil.Link("bar/foolink", "foo"), - testutil.Link("bar/foolink2", "bar/foolink"), - testutil.Dir("bar/1/"), - testutil.File("bar/1/baz.txt", "testtest"), - testutil.Link("barlink", "bar/1/baz.txt"), - testutil.Symlink("foosym", "bar/foolink2"), - }, - want: []check{ - numOfNodes(7), // root dir + prefetch landmark + 2 dirs + 1 flie(linked) + 1 file(linked) + 1 symlink - hasFile("foo", "foofoo", 6), - hasOwner("foo", 1000, 1000), - hasFile("bar/foolink", "foofoo", 6), - hasOwner("bar/foolink", 1000, 1000), - hasFile("bar/foolink2", "foofoo", 6), - hasOwner("bar/foolink2", 1000, 1000), - hasFile("bar/1/baz.txt", "testtest", 8), - hasFile("barlink", "testtest", 8), - hasDirChildren("bar", "foolink", "foolink2", "1"), - hasDirChildren("bar/1", "baz.txt"), - sameNodes("foo", "bar/foolink", "bar/foolink2"), - sameNodes("bar/1/baz.txt", "barlink"), - linkName("foosym", "bar/foolink2"), - hasNumLink("foo", 3), // parent dir + 2 links - hasNumLink("barlink", 2), // parent dir + 1 link - hasNumLink("bar", 3), // parent + "." + child's ".." 
- }, - }, - { - name: "various files", - in: []testutil.TarEntry{ - testutil.Dir("bar/"), - testutil.File("bar/../bar///////////////////foo", ""), - testutil.Chardev("bar/cdev", 10, 11), - testutil.Blockdev("bar/bdev", 100, 101), - testutil.Fifo("bar/fifo"), - }, - want: []check{ - numOfNodes(7), // root dir + prefetch landmark + 1 file + 1 dir + 1 cdev + 1 bdev + 1 fifo - hasFile("bar/foo", "", 0), - hasChardev("bar/cdev", 10, 11), - hasBlockdev("bar/bdev", 100, 101), - hasFifo("bar/fifo"), - }, - }, - { - name: "chunks", - chunkSize: 4, - in: []testutil.TarEntry{ - testutil.Dir("foo/"), - testutil.File("foo/small", sampleText[:2]), - testutil.File("foo/large", sampleText), - }, - want: []check{ - numOfNodes(5), // root dir + prefetch landmark + 1 dir + 2 files - numOfChunks("foo/large", 1+(len(sampleText)/4)), - hasFileContentsOffset("foo/small", 0, sampleText[:2]), - hasFileContentsOffset("foo/large", 0, sampleText[0:]), - hasFileContentsOffset("foo/large", 1, sampleText[1:]), - hasFileContentsOffset("foo/large", 2, sampleText[2:]), - hasFileContentsOffset("foo/large", 3, sampleText[3:]), - hasFileContentsOffset("foo/large", 4, sampleText[4:]), - hasFileContentsOffset("foo/large", 5, sampleText[5:]), - hasFileContentsOffset("foo/large", 6, sampleText[6:]), - hasFileContentsOffset("foo/large", 7, sampleText[7:]), - hasFileContentsOffset("foo/large", 8, sampleText[8:]), - hasFileContentsOffset("foo/large", 9, sampleText[9:]), - hasFileContentsOffset("foo/large", 10, sampleText[10:]), - hasFileContentsOffset("foo/large", 11, sampleText[11:]), - hasFileContentsOffset("foo/large", 12, sampleText[12:]), - hasFileContentsOffset("foo/large", int64(len(sampleText)-1), ""), - }, - }, - } - for _, tt := range tests { - for _, prefix := range allowedPrefix { - prefix := prefix - for srcCompresionName, srcCompression := range srcCompressions { - srcCompression := srcCompression - t.Run(tt.name+"-"+srcCompresionName, func(t *testing.T) { - opts := []testutil.BuildEStargzOption{ - testutil.WithBuildTarOptions(testutil.WithPrefix(prefix)), - testutil.WithEStargzOptions(estargz.WithCompression(srcCompression)), - } - if tt.chunkSize > 0 { - opts = append(opts, testutil.WithEStargzOptions(estargz.WithChunkSize(tt.chunkSize))) - } - esgz, _, err := testutil.BuildEStargz(tt.in, opts...) 
- if err != nil { - t.Fatalf("failed to build sample eStargz: %v", err) - } - - telemetry, checkCalled := newCalledTelemetry() - r, err := factory(esgz, - WithDecompressors(new(zstdchunked.Decompressor)), WithTelemetry(telemetry)) - if err != nil { - t.Fatalf("failed to create new reader: %v", err) - } - defer r.Close() - t.Logf("vvvvv Node tree vvvvv") - t.Logf("[%d] ROOT", r.RootID()) - dumpNodes(t, r, r.RootID(), 1) - t.Logf("^^^^^^^^^^^^^^^^^^^^^") - for _, want := range tt.want { - want(t, r) - } - if err := checkCalled(); err != nil { - t.Errorf("telemetry failure: %v", err) - } - }) - } - } - } -} - -func newCalledTelemetry() (telemetry *Telemetry, check func() error) { - var getFooterLatencyCalled bool - var getTocLatencyCalled bool - var deserializeTocLatencyCalled bool - return &Telemetry{ - func(time.Time) { getFooterLatencyCalled = true }, - func(time.Time) { getTocLatencyCalled = true }, - func(time.Time) { deserializeTocLatencyCalled = true }, - }, func() error { - var allErr error - if !getFooterLatencyCalled { - allErr = multierror.Append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) - } - if !getTocLatencyCalled { - allErr = multierror.Append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) - } - if !deserializeTocLatencyCalled { - allErr = multierror.Append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) - } - return allErr - } -} - -func dumpNodes(t *testing.T, r TestableReader, id uint32, level int) { - if err := r.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool { - ind := "" - for i := 0; i < level; i++ { - ind += " " - } - t.Logf("%v+- [%d] %q : %v", ind, id, name, mode) - dumpNodes(t, r, id, level+1) - return true - }); err != nil { - t.Errorf("failed to dump nodes %v", err) - } -} - -type check func(*testing.T, TestableReader) - -func numOfNodes(want int) check { - return func(t *testing.T, r TestableReader) { - i, err := r.NumOfNodes() - if err != nil { - t.Errorf("num of nodes: %v", err) - } - if want != i { - t.Errorf("unexpected num of nodes %d; want %d", i, want) - } - } -} - -func numOfChunks(name string, num int) check { - return func(t *testing.T, r TestableReader) { - nr, ok := r.(interface { - NumOfChunks(id uint32) (i int, _ error) - }) - if !ok { - return // skip - } - id, err := lookup(r, name) - if err != nil { - t.Errorf("failed to lookup %q: %v", name, err) - return - } - i, err := nr.NumOfChunks(id) - if err != nil { - t.Errorf("failed to get num of chunks of %q: %v", name, err) - return - } - if i != num { - t.Errorf("unexpected num of chunk of %q : %d want %d", name, i, num) - } - } -} - -func sameNodes(n string, nodes ...string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, n) - if err != nil { - t.Errorf("failed to lookup %q: %v", n, err) - return - } - for _, en := range nodes { - eid, err := lookup(r, en) - if err != nil { - t.Errorf("failed to lookup %q: %v", en, err) - return - } - if eid != id { - t.Errorf("unexpected ID of %q: %d want %d", en, eid, id) - } - } - } -} - -func linkName(name string, linkName string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("failed to lookup %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("failed to get attr of %q: %v", name, err) - return - } - if attr.Mode&os.ModeSymlink == 0 { - t.Errorf("%q is not a symlink: %v", name, attr.Mode) - return - } - if attr.LinkName != linkName { - t.Errorf("unexpected 
link name of %q : %q want %q", name, attr.LinkName, linkName) - return - } - } -} - -func hasNumLink(name string, numLink int) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("failed to lookup %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("failed to get attr of %q: %v", name, err) - return - } - if attr.NumLink != numLink { - t.Errorf("unexpected numLink of %q: %d want %d", name, attr.NumLink, numLink) - return - } - } -} - -func hasDirChildren(name string, children ...string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("failed to lookup %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("failed to get attr of %q: %v", name, err) - return - } - if !attr.Mode.IsDir() { - t.Errorf("%q is not directory: %v", name, attr.Mode) - return - } - found := map[string]struct{}{} - if err := r.ForeachChild(id, func(name string, id uint32, mode os.FileMode) bool { - found[name] = struct{}{} - return true - }); err != nil { - t.Errorf("failed to see children %v", err) - return - } - if len(found) != len(children) { - t.Errorf("unexpected number of children of %q : %d want %d", name, len(found), len(children)) - } - for _, want := range children { - if _, ok := found[want]; !ok { - t.Errorf("expected child %q not found in %q", want, name) - } - } - } -} - -func hasChardev(name string, maj, min int) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find chardev %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of chardev %q: %v", name, err) - return - } - if attr.Mode&os.ModeDevice == 0 || attr.Mode&os.ModeCharDevice == 0 { - t.Errorf("file %q is not a chardev: %v", name, attr.Mode) - return - } - if attr.DevMajor != maj || attr.DevMinor != min { - t.Errorf("unexpected major/minor of chardev %q: %d/%d want %d/%d", name, attr.DevMajor, attr.DevMinor, maj, min) - return - } - } -} - -func hasBlockdev(name string, maj, min int) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find blockdev %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of blockdev %q: %v", name, err) - return - } - if attr.Mode&os.ModeDevice == 0 || attr.Mode&os.ModeCharDevice != 0 { - t.Errorf("file %q is not a blockdev: %v", name, attr.Mode) - return - } - if attr.DevMajor != maj || attr.DevMinor != min { - t.Errorf("unexpected major/minor of blockdev %q: %d/%d want %d/%d", name, attr.DevMajor, attr.DevMinor, maj, min) - return - } - } -} - -func hasFifo(name string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find blockdev %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of blockdev %q: %v", name, err) - return - } - if attr.Mode&os.ModeNamedPipe == 0 { - t.Errorf("file %q is not a fifo: %v", name, attr.Mode) - return - } - } -} - -func hasFile(name, content string, size int64) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find file %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of file %q: %v", 
name, err) - return - } - if !attr.Mode.IsRegular() { - t.Errorf("file %q is not a regular file: %v", name, attr.Mode) - return - } - sr, err := r.OpenFile(id) - if err != nil { - t.Errorf("cannot open file %q: %v", name, err) - return - } - data, err := ioutil.ReadAll(io.NewSectionReader(sr, 0, attr.Size)) - if err != nil { - t.Errorf("cannot read file %q: %v", name, err) - return - } - if attr.Size != size { - t.Errorf("unexpected size of file %q : %d (%q) want %d (%q)", name, attr.Size, string(data), size, content) - return - } - if string(data) != content { - t.Errorf("unexpected content of %q: %q want %q", name, string(data), content) - return - } - } -} - -func hasFileContentsOffset(name string, off int64, contents string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("failed to lookup %q: %v", name, err) - return - } - fr, err := r.OpenFile(id) - if err != nil { - t.Errorf("failed to open file %q: %v", name, err) - return - } - buf := make([]byte, len(contents)) - n, err := fr.ReadAt(buf, off) - if err != nil && err != io.EOF { - t.Errorf("failed to read file %q (off:%d, want:%q): %v", name, off, contents, err) - return - } - if n != len(contents) { - t.Errorf("failed to read contents %q (off:%d, want:%q) got %q", name, off, contents, string(buf)) - return - } - } -} - -func hasMode(name string, mode os.FileMode) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find file %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of file %q: %v", name, err) - return - } - if attr.Mode != mode { - t.Errorf("unexpected mode of %q: %v want %v", name, attr.Mode, mode) - return - } - } -} - -func hasOwner(name string, uid, gid int) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find file %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of file %q: %v", name, err) - return - } - if attr.UID != uid || attr.GID != gid { - t.Errorf("unexpected owner of %q: (%d:%d) want (%d:%d)", name, attr.UID, attr.GID, uid, gid) - return - } - } -} - -func hasModTime(name string, modTime time.Time) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find file %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of file %q: %v", name, err) - return - } - attrModTime := attr.ModTime - if attrModTime.Before(modTime) || attrModTime.After(modTime) { - t.Errorf("unexpected time of %q: %v; want %v", name, attrModTime, modTime) - return - } - } -} - -func hasXattrs(name string, xattrs map[string]string) check { - return func(t *testing.T, r TestableReader) { - id, err := lookup(r, name) - if err != nil { - t.Errorf("cannot find file %q: %v", name, err) - return - } - attr, err := r.GetAttr(id) - if err != nil { - t.Errorf("cannot get attr of file %q: %v", name, err) - return - } - if len(attr.Xattrs) != len(xattrs) { - t.Errorf("unexpected size of xattr of %q: %d want %d", name, len(attr.Xattrs), len(xattrs)) - return - } - for k, v := range attr.Xattrs { - if xattrs[k] != string(v) { - t.Errorf("unexpected xattr of %q: %q=%q want %q=%q", name, k, string(v), k, xattrs[k]) - } - } - } -} - -func lookup(r TestableReader, name string) (uint32, error) { - name = 
strings.TrimPrefix(path.Clean("/"+name), "/") - if name == "" { - return r.RootID(), nil - } - dir, base := filepath.Split(name) - pid, err := lookup(r, dir) - if err != nil { - return 0, err - } - id, _, err := r.GetChild(pid, base) - return id, err -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go b/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go deleted file mode 100644 index e76c0b3a56f1..000000000000 --- a/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go +++ /dev/null @@ -1,172 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// ===== -// NOTE: This file is ported from https://github.com/containerd/containerd/blob/v1.5.2/snapshots/overlay/overlayutils/check.go -// TODO: import this from containerd package once we drop support to continerd v1.4.x -// ===== - -package overlayutils - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - userns "github.com/containerd/containerd/sys" - "github.com/containerd/continuity/fs" -) - -// SupportsMultipleLowerDir checks if the system supports multiple lowerdirs, -// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs -// are always available (so this check isn't needed), and backported to RHEL and -// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect -// support on those kernels, without doing a kernel version compare. -// -// Ported from moby overlay2. -func SupportsMultipleLowerDir(d string) error { - td, err := ioutil.TempDir(d, "multiple-lowerdir-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - log.L.WithError(err).Warnf("Failed to remove check directory %v", td) - } - }() - - for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { - if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { - return err - } - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")) - m := mount.Mount{ - Type: "overlay", - Source: "overlay", - Options: []string{opts}, - } - dest := filepath.Join(td, "merged") - if err := m.Mount(dest); err != nil { - return fmt.Errorf("failed to mount overlay: %w", err) - } - if err := mount.UnmountAll(dest, 0); err != nil { - log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) - } - return nil -} - -// Supported returns nil when the overlayfs is functional on the system with the root directory. -// Supported is not called during plugin initialization, but exposed for downstream projects which uses -// this snapshotter as a library. 
-func Supported(root string) error { - if err := os.MkdirAll(root, 0700); err != nil { - return err - } - supportsDType, err := fs.SupportsDType(root) - if err != nil { - return err - } - if !supportsDType { - return fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) - } - return SupportsMultipleLowerDir(root) -} - -// NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. -// -// The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. -// -// The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). -// -// Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount -// the overlayfs in a user namespace without the "userxattr" option. -// -// The corresponding kernel commit: https://github.com/torvalds/linux/commit/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 -// > ovl: user xattr -// > -// > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." -// > ... -// > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the -// > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. -// > ... -// -// The "userxattr" support is not exposed in "/sys/module/overlay/parameters". -func NeedsUserXAttr(d string) (bool, error) { - if !userns.RunningInUserNS() { - // we are the real root (i.e., the root in the initial user NS), - // so we do never need "userxattr" opt. - return false, nil - } - - // TODO: add fast path for kernel >= 5.11 . - // - // Keep in mind that distro vendors might be going to backport the patch to older kernels. - // So we can't completely remove the check. - - tdRoot := filepath.Join(d, "userxattr-check") - if err := os.RemoveAll(tdRoot); err != nil { - log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) - } - - if err := os.MkdirAll(tdRoot, 0700); err != nil { - return false, err - } - - defer func() { - if err := os.RemoveAll(tdRoot); err != nil { - log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) - } - }() - - td, err := ioutil.TempDir(tdRoot, "") - if err != nil { - return false, err - } - - for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { - if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { - return false, err - } - } - - opts := []string{ - fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")), - "userxattr", - } - - m := mount.Mount{ - Type: "overlay", - Source: "overlay", - Options: opts, - } - - dest := filepath.Join(td, "merged") - if err := m.Mount(dest); err != nil { - // Probably the host is running Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch. - // Return false without error. 
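Both probes in this deleted file (now imported from containerd's `snapshots/overlay/overlayutils`, per the `snapshot.go` hunk below) work the same way: assemble an overlay mount over throwaway directories and check whether the kernel accepts it. A runnable sketch of the option string they build, grounded in the deleted code:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	td := "/tmp/userxattr-check" // throwaway dir; the deleted code used ioutil.TempDir
	// Two lowerdirs exercise multiple-lowerdir support; "userxattr" asks the
	// kernel to use user.overlay.* xattrs, which is needed inside user
	// namespaces on kernels >= 5.11 (see the NeedsUserXAttr comments above).
	opts := []string{
		fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s",
			filepath.Join(td, "lower2"), filepath.Join(td, "lower1"),
			filepath.Join(td, "upper"), filepath.Join(td, "work")),
		"userxattr",
	}
	fmt.Println(opts)
}
```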
- log.L.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr") - return false, nil - } - if err := mount.UnmountAll(dest, 0); err != nil { - log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) - } - return true, nil -} diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go index 62688ab61b8e..e44f708dac9c 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go +++ b/vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go @@ -19,7 +19,6 @@ package snapshot import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -29,9 +28,9 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/overlay/overlayutils" "github.com/containerd/containerd/snapshots/storage" "github.com/containerd/continuity/fs" - "github.com/containerd/stargz-snapshotter/snapshot/overlayutils" "github.com/moby/sys/mountinfo" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" @@ -72,8 +71,9 @@ type FileSystem interface { // SnapshotterConfig is used to configure the remote snapshotter instance type SnapshotterConfig struct { - asyncRemove bool - noRestore bool + asyncRemove bool + noRestore bool + allowInvalidMountsOnRestart bool } // Opt is an option to configure the remote snapshotter @@ -93,15 +93,21 @@ func NoRestore(config *SnapshotterConfig) error { return nil } +func AllowInvalidMountsOnRestart(config *SnapshotterConfig) error { + config.allowInvalidMountsOnRestart = true + return nil +} + type snapshotter struct { root string ms *storage.MetaStore asyncRemove bool // fs is a filesystem that this snapshotter recognizes. - fs FileSystem - userxattr bool // whether to enable "userxattr" mount option - noRestore bool + fs FileSystem + userxattr bool // whether to enable "userxattr" mount option + noRestore bool + allowInvalidMountsOnRestart bool } // NewSnapshotter returns a Snapshotter which can use unpacked remote layers @@ -145,12 +151,13 @@ func NewSnapshotter(ctx context.Context, root string, targetFs FileSystem, opts } o := &snapshotter{ - root: root, - ms: ms, - asyncRemove: config.asyncRemove, - fs: targetFs, - userxattr: userxattr, - noRestore: config.noRestore, + root: root, + ms: ms, + asyncRemove: config.asyncRemove, + fs: targetFs, + userxattr: userxattr, + noRestore: config.noRestore, + allowInvalidMountsOnRestart: config.allowInvalidMountsOnRestart, } if err := o.restoreRemoteSnapshot(ctx); err != nil { @@ -552,7 +559,7 @@ func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k } func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { - td, err := ioutil.TempDir(snapshotDir, "new-") + td, err := os.MkdirTemp(snapshotDir, "new-") if err != nil { return "", fmt.Errorf("failed to create temp dir: %w", err) } @@ -742,6 +749,14 @@ func (o *snapshotter) restoreRemoteSnapshot(ctx context.Context) error { } for _, info := range task { if err := o.prepareRemoteSnapshot(ctx, info.Name, info.Labels); err != nil { + if o.allowInvalidMountsOnRestart { + logrus.WithError(err).Warnf("failed to restore remote snapshot %s; remove this snapshot manually", info.Name) + // This snapshot mount is invalid but allow this. 
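`AllowInvalidMountsOnRestart` is an ordinary `Opt`, so enabling the lenient restore behaviour introduced in this hunk is one extra argument at construction time. A hedged sketch (the return type of `NewSnapshotter` is an assumption, as it is not shown in this hunk):

```go
package sketch

import (
	"context"

	"github.com/containerd/containerd/snapshots"
	"github.com/containerd/stargz-snapshotter/snapshot"
)

// newLenientSnapshotter is a sketch: NewSnapshotter and
// AllowInvalidMountsOnRestart appear in this diff; ctx, root and fs are
// supplied by the caller.
func newLenientSnapshotter(ctx context.Context, root string, fs snapshot.FileSystem) (snapshots.Snapshotter, error) {
	return snapshot.NewSnapshotter(ctx, root, fs,
		snapshot.AllowInvalidMountsOnRestart, // log and skip snapshots that fail to restore
	)
}
```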
+ // NOTE: snapshotter.Mount() will fail to return the mountpoint of these invalid snapshots so + // containerd cannot use them anymore. User needs to manually remove the snapshots from + // containerd's metadata store using ctr (e.g. `ctr snapshot rm`). + continue + } return fmt.Errorf("failed to prepare remote snapshot: %s: %w", info.Name, err) } } diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go new file mode 100644 index 000000000000..bf5f8d9e7bcf --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/compression.go @@ -0,0 +1,80 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package testutil + +import ( + "bytes" + "io" + + "github.com/containerd/stargz-snapshotter/estargz" + esgzexternaltoc "github.com/containerd/stargz-snapshotter/estargz/externaltoc" + "github.com/containerd/stargz-snapshotter/estargz/zstdchunked" + "github.com/klauspost/compress/zstd" +) + +type Compression interface { + estargz.Compressor + estargz.Decompressor + + // DecompressTOC decompresses the passed blob and returns a reader of TOC JSON. + // This is needed to be used from metadata pkg + DecompressTOC(io.Reader) (tocJSON io.ReadCloser, err error) +} + +type CompressionFactory func() Compression + +type zstdCompression struct { + *zstdchunked.Compressor + *zstdchunked.Decompressor +} + +func ZstdCompressionWithLevel(compressionLevel zstd.EncoderLevel) CompressionFactory { + return func() Compression { + return &zstdCompression{&zstdchunked.Compressor{CompressionLevel: compressionLevel}, &zstdchunked.Decompressor{}} + } +} + +type gzipCompression struct { + *estargz.GzipCompressor + *estargz.GzipDecompressor +} + +func GzipCompressionWithLevel(compressionLevel int) CompressionFactory { + return func() Compression { + return gzipCompression{estargz.NewGzipCompressorWithLevel(compressionLevel), &estargz.GzipDecompressor{}} + } +} + +type externalTOCGzipCompression struct { + *esgzexternaltoc.GzipCompressor + *esgzexternaltoc.GzipDecompressor +} + +func ExternalTOCGzipCompressionWithLevel(compressionLevel int) CompressionFactory { + return func() Compression { + compressor := esgzexternaltoc.NewGzipCompressorWithLevel(compressionLevel) + decompressor := esgzexternaltoc.NewGzipDecompressor(func() ([]byte, error) { + buf := new(bytes.Buffer) + if _, err := compressor.WriteTOCTo(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil + }) + return &externalTOCGzipCompression{compressor, decompressor} + } + +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go index bf938da0516e..4d64c5177413 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go +++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/ensurehello.go @@ -21,8 +21,8 @@ import ( "context" "fmt" "io" - 
"io/ioutil" "net/http" + "os" "github.com/containerd/containerd/content" "github.com/containerd/containerd/content/local" @@ -57,7 +57,7 @@ func EnsureHello(ctx context.Context) (*ocispec.Descriptor, content.Store, error return nil, nil, err } - tempDir, err := ioutil.TempDir("", "test-estargz") + tempDir, err := os.MkdirTemp("", "test-estargz") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go index bac1b89e42cc..3331a380ce2e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/estargz.go @@ -34,7 +34,7 @@ type BuildEStargzOption func(o *buildEStargzOptions) error // WithEStargzOptions specifies options for estargz lib func WithEStargzOptions(eo ...estargz.Option) BuildEStargzOption { return func(o *buildEStargzOptions) error { - o.estargzOptions = eo + o.estargzOptions = append(o.estargzOptions, eo...) return nil } } @@ -42,7 +42,7 @@ func WithEStargzOptions(eo ...estargz.Option) BuildEStargzOption { // WithBuildTarOptions option specifies the options for tar creation func WithBuildTarOptions(to ...BuildTarOption) BuildEStargzOption { return func(o *buildEStargzOptions) error { - o.buildTarOptions = to + o.buildTarOptions = append(o.buildTarOptions, to...) return nil } } diff --git a/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go new file mode 100644 index 000000000000..a1c07eb81827 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/util/testutil/util.go @@ -0,0 +1,31 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package testutil + +import ( + "math/rand" + "testing" +) + +// RandomBytes returns the specified number of random bytes +func RandomBytes(t *testing.T, n int) []byte { + b := make([]byte, n) + if _, err := rand.Read(b); err != nil { + t.Fatalf("failed rand.Read: %v", err) + } + return b +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index d28135ff3ccd..3cd6a59d1c09 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -21,6 +21,8 @@ import ( "os" "path/filepath" "sort" + + "github.com/containernetworking/cni/pkg/types" ) type NotFoundError struct { @@ -41,8 +43,8 @@ func (e NoConfigsFoundError) Error() string { } func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { - conf := &NetworkConfig{Bytes: bytes} - if err := json.Unmarshal(bytes, &conf.Network); err != nil { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { return nil, fmt.Errorf("error parsing configuration: %w", err) } if conf.Network.Type == "" { diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index e79bffe63eb7..55ed392a016f 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -16,6 +16,7 @@ package invoke import ( "context" + "encoding/json" "fmt" "os" @@ -33,6 +34,43 @@ type Exec interface { Decode(jsonBytes []byte) (version.PluginInfo, error) } +// Plugin must return result in same version as specified in netconf; but +// for backwards compatibility reasons if the result version is empty use +// config version (rather than technically correct 0.1.0). +// https://github.com/containernetworking/cni/issues/895 +func fixupResultVersion(netconf, result []byte) (string, []byte, error) { + versionDecoder := &version.ConfigDecoder{} + confVersion, err := versionDecoder.Decode(netconf) + if err != nil { + return "", nil, err + } + + var rawResult map[string]interface{} + if err := json.Unmarshal(result, &rawResult); err != nil { + return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) + } + + // Manually decode Result version; we need to know whether its cniVersion + // is empty, while built-in decoders (correctly) substitute 0.1.0 for an + // empty version per the CNI spec. 
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + // For example, a testcase could pass an instance of the following fakeExec // object to ExecPluginWithResult() to verify the incoming stdin and environment // and provide a tailored response: @@ -84,7 +122,12 @@ func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte return nil, err } - return create.CreateFromBytes(stdoutBytes) + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) } func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go index d4bc9d169cd4..17b22b6b0c4e 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -86,8 +86,8 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { // minor, and micro numbers or returns an error func ParseVersion(version string) (int, int, int, error) { var major, minor, micro int - if version == "" { - return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) + if version == "" { // special case: no version declared == v0.1.0 + return 0, 1, 0, nil } parts := strings.Split(version, ".") diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go index fc7db98fb41b..bf7671dd2b2f 100644 --- a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows // Package activation implements primitives for systemd socket activation. diff --git a/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go index 7a0e0d3a51b1..25d9c1aa9387 100644 --- a/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go +++ b/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go @@ -30,8 +30,8 @@ import ( // It returns one of the following: // (0, nil) - watchdog isn't enabled or we aren't the watched PID. // (0, err) - an error happened (e.g. error converting time). -// (time, nil) - watchdog is enabled and we can send ping. -// time is delay before inactive service will be killed. +// (time, nil) - watchdog is enabled and we can send ping. time is delay +// before inactive service will be killed. 
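The CNI fix above has two halves: fixupResultVersion injects the config's version into a plugin result that omits cniVersion, and ParseVersion now maps an empty version string to 0.1.0 instead of returning an error. A standalone sketch of the injection half, using only the standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// fixupVersion mirrors the shape of fixupResultVersion: if the plugin
// result carries no cniVersion, inherit the version from the netconf.
func fixupVersion(confVersion string, result []byte) ([]byte, error) {
	var raw map[string]interface{}
	if err := json.Unmarshal(result, &raw); err != nil {
		return nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
	}
	if v, ok := raw["cniVersion"].(string); ok && v != "" {
		return result, nil // result already declares its own version
	}
	raw["cniVersion"] = confVersion
	return json.Marshal(raw)
}

func main() {
	out, _ := fixupVersion("0.4.0", []byte(`{"ips":[{"address":"10.1.0.5/16"}]}`))
	fmt.Println(string(out)) // {"cniVersion":"0.4.0","ips":[{"address":"10.1.0.5/16"}]}
}
```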
func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { wusec := os.Getenv("WATCHDOG_USEC") wpid := os.Getenv("WATCHDOG_PID") diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 0668a66cf707..be2b3436062d 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -15,7 +15,7 @@ type roffRenderer struct { extensions blackfriday.Extensions listCounters []int firstHeader bool - defineTerm bool + firstDD bool listDepth int } @@ -42,7 +42,8 @@ const ( quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" @@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - r.handleText(w, node, entering) + escapeSpecialChars(w, node.Literal) case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, codeCloseTag) case blackfriday.Table: r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) case blackfriday.TableHead: case blackfriday.TableBody: case blackfriday.TableRow: // no action as cell entries do all the nroff formatting return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags default: fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) } return walkAction } -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { if entering { switch node.Level { @@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering if node.ListFlags&blackfriday.ListTypeOrdered != 0 { out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). + out(w, dtTag) + r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. 
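Concretely, a short sketch through the package's public entry point shows what the DT/DD rewrite produces; it assumes the vendored blackfriday is built with the DefinitionLists extension, so the ':'-prefixed lines parse as definitions:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	in := []byte("--force\n: skip the confirmation prompt\n: may be combined with --quiet\n")
	// Expected shape of the roff output: dtTag (".TP") before the term,
	// the first definition immediately below it, and a bare blank line
	// (dd2Tag) separating each subsequent definition.
	fmt.Println(string(md2man.Render(in)))
}
```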
+ if r.firstDD { + r.firstDD = false } else { - r.defineTerm = false + out(w, dd2Tag) } } else { out(w, ".IP \\(bu 2\n") @@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { if entering { out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced + // call walker to count cells (and rows?) so format section can be produced columns := countColumns(node) out(w, strings.Repeat("l ", columns)+"\n") out(w, strings.Repeat("l ", columns)+".\n") @@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering } func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } if entering { + var start string if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart } + out(w, start) } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag } out(w, end) } } +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + // because roff format requires knowing the column count before outputting any table // data we need to walk a table tree and count the columns func countColumns(node *blackfriday.Node) int { @@ -309,15 +309,6 @@ func out(w io.Writer, output string) { io.WriteString(w, output) // nolint: errcheck } -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - func escapeSpecialChars(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period @@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) { // directly copy normal characters org := i - for i < len(text) && !needsBackslash(text[i]) { + for i < len(text) && text[i] != '\\' { i++ } if i > org { diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index 8990f85b56e4..483743c99214 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -1,9 +1,10 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. Aanand Prasad Aaron L. 
Xu -Aaron Lehmann +Aaron Lehmann Aaron.L.Xu Abdur Rehman Abhinandan Prativadi @@ -24,22 +25,27 @@ Akihiro Suda Akim Demaille Alan Thompson Albert Callarisa +Alberto Roura Albin Kerouanton Aleksa Sarai Aleksander Piotrowski Alessandro Boch +Alex Couture-Beil Alex Mavrogiannis Alex Mayer Alexander Boyd Alexander Larsson -Alexander Morozov +Alexander Morozov Alexander Ryabov Alexandre González +Alexey Igrychev +Alexis Couvreur Alfred Landrum Alicia Lauerman Allen Sun Alvin Deng Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> Amir Goldstein Amit Krishnan Amit Shukla @@ -48,6 +54,8 @@ Anca Iordache Anda Xu Andrea Luzzardi Andreas Köhler +Andres G. Aragoneses +Andres Leon Rangel Andrew France Andrew Hsu Andrew Macpherson @@ -67,8 +75,9 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh -Arko Dasgupta -Arnaud Porterie +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout Arthur Peka Ashwini Oruganti Azat Khuyiyakhmetov @@ -76,18 +85,23 @@ Bardia Keyoumarsi Barnaby Gray Bastiaan Bakker BastianHofmann +Ben Bodenmiller Ben Bonnefoy Ben Creasy Ben Firshman Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater Benoit Sigoure Bhumika Bayani Bill Wang Bin Liu Bingshen Wang +Bishal Das Boaz Shuster Bogdan Anton Boris Pruessmann +Brad Baker Bradley Cicenas Brandon Mitchell Brandon Philips @@ -96,6 +110,7 @@ Bret Fisher Brian (bex) Exelbierd Brian Goff Brian Wieder +Bruno Sousa Bryan Bess Bryan Boreham Bryan Murphy @@ -114,15 +129,19 @@ Charles Chan Charles Law Charles Smith Charlie Drage +Charlotte Mach ChaYoung You +Chee Hau Lim Chen Chuanliang Chen Hanxiao Chen Mingjie Chen Qiu +Chris Couzens Chris Gavin Chris Gibson Chris McKinnel Chris Snow +Chris Vermilion Chris Weyl Christian Persson Christian Stefanescu @@ -131,6 +150,7 @@ Christophe Vidal Christopher Biscardi Christopher Crone Christopher Jones +Christopher Svensson Christy Norman Chun Chen Clinton Kitson @@ -139,8 +159,10 @@ Colin Hebert Collin Guarino Colm Hally Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby Corey Farrell Corey Quon +Cory Bennet Craig Wilhite Cristian Staretu Daehyeok Mun @@ -170,11 +192,13 @@ Dattatraya Kumbhar Dave Goodchild Dave Henderson Dave Tucker +David Alvarez David Beitey David Calavera David Cramer David Dooling David Gageot +David Karlsson David Lechner David Scott David Sheets @@ -186,7 +210,8 @@ Denis Defreyne Denis Gladkikh Denis Ollier Dennis Docter -Derek McGowan +Derek McGowan +Des Preston Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali @@ -196,12 +221,14 @@ Dimitry Andric Ding Fei Diogo Monica Djordje Lukic +Dmitriy Fishman Dmitry Gusev Dmitry Smirnov Dmitry V. Krivenok Dominik Braun Don Kjer Dong Chen +DongGeon Lee Doug Davis Drew Erny Ed Costello @@ -211,12 +238,14 @@ Eli Uriegas Elias Faxö Elliot Luo <956941328@qq.com> Eric Curtin +Eric Engestrom Eric G. Noriega Eric Rosenberg Eric Sage Eric-Olivier Lamey Erica Windisch Erik Hollensbe +Erik Humphrey Erik St. Martin Essam A. Hassan Ethan Haynes @@ -229,8 +258,10 @@ Evelyn Xu Everett Toews Fabio Falci Fabrizio Soppelsa +Felix Geyer Felix Hupfeld Felix Rabe +fezzik1620 Filip Jareš Flavio Crisciani Florian Klein @@ -242,6 +273,7 @@ Frederic Hemberger Frederick F. Kautz IV Frederik Nordahl Jul Sabroe Frieder Bluemle +Gabriel Gore Gabriel Nicolas Avellaneda Gaetan de Villele Gang Qiao @@ -251,13 +283,18 @@ George MacRorie George Xie Gianluca Borello Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov Goksu Toprak Gou Rao +Govind Rai Grant Reaber Greg Pflaum +Gsealy Guilhem Lettron Guillaume J. 
Charmes Guillaume Le Floch +Guillaume Tardif gwx296173 Günther Jungbluth Hakan Özler @@ -278,6 +315,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain Samuel McLean Elder Ian Campbell Ian Philpot Ignacio Capurro @@ -287,6 +325,7 @@ Ilya Sotkov Ioan Eugen Stan Isabel Jimenez Ivan Grcic +Ivan Grund Ivan Markin Jacob Atzen Jacob Tomlinson @@ -302,15 +341,18 @@ Jan-Jaap Driessen Jana Radhakrishnan Jared Hocutt Jasmine Hegman +Jason Hall Jason Heiss Jason Plum Jay Kamat +Jean Lecordier Jean Rouge Jean-Christophe Sirot Jean-Pierre Huynh Jeff Lindsay Jeff Nickoloff Jeff Silberman +Jennings Zhang Jeremy Chambers Jeremy Unruh Jeremy Yallop @@ -322,6 +364,7 @@ Jian Zhang Jie Luo Jilles Oldenbeuving Jim Galasyn +Jim Lin Jimmy Leger Jimmy Song jimmyxian @@ -338,6 +381,7 @@ Johannes 'fish' Ziemke John Feminella John Harris John Howard +John Howard John Laswell John Maguire John Mulhausen @@ -347,13 +391,16 @@ John Tims John V. Martinez John Willis Jon Johnson +Jon Zeolla Jonatas Baldin Jonathan Boulle Jonathan Lee Jonathan Lomas Jonathan McCrohan +Jonathan Warriss-Simmons Jonh Wendell Jordan Jennings +Jorge Vallecillo Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> Joseph Kern Josh Bodah @@ -383,9 +430,11 @@ Katie McLaughlin Ke Xu Kei Ohmura Keith Hudgins +Kelton Bassingthwaite Ken Cochrane Ken ICHIKAWA Kenfe-Mickaël Laventure +Kevin Alvarez Kevin Burke Kevin Feyrer Kevin Kern @@ -401,6 +450,7 @@ Krasi Georgiev Kris-Mikael Krister Kun Zhang Kunal Kushwaha +Kyle Mitofsky Lachlan Cooper Lai Jiangshan Lars Kellogg-Stedman @@ -410,6 +460,7 @@ Lee Gaines Lei Jitang Lennie Leo Gallucci +Leonid Skorospelov Lewis Daly Li Yi Li Yi @@ -445,6 +496,7 @@ Manjunath A Kumatagi Mansi Nahar mapk0y Marc Bihlmaier +Marc Cornellà Marco Mariani Marco Vedovati Marcus Martins @@ -459,6 +511,7 @@ Mason Fish Mason Malone Mateusz Major Mathieu Champlon +Mathieu Rollet Matt Gucci Matt Robenolt Matteo Orefice @@ -467,11 +520,13 @@ Matthieu Hauglustaine Mauro Porras P Max Shytikov Maxime Petazzoni +Maximillian Fan Xavier Mei ChunTao +Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith Michael Bridgen -Michael Crosby +Michael Crosby Michael Friis Michael Irwin Michael Käufl @@ -487,6 +542,7 @@ Mihai Borobocea Mihuleacc Sergiu Mike Brown Mike Casas +Mike Dalton Mike Danese Mike Dillon Mike Goelzer @@ -503,9 +559,12 @@ Mohini Anne Dsouza Moorthy RS Morgan Bauer Morten Hekkvang +Morten Linderud Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> Mrunal Patel muicoder +Murukesh Mohanan Muthukumar R Máximo Cuadros Mårten Cassel @@ -521,6 +580,7 @@ Nathan LeClaire Nathan McCauley Neil Peterson Nick Adcock +Nick Santos Nico Stapelbroek Nicola Kabar Nicolas Borboën @@ -535,6 +595,8 @@ Noah Treuhaft O.S. 
Tezer Odin Ugedal ohmystack +OKA Naoya +Oliver Pomeroy Olle Jonsson Olli Janatuinen Oscar Wieman @@ -550,9 +612,12 @@ Paul Lietar Paul Mulders Paul Weaver Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka Paweł Szczekutowicz Peeyush Gupta Per Lundberg +Peter Dave Hello Peter Edge Peter Hsu Peter Jaffe @@ -560,11 +625,13 @@ Peter Kehl Peter Nagy Peter Salvatore Peter Waller -Phil Estes +Phil Estes Philip Alexander Etling Philipp Gillé Philipp Schmied +Phong Tran pidster +Pieter E Smit pixelistik Pratik Karki Prayag Verma @@ -574,6 +641,7 @@ Qiang Huang Qinglan Peng qudongfang Raghavendra K T +Rahul Kadyan Rahul Zoldyck Ravi Shekhar Jethani Ray Tsang @@ -582,6 +650,7 @@ Remy Suen Renaud Gaubert Ricardo N Feliciano Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> Richard Mathie Richard Scothern Rick Wieman @@ -591,6 +660,7 @@ Rob Gulewich Robert Wallis Robin Naundorf Robin Speekenbrink +Roch Feuillade Rodolfo Ortiz Rogelio Canedo Rohan Verma @@ -609,11 +679,13 @@ Sainath Grandhi Sakeven Jiang Sally O'Malley Sam Neirinck +Sam Thibault Samarth Shah Sambuddha Basu Sami Tabet Samuel Cochran Samuel Karp +Sandro Jäckel Santhosh Manohar Sargun Dhillon Saswat Bhattacharya @@ -643,7 +715,8 @@ Slava Semushin Solomon Hykes Song Gao Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> +Spring Lee +squeegels Srini Brahmaroutu Stefan S. Stefan Scherer @@ -654,6 +727,7 @@ Stephen Rust Steve Durrheimer Steve Richards Steven Burgess +Stoica-Marcu Floris-Andrei Subhajit Ghosh Sun Jianbo Sune Keller @@ -665,7 +739,10 @@ Sébastien HOUZÉ T K Sourabh TAGOMORI Satoshi taiji-tech +Takeshi Koenuma +Takuya Noguchi Taylor Jones +Teiva Harsanyi Tejaswini Duggaraju Tengfei Wang Teppei Fukuda @@ -696,6 +773,7 @@ Tom Fotherby Tom Klingenberg Tom Milligan Tom X. 
Tobin +Tomas Bäckman Tomas Tomecek Tomasz Kopczynski Tomáš Hrčka @@ -711,6 +789,7 @@ Ulrich Bareth Ulysses Souza Umesh Yadav Valentin Lorentz +Vardan Pogosian Venkateswara Reddy Bukkasamudram Veres Lajos Victor Vieux @@ -757,6 +836,7 @@ Yunxiang Huang Zachary Romero Zander Mackie zebrilee +Zeel B Patel Zhang Kun Zhang Wei Zhang Wentao @@ -768,4 +848,5 @@ Zhu Guihua Álex González Álvaro Lázaro Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 93275f3d9858..b7c05c3f860f 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -19,7 +19,7 @@ const ( // ConfigFileName is the name of config file ConfigFileName = "config.json" configFileDir = ".docker" - oldConfigfile = ".dockercfg" + oldConfigfile = ".dockercfg" // Deprecated: remove once we stop printing deprecation warning contextsDir = "contexts" ) @@ -84,16 +84,6 @@ func Path(p ...string) (string, error) { return path, nil } -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { @@ -104,14 +94,18 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { return &configFile, err } -// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file -var printLegacyFileWarning bool - // Load reads the configuration files in the given directory, and sets up // the auth config information and returns values. 
// FIXME: use the internal golang config parser func Load(configDir string) (*configfile.ConfigFile, error) { - printLegacyFileWarning = false + cfg, _, err := load(configDir) + return cfg, err +} + +// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file +// so we can remove the bool return value and collapse this back into `Load` +func load(configDir string) (*configfile.ConfigFile, bool, error) { + printLegacyFileWarning := false if configDir == "" { configDir = Dir() @@ -127,34 +121,30 @@ func Load(configDir string) (*configfile.ConfigFile, error) { if err != nil { err = errors.Wrap(err, filename) } - return configFile, err + return configFile, printLegacyFileWarning, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop - return configFile, errors.Wrap(err, filename) + return configFile, printLegacyFileWarning, errors.Wrap(err, filename) } // Can't find latest config file so check for the old one filename = filepath.Join(getHomeDir(), oldConfigfile) - if file, err := os.Open(filename); err == nil { + if _, err := os.Stat(filename); err == nil { printLegacyFileWarning = true - defer file.Close() - if err := configFile.LegacyLoadFromReader(file); err != nil { - return configFile, errors.Wrap(err, filename) - } } - return configFile, nil + return configFile, printLegacyFileWarning, nil } // LoadDefaultConfigFile attempts to load the default config file and returns // an initialized ConfigFile struct if none is found. func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, err := Load(Dir()) + configFile, printLegacyFileWarning, err := load(Dir()) if err != nil { fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) } if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format has been removed and the configuration file will be ignored") } if !configFile.ContainsAuth() { configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index dc9f39eb7e3d..796b0a0aed48 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,9 +3,7 @@ package configfile import ( "encoding/base64" "encoding/json" - "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -16,13 +14,6 @@ import ( "github.com/sirupsen/logrus" ) -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
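With that refactor, Load keeps its public signature while the internal load threads the legacy-file flag through to LoadDefaultConfigFile, which now only warns that ~/.dockercfg support is gone. Typical consumption is unchanged; a brief sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// LoadDefaultConfigFile never returns nil: on error it prints a
	// warning to the given writer and returns an initialized ConfigFile.
	cfg := config.LoadDefaultConfigFile(os.Stderr)
	fmt.Println("credentials store:", cfg.CredentialsStore)
	for registry := range cfg.AuthConfigs {
		fmt.Println("auth configured for:", registry)
	}
}
```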
- defaultIndexServer = "https://index.docker.io/v1/" -) - // ConfigFile ~/.docker/config.json file info type ConfigFile struct { AuthConfigs map[string]types.AuthConfig `json:"auths"` @@ -46,8 +37,7 @@ type ConfigFile struct { PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored. CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` @@ -60,11 +50,7 @@ type ProxyConfig struct { HTTPSProxy string `json:"httpsProxy,omitempty"` NoProxy string `json:"noProxy,omitempty"` FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` + AllProxy string `json:"allProxy,omitempty"` } // New initializes an empty configuration file for the given filename 'fn' @@ -78,48 +64,10 @@ func New(fn string) *ConfigFile { } } -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - // LoadFromReader reads the configuration data given and sets up the auth config // information with given directory and populates the receiver object func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) { + if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { return err } var err error @@ -134,7 +82,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { ac.ServerAddress = addr configFile.AuthConfigs[addr] = ac } - return checkKubernetesConfiguration(configFile.Kubernetes) + return nil } // ContainsAuth returns whether there is authentication configured @@ -191,10 +139,10 @@ func (configFile *ConfigFile) Save() (retErr error) { } dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return err } - temp, err := 
ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) if err != nil { return err } @@ -244,6 +192,7 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]* "HTTPS_PROXY": &config.HTTPSProxy, "NO_PROXY": &config.NoProxy, "FTP_PROXY": &config.FTPProxy, + "ALL_PROXY": &config.AllProxy, } m := runOpts if m == nil { @@ -399,17 +348,3 @@ func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) delete(configFile.Plugins, pluginname) } } - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go index 3ca65c6140d6..353887547cd3 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package configfile @@ -11,7 +12,7 @@ import ( // ignoring any error during the process. func copyFilePermissions(src, dst string) { var ( - mode os.FileMode = 0600 + mode os.FileMode = 0o600 uid, gid int ) diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go index 3028168ac240..c9630ea51bad 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin && !linux // +build !windows,!darwin,!linux package credentials diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go index afe542cc3ce9..f9619b0381c8 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -7,7 +7,7 @@ import ( ) const ( - remoteCredentialsPrefix = "docker-credential-" + remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials tokenUsername = "" ) diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go index 128da447b5f6..a0b035c92a5c 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -4,13 +4,13 @@ // For example, to provide an http.Client that can connect to a Docker daemon // running in a Docker container ("DIND"): // -// httpClient := &http.Client{ -// Transport: &http.Transport{ -// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) { -// return commandconn.New(ctx, "docker", "exec", "-it", containerID, "docker", "system", "dial-stdio") -// }, -// }, -// } +// httpClient := &http.Client{ +// Transport: &http.Transport{ +// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) { +// return commandconn.New(ctx, "docker", "exec", "-it", containerID, 
"docker", "system", "dial-stdio") +// }, +// }, +// } package commandconn import ( @@ -37,7 +37,7 @@ func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) { c commandConn err error ) - c.cmd = exec.CommandContext(ctx, cmd, args...) + c.cmd = exec.Command(cmd, args...) // we assume that args never contains sensitive information logrus.Debugf("commandconn: starting %s with %v", cmd, args) c.cmd.Env = os.Environ() @@ -236,17 +236,21 @@ func (c *commandConn) Close() error { func (c *commandConn) LocalAddr() net.Addr { return c.localAddr } + func (c *commandConn) RemoteAddr() net.Addr { return c.remoteAddr } + func (c *commandConn) SetDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetDeadline(%v)", t) return nil } + func (c *commandConn) SetReadDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t) return nil } + func (c *commandConn) SetWriteDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t) return nil diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go index ab07166724f0..2adcf0816085 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package commandconn diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go index 6448500d6392..57bdecec03f2 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package commandconn diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 4c35b879afd8..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. 
-func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. 
-func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr := daErr.(type) { - case ErrorCode: - err = daErr.WithDetail(nil) - case Error: - err = daErr - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index d77e70473e7b..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,40 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
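These removed types serialized registry API errors into the standard `{"errors":[...]}` envelope. For callers that still need that wire shape without the vendored package, a sketch with field names taken from the removed Error struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// apiError mirrors the JSON shape of the removed errcode.Error: the
// ErrorCode marshals to its string value via MarshalText.
type apiError struct {
	Code    string      `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`
}

func main() {
	body, _ := json.Marshal(struct {
		Errors []apiError `json:"errors,omitempty"`
	}{[]apiError{{Code: "UNAUTHORIZED", Message: "authentication required"}}})
	fmt.Println(string(body)) // {"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}
}
```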
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6d7d..000000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go index da8b594e7f89..91d9d4bbae9f 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -169,8 +169,8 @@ func Erase(helper Helper, reader io.Reader) error { return helper.Delete(serverURL) } -//List returns all the serverURLs of keys in -//the OS store as a list of strings +// List returns all the serverURLs of keys in +// the OS store as a list of strings func List(helper Helper, writer io.Writer) error { accts, err := helper.List() if err != nil { @@ -179,8 +179,8 @@ func List(helper Helper, writer io.Writer) error { return json.NewEncoder(writer).Encode(accts) } -//PrintVersion outputs the current version. +// PrintVersion outputs the current version. 
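PrintVersion's new output depends on link-time variables introduced in the version.go hunk below. A sketch of a helper binary filling them; the module values in the comment are illustrative, not taken from any real build:

```go
// Built with, for example:
//
//	go build -ldflags "\
//	  -X github.com/docker/docker-credential-helpers/credentials.Name=docker-credential-example \
//	  -X github.com/docker/docker-credential-helpers/credentials.Version=v0.7.0"
//
// after which PrintVersion writes:
// docker-credential-example (github.com/docker/docker-credential-helpers) v0.7.0
package main

import (
	"os"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	_ = credentials.PrintVersion(os.Stdout)
}
```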
func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) + fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) return nil } diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go index 185e367961a5..84377c26309f 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -1,4 +1,16 @@ package credentials -// Version holds a string describing the current version -const Version = "0.6.4" +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 2ae76d2c2c9c..0728bfe18f36 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -1,5 +1,6 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. Aanand Prasad Aaron Davidson @@ -7,9 +8,8 @@ Aaron Feng Aaron Hnatiw Aaron Huslage Aaron L. Xu -Aaron Lehmann +Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -17,6 +17,7 @@ Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab +Abirdcfly Ada Mancini Adam Avilla Adam Dobrawy @@ -61,10 +62,11 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Albin Kerouanton +Albin Kerouanton Alec Benson Alejandro González Hevia Aleksa Sarai +Aleksandr Chebotov Aleksandrs Fadins Alena Prokharchyk Alessandro Boch @@ -76,6 +78,7 @@ Alex Crawford Alex Ellis Alex Gaynor Alex Goodman +Alex Nordlund Alex Olshansky Alex Samorukov Alex Warhawk @@ -83,7 +86,7 @@ Alexander Artemenko Alexander Boyd Alexander Larsson Alexander Midlash -Alexander Morozov +Alexander Morozov Alexander Polakov Alexander Shopov Alexandre Beslic @@ -159,7 +162,6 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins -andy Andy Chambers andy diller Andy Goldstein @@ -168,6 +170,7 @@ Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson +Andy Zhang Anes Hasicic Angel Velazquez Anil Belur @@ -192,19 +195,22 @@ Antony Messerli Anuj Bahuguna Anuj Varma Anusha Ragunathan +Anyu Wang apocas Arash Deshmeh ArikaChen -Arko Dasgupta +Arko Dasgupta Arnaud Lefebvre -Arnaud Porterie +Arnaud Porterie Arnaud Rebillout +Artem Khramov Arthur Barr Arthur Gautier Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -218,6 +224,7 @@ Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker +Bastien Pascard bdevloed Bearice Ren Ben Bonnefoy @@ -225,6 +232,7 @@ Ben Firshman Ben Golub Ben Gould Ben Hall +Ben Langfeld Ben Sargent Ben Severson Ben Toews @@ -250,9 +258,11 @@ Billy Ridgway Bily Zhang Bin Liu Bingshen Wang +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott +Bojun Zhu Boqin Qin Boris Pruessmann Boshi Lian @@ -334,6 +344,7 @@ Charlie Drage Charlie Lewis Chase Bolt ChaYoung 
You +Chee Hau Lim Chen Chao Chen Chuanliang Chen Hanxiao @@ -343,6 +354,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chenyang Yan chenyuzhu Chetan Birajdar Chewey @@ -406,20 +418,23 @@ Colin Walters Collin Guarino Colm Hally companycy +Conor Evans Corbin Coleman Corey Farrell Cory Forsyth +Cory Snider cressie176 -CrimsonGlory Cristian Ariza Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei +cuishuang Cuong Manh Le Cyprian Gracz Cyril F +Da McGrady Daan van Berkel Daehyeok Mun Dafydd Crosby @@ -437,6 +452,7 @@ Dan Hirsch Dan Keder Dan Levy Dan McPherson +Dan Plamadeala Dan Stine Dan Williams Dani Hodovic @@ -457,6 +473,7 @@ Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg +Daniel P. Berrangé Daniel Robinson Daniel S Daniel Sweet @@ -465,6 +482,7 @@ Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniele Rondina Danny Berger Danny Milosavljevic Danny Yates @@ -530,10 +548,9 @@ Dennis Docter Derek Derek Derek Ch -Derek McGowan +Derek McGowan Deric Crago Deshi Xiao -devmeyster Devon Estes Devvyn Murphy Dharmit Shah @@ -550,9 +567,11 @@ Dimitris Rozakis Dimitry Andric Dinesh Subhraveti Ding Fei +dingwei Diogo Monica DiuDiugirl Djibril Koné +Djordje Lukic dkumor Dmitri Logvinenko Dmitri Shuralyov @@ -601,6 +620,7 @@ Elango Sivanandam Elena Morozova Eli Uriegas Elias Faxö +Elias Koromilas Elias Probst Elijah Zupancic eluck @@ -610,6 +630,7 @@ Emil Hernvall Emily Maier Emily Rose Emir Ozer +Eng Zer Jun Enguerran Eohyung Lee epeterso @@ -634,6 +655,7 @@ Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen +Erik Sipsma Erik St. Martin Erik Weathers Erno Hopearuoho @@ -691,6 +713,7 @@ Fengtu Wang Ferenc Szabo Fernando Fero Volar +Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira @@ -724,11 +747,14 @@ Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Goller Gabriel L. Somlo Gabriel Linder Gabriel Monroy @@ -751,6 +777,7 @@ George Kontridze George MacRorie George Xie Georgi Hristozov +Georgy Yakovlev Gereon Frey German DZ Gert van Valkenhoef @@ -762,6 +789,7 @@ Gildas Cuisinier Giovan Isa Musthofa gissehel Giuseppe Mazzotta +Giuseppe Scrivano Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington @@ -785,6 +813,7 @@ Guilherme Salgado Guillaume Dufour Guillaume J. Charmes Gunadhya S. 
<6939749+gunadhya@users.noreply.github.com> +Guoqiang QI guoxiuyan Guri Gurjeet Singh @@ -794,12 +823,13 @@ gwx296173 Günter Zöchbauer Haichao Yang haikuoliu +haining.cao Hakan Özler Hamish Hutchings Hannes Ljungberg Hans Kristian Flaatten Hans Rødtang -Hao Shu Wei +Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers Harald Niesche @@ -838,10 +868,9 @@ Hui Kang Hunter Blanks huqun Huu Nguyen -hyeongkyu.lee +Hyeongkyu Lee Hyzhou Zhy Iago López Galeiras -Ian Babrou Ian Bishop Ian Bull Ian Calvert @@ -858,6 +887,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov @@ -889,6 +919,7 @@ Jake Champlin Jake Moshenko Jake Sanders Jakub Drahos +Jakub Guzik James Allen James Carey James Carr @@ -900,11 +931,14 @@ James Lal James Mills James Nesbitt James Nugent +James Sanders James Turnbull James Watkins-Harvey Jamie Hannaford Jamshid Afshar +Jan Breig Jan Chren +Jan Götte Jan Keromnes Jan Koprowski Jan Pazdziora @@ -917,7 +951,6 @@ Januar Wayong Jared Biel Jared Hocutt Jaroslaw Zabiello -jaseg Jasmine Hegman Jason A. Donenfeld Jason Divock @@ -932,10 +965,11 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Javier Bassi jaxgeller -Jay Jay Jay Kamat +Jay Lim Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido @@ -1100,6 +1134,7 @@ Justas Brazauskas Justen Martin Justin Cormack Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> Justin Menga Justin Plock Justin Simonelis @@ -1148,6 +1183,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1174,6 +1210,7 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konrad Ponichtera Konstantin Gribov Konstantin L Konstantin Pelykh @@ -1184,7 +1221,6 @@ Kris-Mikael Krister Kristian Haugene Kristina Zabunova Krystian Wojcicki -Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy @@ -1212,7 +1248,6 @@ Leandro Siqueira Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han -leeplay Lei Gong Lei Jitang Leiiwang @@ -1239,7 +1274,6 @@ Lifubang Lihua Tang Lily Guo limeidan -limsy Lin Lu LingFaKe Linus Heckemann @@ -1269,6 +1303,7 @@ Lucas Chi Lucas Molas Lucas Silvestre Luciano Mores +Luis Henrique Mulinari Luis Martínez de Bartolomé Izquierdo Luiz Svoboda Lukas Heeren @@ -1317,6 +1352,7 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Feit Mark Jeromin Mark McGranaghan Mark McKinstry @@ -1332,6 +1368,8 @@ Markus Fix Markus Kortlang Martijn Dwars Martijn van Oosterhout +Martin Braun +Martin Dojcak Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen @@ -1348,6 +1386,7 @@ Mathias Monnerville Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent +Mathieu Paturel Matt Apperson Matt Bachmann Matt Bajor @@ -1356,6 +1395,7 @@ Matt Haggard Matt Hoyle Matt McCormick Matt Moore +Matt Morrison <3maven@gmail.com> Matt Richardson Matt Rickard Matt Robenolt @@ -1400,7 +1440,7 @@ Michael Beskin Michael Bridgen Michael Brown Michael Chiang -Michael Crosby +Michael Crosby Michael Currie Michael Friis Michael Gorsuch @@ -1409,6 +1449,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kuehn Michael Käufl Michael Neale Michael Nussbaum @@ -1418,6 +1459,7 @@ Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies +Michael Weidmann Michael West Michael Zhao Michal Fojtik @@ -1458,6 +1500,7 @@ Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1533,6 +1576,7 @@ 
Nicolas Kaiser Nicolas Sterchele Nicolas V Castet Nicolás Hock Isaza +Niel Drummond Nigel Poulton Nik Nyby Nikhil Chawla @@ -1614,6 +1658,7 @@ Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Pawel Konczalski +Paweł Gronowski Peeyush Gupta Peggy Li Pei Su @@ -1621,6 +1666,7 @@ Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com +Pete Woods Peter Bourgon Peter Braden Peter Bücker @@ -1638,7 +1684,8 @@ Peter Waller Petr Švihlík Petros Angelatos Phil -Phil Estes +Phil Estes +Phil Sphicas Phil Spitler Philip Alexander Etling Philip Monroe @@ -1707,9 +1754,9 @@ Renaud Gaubert Rhys Hiltner Ri Xu Ricardo N Feliciano +Rich Horwood Rich Moyse Rich Seymour -Richard Richard Burnison Richard Harvey Richard Mathie @@ -1731,6 +1778,7 @@ Robert Bachmann Robert Bittle Robert Obryk Robert Schneider +Robert Shade Robert Stern Robert Terhaar Robert Wallis @@ -1743,6 +1791,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1757,6 +1806,8 @@ Roma Sokolov Roman Dudin Roman Mazur Roman Strashkin +Roman Volosatovs +Roman Zabaluev Ron Smits Ron Williams Rong Gao @@ -1782,6 +1833,7 @@ Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett +Ryan Barry Ryan Belgrave Ryan Campbell Ryan Detzel @@ -1790,6 +1842,7 @@ Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto +Ryan Shea Ryan Simmen Ryan Stelly Ryan Thomas @@ -1802,7 +1855,6 @@ Ryo Nakao Ryoga Saito Rémy Greinhofer s. rannou -s00318865 Sabin Basyal Sachin Joshi Sagar Hani @@ -1822,8 +1874,9 @@ Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau -Samuel Karp +Samuel Karp Samuel PHAN +sanchayanghosh Sandeep Bansal Sankar சங்கர் Sanket Saurav @@ -1852,6 +1905,7 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn +Sebastian Höffner Sebastian Radloff Sebastien Goasguen Senthil Kumar Selvaraj @@ -1881,6 +1935,7 @@ Shengbo Song Shengjing Zhu Shev Yan Shih-Yuan Lee +Shihao Xia Shijiang Wei Shijun Qin Shishir Mahajan @@ -1889,7 +1944,6 @@ Shourya Sarcar Shu-Wai Chow shuai-z Shukui Yang -Shuwei Hao Sian Lerk Lau Siarhei Rasiukevich Sidhartha Mani @@ -1897,7 +1951,6 @@ sidharthamani Silas Sewell Silvan Jegen Simão Reis -Simei He Simon Barendse Simon Eskildsen Simon Ferquel @@ -1933,6 +1986,7 @@ Stefan S. 
Stefan Scherer Stefan Staudenmeyer Stefan Weil +Steffen Butzer Stephan Spindler Stephen Benjamin Stephen Crosby @@ -1951,6 +2005,7 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor +Stéphane Este-Gracias Stig Larsson Su Wang Subhajit Ghosh @@ -1962,15 +2017,16 @@ Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade -Sylvain Baubeau +Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq +Sören Tempel Tabakhase Tadej Janež -TAGOMORI Satoshi +Takuto Sato tang0th Tangi Colin Tatsuki Sugiura @@ -1983,7 +2039,6 @@ Tejaswini Duggaraju Tejesh Mehta Terry Chu terryding77 <550147740@qq.com> -tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler @@ -1996,6 +2051,7 @@ Thomas Gazagnaire Thomas Graf Thomas Grainger Thomas Hansen +Thomas Ledos Thomas Leonard Thomas Léveil Thomas Orozco @@ -2006,6 +2062,7 @@ Thomas Swift Thomas Tanaka Thomas Texier Ti Zhou +Tiago Seabra Tianon Gravi Tianyi Wang Tibor Vass @@ -2064,9 +2121,11 @@ Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz +Tomek Mańko Tommaso Visconti Tomoya Tabuchi Tomáš Hrčka +tonic Tonny Xu Tony Abboud Tony Daws @@ -2087,6 +2146,7 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton +Tudor Brindus Ty Alexander Tycho Andersen Tyler Brock @@ -2118,7 +2178,7 @@ Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts -Vincent Bernat +Vincent Bernat Vincent Boulineau Vincent Demeester Vincent Giersch @@ -2141,7 +2201,6 @@ VladimirAus Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) -waitingkuo Walter Leibbrandt Walter Stanish Wang Chao @@ -2171,7 +2230,6 @@ Wendel Fleming Wenjun Tang Wenkai Yin wenlxie -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang @@ -2196,6 +2254,7 @@ Wolfgang Powisch Wonjun Kim WuLonghui xamyzhao +Xia Wu Xian Chaobo Xianglin Gao Xianjie @@ -2220,6 +2279,7 @@ Xuecong Liao xuzhaokui Yadnyawalkya Tale Yahya +yalpul YAMADA Tsuyoshi Yamasaki Masahide Yan Feng @@ -2228,6 +2288,7 @@ Yang Bai Yang Li Yang Pengfei yangchenliang +Yann Autissier Yanqiang Miao Yao Zaiyong Yash Murty @@ -2247,6 +2308,7 @@ Yosef Fertel You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF +Youfu Zhang Yu Changchun Yu Chengxia Yu Peng @@ -2254,6 +2316,7 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yue Zhang +Yufei Xiong Yuhao Fang Yuichiro Kaneko YujiOshima @@ -2298,7 +2361,6 @@ Zou Yu zqh Zuhayr Elahi Zunayed Ali -Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 49dda1376903..cda28276197c 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -24,7 +24,7 @@ info: title: "Docker Engine API" version: "1.42" x-logo: - url: "https://docs.docker.com/images/logo-docker-main.png" + url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker @@ -202,24 +202,74 @@ definitions: MountPoint: type: "object" - description: "A mount point inside a container" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. properties: Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. 
+ - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. type: "string" + example: "myvolume" Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. type: "string" + example: "/usr/share/nginx/html/" Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). type: "string" + example: "local" Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). type: "string" + example: "z" RW: + description: | + Whether the mount is mounted writable (read-write). type: "boolean" + example: true Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. type: "string" + example: "" DeviceMapping: type: "object" @@ -302,12 +352,14 @@ definitions: - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" + - "cluster" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -332,6 +384,10 @@ definitions: description: "Disable recursive bind mount." type: "boolean" default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -529,19 +585,13 @@ definitions: type: "array" items: $ref: "#/definitions/DeviceRequest" - KernelMemory: + KernelMemoryTCP: description: | - Kernel memory limit in bytes. + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. -
-        <p><br /></p>
-
    - - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. - type: "integer" - format: "int64" - example: 209715200 - KernelMemoryTCP: - description: "Hard limit for kernel TCP buffer memory (in bytes)." + This field is omitted when empty. type: "integer" format: "int64" MemoryReservation: @@ -725,11 +775,13 @@ definitions: The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Retries: description: | The number of consecutive failures needed to consider a container as @@ -741,6 +793,7 @@ definitions: health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Health: description: | @@ -913,6 +966,16 @@ definitions: type: "array" items: $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 # Applicable to UNIX platforms CapAdd: @@ -1027,8 +1090,9 @@ definitions: description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" - description: "A list of string values to customize labels for MLS - systems, such as SELinux." + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. items: type: "string" StorageOpt: @@ -1076,15 +1140,6 @@ definitions: type: "string" description: "Runtime to use with this container." # Applicable to Windows - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. (Windows only) - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 Isolation: type: "string" description: | @@ -1109,14 +1164,25 @@ definitions: type: "string" ContainerConfig: - description: "Configuration for a container that is portable between hosts" + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. type: "object" properties: Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. type: "string" + example: "439f4e91bd1d" Domainname: - description: "The domain name to use for the container." + description: | + The domain name to use for the container. type: "string" User: description: "The user that commands are run as inside the container." @@ -1139,11 +1205,16 @@ definitions: `{"/": {}}` type: "object" + x-nullable: true additionalProperties: type: "object" enum: - {} default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. @@ -1165,21 +1236,29 @@ definitions: type: "array" items: type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: description: | Command to run specified as a string or an array of strings. 
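The `format: "int64"` annotations added to the `HealthConfig` durations above matter for generated clients: `Interval`, `Timeout`, and `StartPeriod` are nanosecond counts, which is why the engine's Go types map them to `time.Duration`. A sketch using `github.com/docker/docker/api/types/container`; the probe endpoint and values are made up:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

// exampleHealthConfig builds a health check; values are illustrative.
func exampleHealthConfig() *container.HealthConfig {
	return &container.HealthConfig{
		// CMD-SHELL runs the probe through the container's default shell.
		Test: []string{"CMD-SHELL", "curl -sf http://localhost:8080/healthz || exit 1"},
		// time.Duration is an int64 nanosecond count, exactly what the
		// swagger's `format: "int64"` describes; each value must be
		// 0 (inherit) or at least 1ms.
		Interval:    30 * time.Second,
		Timeout:     5 * time.Second,
		StartPeriod: 10 * time.Second,
		Retries:     3,
	}
}

func main() {
	hc := exampleHealthConfig()
	fmt.Println(hc.Interval, hc.Timeout, hc.Retries)
}
```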
type: "array" items: type: "string" + example: ["/bin/sh"] Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" + default: false + example: false + x-nullable: true Image: description: | - The name of the image to use when creating the container/ + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. type: "string" + example: "example-image:1.0" Volumes: description: | An object mapping mount point paths inside the container to empty @@ -1193,6 +1272,7 @@ definitions: WorkingDir: description: "The working directory for commands to run in." type: "string" + example: "/public/" Entrypoint: description: | The entry point for the container as a string or an array of strings. @@ -1203,38 +1283,50 @@ definitions: type: "array" items: type: "string" + example: [] NetworkDisabled: description: "Disable networking for the container." type: "boolean" + x-nullable: true MacAddress: description: "MAC address of the container." type: "string" + x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" + x-nullable: true items: type: "string" + example: [] Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" - default: "SIGTERM" + example: "SIGTERM" + x-nullable: true StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 + x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" + x-nullable: true items: type: "string" + example: ["/bin/sh", "-c"] NetworkingConfig: description: | @@ -1491,107 +1583,215 @@ definitions: example: "4443" GraphDriverData: - description: "Information about a container's graph driver." + description: | + Information about the storage driver used to store the container's and + image's filesystem. type: "object" required: [Name, Data] properties: Name: + description: "Name of the storage driver." type: "string" x-nullable: false + example: "overlay2" Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. type: "object" x-nullable: false additionalProperties: type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } - Image: + ImageInspect: + description: | + Information about an image in the local image cache. type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). 
+ + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. type: "array" items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" Comment: + description: | + Optional message that was set when committing or importing the image. type: "string" x-nullable: false + example: "" Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. type: "string" x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. type: "string" x-nullable: false + example: "20.10.7" Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. type: "string" x-nullable: false + example: "" Config: $ref: "#/definitions/ContainerConfig" Architecture: + description: | + Hardware CPU architecture that the image runs on. type: "string" x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" Os: + description: | + Operating System the image is built to run on. type: "string" x-nullable: false + example: "linux" OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). type: "string" + example: "" + x-nullable: true Size: + description: | + Total size of the image including all layers it is composed of. 
type: "integer" format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" format: "int64" x-nullable: false + example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: + description: | + Information about the image's RootFS, including the layer IDs. type: "object" required: [Type] properties: Type: type: "string" x-nullable: false + example: "layers" Layers: type: "array" items: type: "string" - BaseLayer: - type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. type: "object" properties: LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. type: "string" format: "dateTime" - + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true ImageSummary: type: "object" required: @@ -1607,41 +1807,120 @@ definitions: - Containers properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" x-nullable: false items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
type: "array" x-nullable: false items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds sinds EPOCH). type: "integer" x-nullable: false + example: "1644009612" Size: + description: | + Total size of the image including all layers it is composed of. type: "integer" + format: "int64" x-nullable: false + example: 172064416 SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. type: "integer" + format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" + format: "int64" x-nullable: false + example: 172064416 Labels: + description: "User-defined key/value metadata." type: "object" x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. x-nullable: false type: "integer" + example: 2 AuthConfig: type: "object" @@ -1683,18 +1962,22 @@ definitions: type: "string" description: "Name of the volume." x-nullable: false + example: "tardis" Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false + example: "custom" Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false + example: "/var/lib/docker/volumes/tardis" CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" Status: type: "object" description: | @@ -1706,12 +1989,17 @@ definitions: does not support this feature. additionalProperties: type: "object" + example: + hello: "world" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Scope: type: "string" description: | @@ -1720,15 +2008,23 @@ definitions: default: "local" x-nullable: false enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" UsageData: type: "object" x-nullable: true + x-go-name: "UsageData" required: [Size, RefCount] description: | Usage details about the volume. 
This information is used by the @@ -1736,6 +2032,7 @@ definitions: properties: Size: type: "integer" + format: "int64" default: -1 description: | Amount of disk space used by the volume (in bytes). This information @@ -1745,23 +2042,71 @@ definitions: x-nullable: false RefCount: type: "integer" + format: "int64" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - CreatedAt: "2016-06-07T20:31:11.853781916Z" + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] Network: type: "object" @@ -1849,15 +2194,27 @@ definitions: ``` type: "array" items: - type: "object" - additionalProperties: - type: "string" + $ref: "#/definitions/IPAMConfig" Options: description: "Driver-specific options, specified as a map." type: "object" additionalProperties: type: "string" + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + IPRange: + type: "string" + Gateway: + type: "string" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: type: "object" properties: @@ -1894,23 +2251,63 @@ definitions: BuildCache: type: "object" + description: | + BuildCache contains information about a build cache record. properties: ID: type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] Type: type: "string" + description: | + Cache record type. 
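The `BuildCache.Type` enum introduced above is lifted from BuildKit's own disk-usage records (see the `client/diskusage.go` link in the hunk). The same records can be read straight from a buildkitd with the BuildKit client; a sketch, with the daemon address as an assumption:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	// Address is illustrative; buildctl uses the same default socket.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Each UsageInfo corresponds to one BuildCache record in the Engine
	// API: ID, Parents, RecordType, Description, InUse, Shared, Size, ...
	infos, err := c.DiskUsage(ctx)
	if err != nil {
		panic(err)
	}
	for _, du := range infos {
		fmt.Printf("%s type=%s size=%d inuse=%v\n", du.ID, du.RecordType, du.Size, du.InUse)
	}
}
```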
+ example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" Description: type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: type: "boolean" + description: | + Indicates if the build cache is in use. + example: false Shared: type: "boolean" + description: | + Indicates if the build cache is shared. + example: true Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" + example: 51 CreatedAt: description: | Date and time at which the build cache was created in @@ -1928,6 +2325,7 @@ definitions: example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" + example: 26 ImageID: type: "object" @@ -3357,7 +3755,7 @@ definitions: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" - Reservation: + Reservations: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: @@ -3641,6 +4039,7 @@ definitions: ServiceSpec: description: "User modifiable configuration for a service." + type: object properties: Name: description: "Name of the service." @@ -4096,7 +4495,7 @@ definitions: Mounts: type: "array" items: - $ref: "#/definitions/Mount" + $ref: "#/definitions/MountPoint" Driver: description: "Driver represents a driver (network, logging, secrets)." @@ -4278,6 +4677,50 @@ definitions: Health: $ref: "#/definitions/Health" + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + SystemVersion: type: "object" description: | @@ -4458,14 +4901,13 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: + KernelMemoryTCP: description: | - Indicates if the host has kernel memory limit support enabled. - -
-        <p><br /></p>
-
    + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. type: "boolean" example: true CpuCfsPeriod: @@ -5175,6 +5617,7 @@ definitions: PeerNode: description: "Represents a peer-node in the swarm" + type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." @@ -5284,22 +5727,22 @@ definitions: type: "integer" format: "int64" example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. + # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" OCIPlatform: type: "object" @@ -5340,23 +5783,259 @@ definitions: type: "string" example: "v7" - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. 
This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. 
+ items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" paths: /containers/json: @@ -5576,6 +6255,28 @@ paths: `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" - name: "body" in: "body" description: "Container to create" @@ -5626,7 +6327,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -5720,25 +6420,7 @@ paths: 201: description: "Container created successfully" schema: - type: "object" - title: "ContainerCreateResponse" - description: "OK response to ContainerCreate operation" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] + $ref: "#/definitions/ContainerCreateResponse" 400: description: "bad parameter" schema: @@ -5918,7 +6600,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -6116,6 +6797,9 @@ paths: Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ContainerLogs" responses: 200: @@ -6529,6 +7213,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6558,6 +7247,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6599,7 +7293,8 @@ paths: type: "string" - name: "signal" in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" default: "SIGKILL" tags: ["Container"] @@ -6663,7 +7358,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6817,7 +7511,8 @@ paths: ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream over the hijacked connected is multiplexed to separate out + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. 
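The attach/logs hunks above formalize the non-TTY stream as its own content type, `application/vnd.docker.multiplexed-stream`. Go programs normally don't parse the 8-byte frame headers themselves; the engine ships `stdcopy` for that. A minimal consumer sketch (the container name is a placeholder, and `Tty` is assumed disabled so the stream is actually multiplexed):

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Container ID is a placeholder.
	rc, err := cli.ContainerLogs(context.Background(), "mycontainer", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// StdCopy reads the multiplexed frames (1-byte stream type, 3 bytes
	// padding, 4-byte big-endian payload length, then the payload) and
	// routes stdout/stderr to the given writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		panic(err)
	}
}
```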
@@ -6861,6 +7556,7 @@ paths: operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 101: description: "no error, hints proxy about hijacking" @@ -7002,22 +7698,7 @@ paths: 200: description: "The container has exit." schema: - type: "object" - title: "ContainerWaitResponse" - description: "OK response to ContainerWait operation" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - Error: - description: "container waiting error, if any" - type: "object" - properties: - Message: - description: "Details of an error" - type: "string" + $ref: "#/definitions/ContainerWaitResponse" 400: description: "bad parameter" schema: @@ -7125,17 +7806,7 @@ paths: 400: description: "Bad parameter" schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7170,17 +7841,7 @@ paths: 400: description: "Bad parameter" schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7206,7 +7867,10 @@ paths: tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: @@ -7216,6 +7880,9 @@ paths: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." 
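The `PutContainerArchive` clarification above (the destination `path` must already exist as a directory, otherwise the documented 400 "not a directory" error is returned) corresponds to `CopyToContainer` in the Go client. A sketch that uploads a single in-memory file; the container ID and destination path are placeholders:

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Build a one-file tar archive in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	data := []byte("hello\n")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(data))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(data); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// "/tmp" must exist in the container and be a directory; a file at
	// that path yields the documented 400 "not a directory" error.
	if err := cli.CopyToContainer(context.Background(), "mycontainer", "/tmp", &buf, types.CopyToContainerOptions{}); err != nil {
		panic(err)
	}
}
```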
schema: @@ -7317,35 +7984,6 @@ paths: type: "array" items: $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 500: description: "server error" schema: @@ -7717,84 +8355,7 @@ paths: 200: description: "No error" schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + $ref: "#/definitions/ImageInspect" 404: description: "No such image" schema: @@ -8225,10 +8786,27 @@ paths: description: "Max API Version the server supports" Builder-Version: type: "string" - description: "Default version of docker image builder" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured 
to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8268,6 +8846,13 @@ paths: Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8505,7 +9090,7 @@ paths: BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parent: "" + Parents: [] Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false @@ -8516,7 +9101,7 @@ paths: UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: "hw53o5aio51xtltp5xjp8v7fx" + Parents: ["hw53o5aio51xtltp5xjp8v7fx"] Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false @@ -8700,6 +9285,15 @@ paths: AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as a `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 DetachKeys: type: "string" description: | @@ -8764,6 +9358,7 @@ - "application/json" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "No error" @@ -8788,9 +9383,19 @@ Tty: type: "boolean" description: "Allocate a pseudo-TTY." + ConsoleSize: + type: "array" + description: "Initial console size, as a `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 example: Detach: false - Tty: false + Tty: true + ConsoleSize: [80, 64] - name: "id" in: "path" description: "Exec instance ID" @@ -8916,41 +9521,7 @@ 200: description: "Summary volume data that matches the query" schema: - type: "object" - title: "VolumeListResponse" - description: "Volume list response" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: | - Warnings that occurred when fetching the list of volumes.
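Reviewer note: the new `ConsoleSize` field on exec create takes `[height, width]`, matching the swagger description above. A hedged sketch against the vendored Go client (container name is a placeholder; the field is assumed available in `types.ExecConfig` as added in this vendoring):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// ConsoleSize is [height, width]; "my-container" is a placeholder.
	resp, err := cli.ContainerExecCreate(context.Background(), "my-container", types.ExecConfig{
		Cmd:          []string{"top"},
		Tty:          true,
		AttachStdout: true,
		ConsoleSize:  &[2]uint{24, 80},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("exec ID:", resp.ID)
}
```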
- items: - type: "string" - - examples: - application/json: - Volumes: - - CreatedAt: "2017-07-19T12:00:26Z" - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] + $ref: "#/definitions/VolumeListResponse" 500: description: "Server error" schema: @@ -8995,38 +9566,7 @@ paths: required: true description: "Volume configuration" schema: - type: "object" - description: "Volume configuration" - title: "VolumeConfig" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" + $ref: "#/definitions/VolumeCreateOptions" tags: ["Volume"] /volumes/{name}: @@ -9055,6 +9595,64 @@ paths: type: "string" tags: ["Volume"] + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." @@ -9086,6 +9684,7 @@ paths: type: "boolean" default: false tags: ["Volume"] + /volumes/prune: post: summary: "Delete unused volumes" @@ -9100,6 +9699,7 @@ paths: Available filters: - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. 
+ - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. type: "string" responses: 200: @@ -10727,6 +11327,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ServiceLogs" responses: 200: @@ -10982,6 +11585,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "logs returned as a stream in response body" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index e3c06cef691a..97aca023064a 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -112,10 +112,16 @@ type NetworkListOptions struct { Filters filters.Args } +// NewHijackedResponse initializes a HijackedResponse type +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + // HijackedResponse holds connection information for a hijacked request. type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader + mediaType string + Conn net.Conn + Reader *bufio.Reader } // Close closes the hijacked connection and reader. @@ -123,6 +129,15 @@ func (h *HijackedResponse) Close() { h.Conn.Close() } +// MediaType lets the client know whether the HijackedResponse holds a raw or multiplexed stream. +// It returns false if the HTTP Content-Type is not relevant, in which case the container must be inspected +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + // CloseWriter is an interface that implements structs // that close input streams to prevent from writing. type CloseWriter interface { diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 3dd133a3a58a..7689f38b331f 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -33,6 +33,7 @@ type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard error AttachStdout bool // Attach the standard output diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index f767195b94b4..077583e66c1f 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "io" "time" "github.com/docker/docker/api/types/strslice" @@ -13,6 +14,24 @@ import ( // Docker interprets it as 3 nanoseconds. const MinimumDuration = 1 * time.Millisecond +// StopOptions holds the options to stop or restart a container.
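Reviewer note: the new `MediaType()` accessor is what lets callers pick between raw copying and stdcopy demultiplexing without inspecting the container's TTY setting. A minimal sketch, assuming the vendored client and a placeholder container name:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// "my-container" is a placeholder for a real container ID or name.
	hijack, err := cli.ContainerAttach(context.Background(), "my-container", types.ContainerAttachOptions{
		Stream: true, Stdout: true, Stderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer hijack.Close()

	// If the daemon set the new Content-Type, use it; otherwise fall back
	// to treating the stream as raw (older daemons require inspecting the
	// container's TTY setting instead).
	if mt, ok := hijack.MediaType(); ok && mt == types.MediaTypeMultiplexedStream {
		_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, hijack.Reader) // demultiplex stdout/stderr frames
	} else {
		_, err = io.Copy(os.Stdout, hijack.Reader) // raw TTY stream
	}
	if err != nil {
		log.Fatal(err)
	}
}
```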
+type StopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceed to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. @@ -34,6 +53,14 @@ type HealthConfig struct { Retries int `json:",omitempty"` } +// ExecStartOptions holds the options to start container's exec. +type ExecStartOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + ConsoleSize *[2]uint `json:",omitempty"` +} + // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d0c852f84d5c..000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae66944..000000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT.
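Reviewer note: `StopOptions` folds the signal and timeout into one struct. In the client version this vendoring targets, `ContainerStop` takes it directly; a hedged sketch (container name is a placeholder):

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Send SIGINT first, then SIGKILL if the container is still running
	// after 30 seconds. A nil Timeout would use the 10-second default.
	timeout := 30
	err = cli.ContainerStop(context.Background(), "my-container", container.StopOptions{
		Signal:  "SIGINT",
		Timeout: &timeout,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```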
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go new file mode 100644 index 000000000000..aa0e7f7d0789 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_response.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go new file mode 100644 index 000000000000..0cb70e363817 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/deprecated.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// +// Deprecated: use CreateResponse +type ContainerCreateCreatedBody = CreateResponse + +// ContainerWaitOKBody OK response to ContainerWait operation +// +// Deprecated: use WaitResponse +type ContainerWaitOKBody = WaitResponse + +// ContainerWaitOKBodyError container waiting error, if any +// +// Deprecated: use WaitExitError +type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index dcea6c8a5ae6..100f434ce7fd 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -376,14 +376,17 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. 
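Reviewer note: the deleted generated files are superseded by `CreateResponse`/`WaitResponse`, and the `deprecated.go` aliases keep older callers compiling. A minimal sketch of both sides of the alias, assuming the vendored client and the `alpine` image as a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.ContainerCreate(context.Background(), &container.Config{
		Image: "alpine",
		Cmd:   []string{"echo", "hello"},
	}, nil, nil, nil, "")
	if err != nil {
		log.Fatal(err)
	}

	// Because ContainerCreateCreatedBody is now an alias for CreateResponse,
	// both declarations refer to the same type and existing code compiles.
	var legacy container.ContainerCreateCreatedBody = resp
	fmt.Println(legacy.ID, resp.Warnings)
}
```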
- Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // KernelMemory specifies the kernel memory limit (in bytes) for the container. + // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count @@ -414,6 +417,7 @@ type HostConfig struct { AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container @@ -442,8 +446,7 @@ type HostConfig struct { Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 000000000000..ab56d4eed8e1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 000000000000..84fc6afddc60 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/deprecated.go b/vendor/github.com/docker/docker/api/types/deprecated.go new file mode 100644 index 000000000000..216d1df0ffaa --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/deprecated.go @@ -0,0 +1,14 @@ +package types // import "github.com/docker/docker/api/types" + +import "github.com/docker/docker/api/types/volume" + +// Volume volume +// +// Deprecated: use github.com/docker/docker/api/types/volume.Volume +type Volume = volume.Volume + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// Deprecated: use github.com/docker/docker/api/types/volume.UsageData +type VolumeUsageData = volume.UsageData diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 4bc91cffd6e5..52c190ec7985 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,4 +1,5 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of +/* +Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ package filters // import "github.com/docker/docker/api/types/filters" @@ -9,6 +10,7 @@ import ( "strings" "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" ) // Args stores a mapping of keys to a set of multiple values. @@ -97,7 +99,7 @@ func FromJSON(p string) (Args, error) { // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err + return args, invalidFilter{errors.Wrap(err, "invalid filter")} } args.fields = deprecatedArgs(deprecated) @@ -247,10 +249,10 @@ func (args Args) Contains(field string) bool { return ok } -type invalidFilter string +type invalidFilter struct{ error } func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" + return e.error.Error() } func (invalidFilter) InvalidParameter() {} @@ -260,7 +262,7 @@ func (invalidFilter) InvalidParameter() {} func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { - return invalidFilter(name) + return invalidFilter{errors.New("invalid filter '" + name + "'")} } } return nil diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go index 4d9bf1c62c89..ce3deb331c51 100644 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -3,15 +3,21 @@ package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// GraphDriverData Information about a container's graph driver. +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. 
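Reviewer note on the `filters` change above: `invalidFilter` now wraps an `error` instead of being a bare string type, so validation failures carry a proper message. A small sketch of the unchanged public surface (`bogus` is a deliberately invalid filter name):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("bogus", "x"),
	)

	// Validate rejects unknown filter names; with this change the error is
	// built from a wrapped message rather than the old string-typed error.
	accepted := map[string]bool{"label": true, "dangling": true}
	if err := args.Validate(accepted); err != nil {
		fmt.Println(err) // invalid filter 'bogus'
	}
}
```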
+// // swagger:model GraphDriverData type GraphDriverData struct { - // data + // Low-level storage metadata, provided as key/value pairs. + // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // // Required: true Data map[string]string `json:"Data"` - // name + // Name of the storage driver. // Required: true Name string `json:"Name"` } diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go index e145b3dcfcd1..90b983a25cc5 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -7,43 +7,91 @@ package types // swagger:model ImageSummary type ImageSummary struct { - // containers + // Number of containers using this image. Includes both stopped and running + // containers. + // + // This size is not calculated by default, and depends on which API endpoint + // is used. `-1` indicates that the value has not been set / calculated. + // // Required: true Containers int64 `json:"Containers"` - // created + // Date and time at which the image was created as a Unix timestamp + // (number of seconds sinds EPOCH). + // // Required: true Created int64 `json:"Created"` - // Id + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + // // Required: true ID string `json:"Id"` - // labels + // User-defined key/value metadata. // Required: true Labels map[string]string `json:"Labels"` - // parent Id + // ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + // // Required: true ParentID string `json:"ParentId"` - // repo digests + // List of content-addressable digests of locally available image manifests + // that the image is referenced from. Multiple manifests can refer to the + // same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + // // Required: true RepoDigests []string `json:"RepoDigests"` - // repo tags + // List of image names/tags in the local image cache that reference this + // image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + // // Required: true RepoTags []string `json:"RepoTags"` - // shared size + // Total size of image layers that are shared between this image and other + // images. + // + // This size is not calculated by default. `-1` indicates that the value + // has not been set / calculated. + // // Required: true SharedSize int64 `json:"SharedSize"` - // size + // Total size of the image including all layers it is composed of. + // // Required: true Size int64 `json:"Size"` - // virtual size + // Total size of the image including all layers it is composed of. 
+ // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + // // Required: true VirtualSize int64 `json:"VirtualSize"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 443b8d07a9f3..ac4ce622310d 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -17,6 +17,8 @@ const ( TypeTmpfs Type = "tmpfs" // TypeNamedPipe is the type for mounting Windows named pipes TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" ) // Mount represents a mount (volume). @@ -30,9 +32,10 @@ type Mount struct { ReadOnly bool `json:",omitempty"` Consistency Consistency `json:",omitempty"` - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. @@ -79,8 +82,9 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. @@ -129,3 +133,8 @@ type TmpfsOptions struct { // Some of these may be straightforward to add, but others, such as // uid/gid have implications in a clustered system. } + +// ClusterOptions specifies options for a Cluster volume. 
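Reviewer note: `TypeCluster` and the (currently empty) `ClusterOptions` are the mount-side hooks for Swarm cluster volumes. A hedged sketch that only constructs and serializes the new mount type (the volume name is a placeholder; actually using such a mount requires a swarm with a CSI plugin):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// "my-cluster-volume" is a placeholder for a Swarm cluster volume name.
	m := mount.Mount{
		Type:           mount.TypeCluster,
		Source:         "my-cluster-volume",
		Target:         "/srv/data",
		ClusterOptions: &mount.ClusterOptions{}, // intentionally carries no options yet
	}
	out, err := json.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```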
+type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 53e47084c8d5..62a88f5be89d 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -45,31 +45,32 @@ func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { // IndexInfo contains information about a registry // // RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } // -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } type IndexInfo struct { // Name is the name of the registry, such as "docker.io" Name string diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index ef020f458bd4..5ded7dba8a5f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,12 +1,20 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "strconv" + "time" +) // Version represents the internal object version. type Version struct { Index uint64 `json:",omitempty"` } +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + // Meta is a base object inherited by most of the other once. type Meta struct { Version Version `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 1e30f5fa10dd..bb98d5eedc65 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -53,6 +53,7 @@ type NodeDescription struct { Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). @@ -68,6 +69,21 @@ type EngineDescription struct { Plugins []PluginDescription `json:",omitempty"` } +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. 
This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + // PluginDescription represents the description of an engine plugin. type PluginDescription struct { Type string `json:",omitempty"` @@ -113,3 +129,11 @@ const ( // NodeStateDisconnected DISCONNECTED NodeStateDisconnected NodeState = "disconnected" ) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b25f9996462e..3eae4b9b297d 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -213,6 +213,16 @@ type Info struct { Warnings []string `json:",omitempty"` } +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + // Peer represents a peer. type Peer struct { NodeID string diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index a6f7ab7b5c79..ad3eeca0b7f2 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -62,6 +62,11 @@ type Task struct { // used to determine which Tasks belong to which run of the job. This field // is absent if the Service mode is Replicated or Global. JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // which mounts in the spec they fulfill. + Volumes []VolumeAttachment } // TaskSpec represents the spec of a task. @@ -204,3 +209,17 @@ type ContainerStatus struct { type PortStatus struct { Ports []PortConfig `json:",omitempty"` } + +// VolumeAttachment contains the details of associating a Volume with a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills.
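Reviewer note: the new `Task.Volumes` field makes volume attachments visible when listing tasks. A hedged sketch (requires the daemon to be a swarm manager; no other assumptions beyond the vendored client):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Requires the daemon to be a swarm manager.
	tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tasks {
		for _, va := range t.Volumes {
			fmt.Printf("task %s: volume %s fulfills mount %s -> %s\n",
				t.ID, va.ID, va.Source, va.Target)
		}
	}
}
```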
+ Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go deleted file mode 100644 index 84b6f073224c..000000000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index ea3495efeb8a..2a74b7a59795 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -100,8 +100,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // seconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// // returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index a186550d7242..036405299ea2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -14,43 +14,136 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" ) +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + // RootFS returns Image's RootFS description including the layer IDs. type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` } // ImageInspect contains response of Engine API: // GET "/images/{name:.*}/json" type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. 
+ ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Variant string `json:",omitempty"` - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. 
+ VirtualSize int64 // TODO(thaJeztah): deprecate this field + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata ImageMetadata } // ImageMetadata contains engine-local data about the image type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. LastTagTime time.Time `json:",omitempty"` } @@ -107,6 +200,15 @@ type Ping struct { OSType string Experimental bool BuilderVersion BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status } // ComponentVersion describes the version information for a specific component. @@ -158,8 +260,8 @@ type Info struct { Plugins PluginsInfo MemoryLimit bool SwapLimit bool - KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool @@ -288,6 +390,8 @@ type ExecStartCheck struct { Detach bool // Check if there's a tty Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` } // HealthcheckResult stores information about a single run of a healthcheck probe @@ -421,13 +525,44 @@ type DefaultNetworkSettings struct { // MountPoint represents a mount point configuration inside the container. // This is used for reporting the mountpoints in use by a container. type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. Destination string - Driver string `json:",omitempty"` - Mode string - RW bool + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). 
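Reviewer note: `Ping.SwarmStatus` above is populated from the new "Swarm" ping header and is nil when the daemon does not send it. A minimal sketch using the vendored client:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// SwarmStatus is nil on older API versions that do not send the header.
	if ping.SwarmStatus != nil {
		fmt.Printf("swarm: %s (manager: %v)\n",
			ping.SwarmStatus.NodeState, ping.SwarmStatus.ControlAvailable)
	} else {
		fmt.Println("swarm status not reported; query /swarm instead")
	}
}
```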
+ Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. Propagation mount.Propagation } @@ -562,7 +697,7 @@ type DiskUsage struct { LayersSize int64 Images []*ImageSummary Containers []*Container - Volumes []*Volume + Volumes []*volume.Volume BuildCache []*BuildCache BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } @@ -639,18 +774,31 @@ type BuildResult struct { ID string } -// BuildCache contains information about a build cache record +// BuildCache contains information about a build cache record. type BuildCache struct { - ID string - Parent string - Type string + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. + Parents []string `json:"Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. Description string - InUse bool - Shared bool - Size int64 - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int } // BuildCachePruneOptions hold parameters to prune the build cache diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go new file mode 100644 index 000000000000..55fc5d389939 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -0,0 +1,420 @@ +package volume + +import ( + "github.com/docker/docker/api/types/swarm" +) + +// ClusterVolume contains options and information specific to, and only present +// on, Swarm CSI cluster volumes. +type ClusterVolume struct { + // ID is the Swarm ID of the volume. Because cluster volumes are Swarm + // objects, they have an ID, unlike non-cluster volumes, which only have a + // Name. This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services.
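Reviewer note: a hedged sketch of creating a cluster volume through the vendored client, using the `ClusterVolumeSpec` introduced here (the `AccessMode` types are defined in the lines that follow; the driver name is a hypothetical CSI plugin, and this only works against a swarm manager):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// "my-csi-plugin" is a placeholder for an installed CSI plugin; cluster
	// volumes can only be created against a swarm manager.
	vol, err := cli.VolumeCreate(context.Background(), volume.CreateOptions{
		Name:   "my-csi-volume",
		Driver: "my-csi-plugin",
		ClusterVolumeSpec: &volume.ClusterVolumeSpec{
			Group: "dbdata",
			AccessMode: &volume.AccessMode{
				Scope:       volume.ScopeSingleNode,
				Sharing:     volume.SharingNone,
				MountVolume: &volume.TypeMount{FsType: "ext4"},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created volume:", vol.Name)
}
```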
+ // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, emptystring + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. +type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. 
+ SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +) + +// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. +type TypeBlock struct{} + +// TypeMount contains options for using a volume as a Mount-type +// volume. +type TypeMount struct { + // FsType specifies the filesystem type for the mount volume. Optional. + FsType string `json:",omitempty"` + + // MountFlags defines flags to pass when mounting the volume. Optional. + MountFlags []string `json:",omitempty"` +} + +// TopologyRequirement expresses the user's requirements for a volume's +// accessible topology. +type TopologyRequirement struct { + // Requisite specifies a list of Topologies, at least one of which the + // volume must be accessible from. + // + // Taken verbatim from the CSI Spec: + // + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If x>n, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + Requisite []Topology `json:",omitempty"` + + // Preferred is a list of Topologies that the volume should attempt to be + // provisioned in. + // + // Taken from the CSI spec: + // + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified.
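Reviewer note: a small sketch of building a `TopologyRequirement` in the shape the quoted CSI text describes (the volume must land in R1/Z2 or R1/Z3, with R1/Z3 preferred); it only constructs and prints the value, so it is safe to run anywhere:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	// Requisite: the volume MUST be accessible from at least one of these.
	// Preferred: the SP should try these first, in order.
	req := volume.TopologyRequirement{
		Requisite: []volume.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
		},
		Preferred: []volume.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
		},
	}
	out, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```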
+ // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". 
+//	The key name segment is REQUIRED. The prefix is OPTIONAL.
+//	The key name MUST be 63 characters or less, begin and end with an
+//	alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+//	underscores (_), dots (.), or alphanumerics in between, for example
+//	"zone".
+//	The key prefix MUST be 63 characters or less, begin and end with a
+//	lower-case alphanumeric character ([a-z0-9]), contain only
+//	dashes (-), dots (.), or lower-case alphanumerics in between, and
+//	follow domain name notation format
+//	(https://tools.ietf.org/html/rfc1035#section-2.3.1).
+//	The key prefix SHOULD include the plugin's host company name and/or
+//	the plugin name, to minimize the possibility of collisions with keys
+//	from other plugins.
+//	If a key prefix is specified, it MUST be identical across all
+//	topology keys returned by the SP (across all RPCs).
+//	Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+//	MUST not both exist.
+//	Each value (topological segment) MUST contain 1 or more strings.
+//	Each string MUST be 63 characters or less and begin and end with an
+//	alphanumeric character with '-', '_', '.', or alphanumerics in
+//	between.
+type Topology struct {
+	Segments map[string]string `json:",omitempty"`
+}
+
+// CapacityRange describes the minimum and maximum capacity a volume should be
+// created with.
+type CapacityRange struct {
+	// RequiredBytes specifies that a volume must be at least this big. The
+	// value of 0 indicates an unspecified minimum.
+	RequiredBytes int64
+
+	// LimitBytes specifies that a volume must not be bigger than this. The
+	// value of 0 indicates an unspecified maximum.
+	LimitBytes int64
+}
+
+// Secret represents a Swarm Secret value that must be passed to the CSI
+// storage plugin when operating on this Volume. It represents one key-value
+// pair of possibly many.
+type Secret struct {
+	// Key is the name of the key of the key-value pair passed to the plugin.
+	Key string
+
+	// Secret is the swarm Secret object from which to read data. This can be a
+	// Secret name or ID. The Secret data is retrieved by Swarm and used as the
+	// value of the key-value pair passed to the plugin.
+	Secret string
+}
+
+// PublishState represents the state of a Volume as it pertains to its
+// use on a particular Node.
+type PublishState string
+
+const (
+	// StatePending indicates that the volume should be published on
+	// this node, but the call to ControllerPublishVolume has not been
+	// successfully completed yet and the result recorded by swarmkit.
+	StatePending PublishState = "pending-publish"
+
+	// StatePublished means the volume is published successfully to the node.
+	StatePublished PublishState = "published"
+
+	// StatePendingNodeUnpublish indicates that the Volume should be
+	// unpublished on the Node, and we're waiting for confirmation that it has
+	// done so. After the Node has confirmed that the Volume has been
+	// unpublished, the state will move to StatePendingUnpublish.
+	StatePendingNodeUnpublish PublishState = "pending-node-unpublish"
+
+	// StatePendingUnpublish means the volume is still published to the node
+	// by the controller, awaiting the operation to unpublish it.
+	StatePendingUnpublish PublishState = "pending-controller-unpublish"
+)
+
+// PublishStatus represents the status of the volume as published to an
+// individual node.
+type PublishStatus struct {
+	// NodeID is the ID of the swarm node this Volume is published to.
+	NodeID string `json:",omitempty"`
+
+	// State is the publish state of the volume.
+	State PublishState `json:",omitempty"`
+
+	// PublishContext is the PublishContext returned by the CSI plugin when
+	// a volume is published.
+	PublishContext map[string]string `json:",omitempty"`
+}
+
+// Info contains information about the Volume as a whole as provided by
+// the CSI storage plugin.
+type Info struct {
+	// CapacityBytes is the capacity of the volume in bytes. A value of 0
+	// indicates that the capacity is unknown.
+	CapacityBytes int64 `json:",omitempty"`
+
+	// VolumeContext is the context originating from the CSI storage plugin
+	// when the Volume is created.
+	VolumeContext map[string]string `json:",omitempty"`
+
+	// VolumeID is the ID of the Volume as seen by the CSI storage plugin. This
+	// is distinct from the Volume's Swarm ID, which is the ID used by the
+	// Docker Engine to refer to the Volume. If this field is blank, then
+	// the Volume has not been successfully created yet.
+	VolumeID string `json:",omitempty"`
+
+	// AccessibleTopology is the topology this volume is actually accessible
+	// from.
+	AccessibleTopology []Topology `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go
new file mode 100644
index 000000000000..37c41a609690
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go
@@ -0,0 +1,29 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// CreateOptions VolumeConfig
+//
+// Volume configuration
+// swagger:model CreateOptions
+type CreateOptions struct {
+
+	// cluster volume spec
+	ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"`
+
+	// Name of the volume driver to use.
+	Driver string `json:"Driver,omitempty"`
+
+	// A mapping of driver options and values. These options are
+	// passed directly to the driver and are driver specific.
+	//
+	DriverOpts map[string]string `json:"DriverOpts,omitempty"`
+
+	// User-defined key/value metadata.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// The new volume's name. If not specified, Docker generates a name.
+	//
+	Name string `json:"Name,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/deprecated.go b/vendor/github.com/docker/docker/api/types/volume/deprecated.go
new file mode 100644
index 000000000000..ab622d8ccb44
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/deprecated.go
@@ -0,0 +1,11 @@
+package volume // import "github.com/docker/docker/api/types/volume"
+
+// VolumeCreateBody Volume configuration
+//
+// Deprecated: use CreateOptions
+type VolumeCreateBody = CreateOptions
+
+// VolumeListOKBody Volume list response
+//
+// Deprecated: use ListResponse
+type VolumeListOKBody = ListResponse
diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go
new file mode 100644
index 000000000000..ca5192a2a91e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go
@@ -0,0 +1,18 @@
+package volume
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 000000000000..8b0dd1389986 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/volume.go rename to vendor/github.com/docker/docker/api/types/volume/volume.go index c69b08448df4..ea7d555e5b49 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -1,4 +1,4 @@ -package types +package volume // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` @@ -47,14 +50,14 @@ type Volume struct { Status map[string]interface{} `json:"Status,omitempty"` // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` + UsageData *UsageData `json:"UsageData,omitempty"` } -// VolumeUsageData Usage details about the volume. This information is used by the +// UsageData Usage details about the volume. This information is used by the // `GET /system/df` endpoint, and omitted in other endpoints. // -// swagger:model VolumeUsageData -type VolumeUsageData struct { +// swagger:model UsageData +type UsageData struct { // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go deleted file mode 100644 index 8538078dd663..000000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ /dev/null @@ -1,31 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// VolumeCreateBody Volume configuration -// swagger:model VolumeCreateBody -type VolumeCreateBody struct { - - // Name of the volume driver to use. - // Required: true - Driver string `json:"Driver"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - // Required: true - DriverOpts map[string]string `json:"DriverOpts"` - - // User-defined key/value metadata. 
- // Required: true - Labels map[string]string `json:"Labels"` - - // The new volume's name. If not specified, Docker generates a name. - // - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go deleted file mode 100644 index be06179bf488..000000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ /dev/null @@ -1,23 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import "github.com/docker/docker/api/types" - -// VolumeListOKBody Volume list response -// swagger:model VolumeListOKBody -type VolumeListOKBody struct { - - // List of volumes - // Required: true - Volumes []*types.Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 000000000000..f958f80a6692 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 66d46dd161ba..39cfb959ff5e 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -20,7 +20,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) defer ensureReaderClosed(resp) if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) + return checkpoints, err } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 6a8b4d4feaeb..26a0fa27562f 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -4,7 +4,7 @@ Package client is a Go client for the Docker Engine API. For more information about the Engine API, see the documentation: https://docs.docker.com/engine/api/ -Usage +# Usage You use the library by creating a client object and calling methods on it. The client can be created either from environment variables with NewClientWithOpts(client.FromEnv), @@ -37,13 +37,11 @@ For example, to list running containers (the equivalent of "docker ps"): fmt.Printf("%s %s\n", container.ID[:10], container.Image) } } - */ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net" "net/http" "net/url" @@ -93,15 +91,18 @@ type Client struct { } // CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. 
Otherwise use the last response. +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. func CheckRedirect(req *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse @@ -109,13 +110,20 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { return ErrRedirect } -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. // -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). 
+// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { @@ -153,12 +161,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) + hostURL, err := ParseHostURL(host) if err != nil { return nil, err } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) + transport := &http.Transport{} + _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) return &http.Client{ Transport: transport, CheckRedirect: CheckRedirect, @@ -194,11 +202,21 @@ func (cli *Client) ClientVersion() string { return cli.version } -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. If a manual override is in place, -// either through the `DOCKER_API_VERSION` environment variable, or if the client -// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation -// will be performed. +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { ping, _ := cli.Ping(ctx) @@ -206,23 +224,31 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { } } -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. If a manual override is -// in place, either through the `DOCKER_API_VERSION` environment variable, or if -// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no -// negotiation is performed. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). 
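The negotiation behavior documented above is easiest to see end-to-end. Below is a minimal sketch, assuming only the exported client API shown in this diff (it is not part of the vendored code itself):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; WithAPIVersionNegotiation opts in to the downgrade
	// behavior described on NegotiateAPIVersionPing.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Negotiation normally happens lazily on the first API call; calling
	// NegotiateAPIVersion forces it, so ClientVersion reports the agreed
	// version (or 1.24 when talking to a pre-negotiation daemon).
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("negotiated API version:", cli.ClientVersion())
}
```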
+func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { if !cli.manualOverride { - cli.negotiateAPIVersionPing(p) + cli.negotiateAPIVersionPing(pingResponse) } } // negotiateAPIVersionPing queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) negotiateAPIVersionPing(p types.Ping) { - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" +// API version from the ping response. +func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" } // if the client is not initialized with a version, start with the latest supported version @@ -231,8 +257,8 @@ func (cli *Client) negotiateAPIVersionPing(p types.Ping) { } // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion } // Store the results, so that automatic API version negotiation (if enabled) @@ -258,7 +284,7 @@ func (cli *Client) HTTPClient() *http.Client { func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) + return nil, errors.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -278,7 +304,9 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } -// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// // Used by `docker dial-stdio` (docker/cli#889). func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 5846f888fea2..f0783f708581 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -3,7 +3,8 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "unix:///var/run/docker.sock" const defaultProto = "unix" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index c649e54412ce..5abe60457d53 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. 
const DefaultDockerHost = "npipe:////./pipe/docker_engine" const defaultProto = "npipe" diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index f1b0d7f7536c..9be7882c3d7d 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -20,7 +20,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C resp, err := cli.get(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + return swarm.Config{}, nil, err } body, err := io.ReadAll(resp.body) diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 93de0d8445b5..24b94e9c18b0 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error { } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) + return err } diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index ba79ae64e592..1ac298543517 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) @@ -14,7 +13,7 @@ func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Ve return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 88ba1ef6396d..ba92117d3ed9 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -22,7 +22,7 @@ import ( // multiplexed. 
// The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -52,6 +52,8 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{"Content-Type": {"text/plain"}} + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index c0a47c14e31b..883be7fa3451 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -23,7 +23,7 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri response, err := cli.head(ctx, urlStr, query, nil) defer ensureReaderClosed(response) if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + return types.ContainerPathStat{}, err } return getContainerPathStatFromHeader(response.header) } @@ -47,12 +47,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str response, err := cli.putRaw(ctx, apiPath, query, content, nil) defer ensureReaderClosed(response) if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return err } return nil @@ -67,12 +62,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return nil, types.ContainerPathStat{}, err } // In order to get the copy behavior right, we need to know information diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 47d15c2bbd6b..f82420b673ec 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -20,22 +20,31 @@ type configWrapper struct { // ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. 
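The multiplexed stream format described in the comment above is normally consumed through the engine's stdcopy package rather than parsed by hand. A hedged sketch follows (not part of this diff; "my-container" is a placeholder, and demultiplexing only applies to containers created without a TTY):

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// ShowStdout/ShowStderr select which streams end up in the multiplexed
	// payload returned by the daemon.
	rc, err := cli.ContainerLogs(context.Background(), "my-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// StdCopy parses the [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
	// frames and routes each payload to the writer matching its STREAM_TYPE.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		panic(err)
	}
}
```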
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) {
-	var response container.ContainerCreateCreatedBody
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) {
+	var response container.CreateResponse
 
 	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
 		return response, err
 	}
-
-	// When using API 1.24 and under, the client is responsible for removing the container
-	if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
-		hostConfig.AutoRemove = false
-	}
-
 	if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
 		return response, err
 	}
 
+	if hostConfig != nil {
+		if versions.LessThan(cli.ClientVersion(), "1.25") {
+			// When using API 1.24 and under, the client is responsible for removing the container
+			hostConfig.AutoRemove = false
+		}
+		if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") {
+			// KernelMemory was added in API 1.40, and deprecated in API 1.42
+			hostConfig.KernelMemory = 0
+		}
+		if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") {
+			// When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
+			hostConfig.ConsoleSize = [2]uint{0, 0}
+		}
+	}
+
 	query := url.Values{}
 	if p := formatPlatform(platform); p != "" {
 		query.Set("platform", p)
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
index e3ee755b71dc..6a2cb006f88b 100644
--- a/vendor/github.com/docker/docker/client/container_exec.go
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
 )
 
 // ContainerExecCreate creates a new exec configuration to run an exec process.
@@ -14,6 +15,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co
 	if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
 		return response, err
 	}
+	if versions.LessThan(cli.ClientVersion(), "1.42") {
+		config.ConsoleSize = nil
+	}
 
 	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
 	defer ensureReaderClosed(resp)
@@ -26,6 +30,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co
 
 // ContainerExecStart starts an exec process already created in the docker host.
 func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	if versions.LessThan(cli.ClientVersion(), "1.42") {
+		config.ConsoleSize = nil
+	}
 	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
 	ensureReaderClosed(resp)
 	return err
@@ -36,7 +43,12 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
 // and a reader to get output. It's up to the caller to close
 // the hijacked connection by calling types.HijackedResponse.Close.
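For callers, the visible effect of the ContainerCreate hunk above is the renamed result type: container.CreateResponse replaces container.ContainerCreateCreatedBody. A minimal sketch of the updated call (illustrative only; the image, command, and nil defaults are placeholder choices):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	resp, err := cli.ContainerCreate(context.Background(),
		&container.Config{Image: "busybox", Cmd: []string{"true"}},
		nil, // host config: daemon defaults
		nil, // networking config: daemon defaults
		nil, // platform: let the daemon decide
		"",  // name: let the daemon generate one
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", resp.ID, "warnings:", resp.Warnings)
}
```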
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index 43db32bd973a..d48f0d3a6856 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -18,7 +18,7 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, err } var response types.ContainerJSON @@ -38,7 +38,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, nil, err } body, err := io.ReadAll(serverResp.body) diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 4d6f1d23da9d..7c9529f1e140 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -8,7 +8,9 @@ import ( // ContainerKill terminates the container process but does not remove the container from the docker host. func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { query := url.Values{} - query.Set("signal", signal) + if signal != "" { + query.Set("signal", signal) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index a973de597fdf..bd491b3db92e 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -18,7 +18,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis query.Set("all", "1") } - if options.Limit != -1 { + if options.Limit > 0 { query.Set("limit", strconv.Itoa(options.Limit)) } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 5b6541f03596..9bdf2b0fa602 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -24,7 +24,7 @@ import ( // multiplexed. 
// The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -74,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, wrapResponseError(err, resp, "container", container) + return nil, err } return resp.body, nil } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index df81461b889c..c21de609b0b7 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) + return err } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index aa0d6485de39..1e0ad999815a 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -3,18 +3,22 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerRestart stops and starts a container again. // It makes the daemon wait for the container to be up again for // a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 629d7ab64c80..2a43ce22749f 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerStop stops a container. In case the container fails to stop @@ -15,10 +16,13 @@ import ( // If the timeout is nil, the container's StopTimeout value is used, if set, // otherwise the engine default. A negative timeout value can be specified, // meaning no timeout, i.e. no forceful termination is performed. 
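The ContainerRestart change above (and the ContainerStop change that follows) replaces the *time.Duration timeout with container.StopOptions, whose Timeout is expressed in whole seconds and whose Signal is only forwarded to daemons speaking API 1.42 or newer. A hedged sketch of the new calling convention ("my-container", the signal, and the 10-second timeout are placeholders):

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Timeout is now *int seconds: nil uses the container's StopTimeout (or
	// the engine default), and a negative value means no forceful termination.
	timeout := 10
	err = cli.ContainerStop(context.Background(), "my-container", container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &timeout,
	})
	if err != nil {
		panic(err)
	}
}
```

ContainerRestart takes the same options struct.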
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index e9b134c9d2d3..9aff7161325c 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -24,12 +24,12 @@ import ( // wait request or in getting the response. This allows the caller to // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } - resultC := make(chan container.ContainerWaitOKBody) + resultC := make(chan container.WaitResponse) errC := make(chan error, 1) query := url.Values{} @@ -46,7 +46,7 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit go func() { defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody + var res container.WaitResponse if err := json.NewDecoder(resp.body).Decode(&res); err != nil { errC <- err return @@ -60,8 +60,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // legacyContainerWait returns immediately and doesn't have an option to wait // until the container is removed. -func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) errC := make(chan error) go func() { @@ -72,7 +72,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) } defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody + var res container.WaitResponse if err := json.NewDecoder(resp.body).Decode(&res); err != nil { errC <- err return diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go new file mode 100644 index 000000000000..61dd45c1d721 --- /dev/null +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -0,0 +1,90 @@ +package client // import "github.com/docker/docker/client" + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). 
+	//
+	// This env-var is read by FromEnv and WithHostFromEnv and when set to a
+	// non-empty value, takes precedence over the default host (which is platform
+	// specific), or any host already set.
+	EnvOverrideHost = "DOCKER_HOST"
+
+	// EnvOverrideAPIVersion is the name of the environment variable that can
+	// be used to override the API version to use. Value should be
+	// formatted as MAJOR.MINOR, for example, "1.19".
+	//
+	// This env-var is read by FromEnv and WithVersionFromEnv and when set to a
+	// non-empty value, takes precedence over API version negotiation.
+	//
+	// This environment variable should be used for debugging purposes only, as
+	// it can set the client to use an incompatible (or invalid) API version.
+	EnvOverrideAPIVersion = "DOCKER_API_VERSION"
+
+	// EnvOverrideCertPath is the name of the environment variable that can be
+	// used to specify the directory from which to load the TLS certificates
+	// (ca.pem, cert.pem, key.pem). These certificates are used to configure
+	// the Client for a TCP connection protected by TLS client authentication.
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection. Refer to EnvTLSVerify below to learn how to
+	// disable verification for testing purposes.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the "daemon attack
+	// surface" (https://docs.docker.com/go/attack-surface/).
+	//
+	// For local access to the API, it is recommended to connect with the daemon
+	// using the default local socket connection (on Linux), or the named pipe
+	// (on Windows).
+	//
+	// If you need to access the API of a remote daemon, consider using an SSH
+	// (ssh://) connection, which is easier to set up, and requires no additional
+	// configuration if the host is accessible using ssh.
+	//
+	// If you cannot use the alternatives above, and you must expose the API over
+	// a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/
+	// to learn how to configure the daemon and client to use a TCP connection
+	// with TLS client authentication. Make sure you know the differences between
+	// a regular TLS connection and a TLS connection protected by TLS client
+	// authentication, and verify that the API cannot be accessed by other clients.
+	EnvOverrideCertPath = "DOCKER_CERT_PATH"
+
+	// EnvTLSVerify is the name of the environment variable that can be used to
+	// enable or disable TLS certificate verification. When set to a non-empty
+	// value, TLS certificate verification is enabled, and the client is configured
+	// to use a TLS connection, using certificates from the default directories
+	// (within `~/.docker`); refer to EnvOverrideCertPath above for additional
+	// details.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the "daemon attack
+	// surface" (https://docs.docker.com/go/attack-surface/).
+	//
+	// Before setting up your client and daemon to use a TCP connection with TLS
+	// client authentication, consider using one of the alternatives mentioned
+	// in EnvOverrideCertPath above.
+	//
+	// Disabling TLS certificate verification (for testing purposes)
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection, and it is highly recommended to keep verification
+	// enabled to prevent machine-in-the-middle attacks. Refer to the documentation
+	// at https://docs.docker.com/engine/security/protect-access/ and pages linked
+	// from that page to learn how to configure the daemon and client to use a
+	// TCP connection with TLS client authentication enabled.
+	//
+	// Set the "DOCKER_TLS_VERIFY" environment variable to an empty string ("")
+	// to disable TLS certificate verification. Disabling verification is insecure,
+	// so it should only be done for testing purposes. From the Go documentation
+	// (https://pkg.go.dev/crypto/tls#Config):
+	//
+	//	InsecureSkipVerify controls whether a client verifies the server's
+	//	certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
+	//	accepts any certificate presented by the server and any host name in that
+	//	certificate. In this mode, TLS is susceptible to machine-in-the-middle
+	//	attacks unless custom verification is used. This should be used only for
+	//	testing or in combination with VerifyConnection or VerifyPeerCertificate.
+	EnvTLSVerify = "DOCKER_TLS_VERIFY"
+)
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
index 041bc8d49c44..e5a8a865f9f3 100644
--- a/vendor/github.com/docker/docker/client/errors.go
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client"
 
 import (
 	"fmt"
-	"net/http"
 
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/errdefs"
@@ -41,11 +40,11 @@ type notFound interface {
 // IsErrNotFound returns true if the error is a NotFound error, which is returned
 // by the API when some object is not found.
 func IsErrNotFound(err error) bool {
-	var e notFound
-	if errors.As(err, &e) {
+	if errdefs.IsNotFound(err) {
 		return true
 	}
-	return errdefs.IsNotFound(err)
+	var e notFound
+	return errors.As(err, &e)
 }
 
 type objectNotFoundError struct {
@@ -59,35 +58,11 @@ func (e objectNotFoundError) Error() string {
 	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
 }
 
-func wrapResponseError(err error, resp serverResponse, object, id string) error {
-	switch {
-	case err == nil:
-		return nil
-	case resp.statusCode == http.StatusNotFound:
-		return objectNotFoundError{object: object, id: id}
-	case resp.statusCode == http.StatusNotImplemented:
-		return errdefs.NotImplemented(err)
-	default:
-		return err
-	}
-}
-
-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails +// +// Deprecated: use errdefs.IsUnauthorized func IsErrUnauthorized(err error) bool { - if _, ok := err.(unauthorizedError); ok { - return ok - } return errdefs.IsUnauthorized(err) } @@ -99,32 +74,12 @@ func (e pluginPermissionDenied) Error() string { return "Permission denied while installing plugin " + e.name } -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - // IsErrNotImplemented returns true if the error is a NotImplemented error. // This is returned by the API when a requested feature has not been // implemented. +// +// Deprecated: use errdefs.IsNotImplemented func IsErrNotImplemented(err error) bool { - if _, ok := err.(notImplementedError); ok { - return ok - } return errdefs.IsNotImplemented(err) } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index f0dc9d9e12f3..a9c48a9288d4 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -17,7 +17,6 @@ import ( // be sent over the error channel. If an error is sent all processing will be stopped. It's up // to the caller to reopen the stream in the event of an error by reinvoking this method. func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) errs := make(chan error, 1) diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index e1dc49ef0f66..6bdacab10adb 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -12,6 +12,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" ) @@ -30,12 +31,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - conn, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err } - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err + return types.NewHijackedResponse(conn, mediaType), err } // DialHijack returns a hijacked connection with negotiated protocol proto. @@ -46,7 +47,8 @@ func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[s } req = cli.addHeaders(req, meta) - return cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err } // fallbackDial is used when WithDialer() was not called. 
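With wrapResponseError removed, this hunk suggests the client already surfaces errdefs-typed errors for HTTP status codes, so callers classify failures through the errdefs helpers; client.IsErrNotFound remains as a thin wrapper. A minimal sketch under that assumption (the container name is a placeholder):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	switch {
	case errdefs.IsNotFound(err):
		// Replaces checks that previously relied on wrapResponseError /
		// objectNotFoundError; client.IsErrNotFound(err) behaves the same.
		fmt.Println("container does not exist")
	case err != nil:
		panic(err)
	}
}
```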
@@ -61,7 +63,7 @@ func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { return net.Dial(proto, addr) } -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -69,7 +71,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto dialer := cli.Dialer() conn, err := dialer(ctx) if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") } // When we set up a TCP connection for hijack, there could be long periods @@ -91,18 +93,18 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { - return nil, err + return nil, "", err } if resp.StatusCode != http.StatusSwitchingProtocols { resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } c, br := clientconn.Hijack() if br.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite iff the underlying connection + // object that implements CloseWrite if the underlying connection // implements it. if _, ok := c.(types.CloseWriter); ok { c = &hijackedConnCloseWriter{&hijackedConn{c, br}} @@ -113,7 +115,13 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto br.Reset(nil) } - return c, nil + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 03aa12d8b4cd..1de10e5a0802 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -17,7 +17,7 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + return types.ImageInspect{}, nil, err } body, err := io.ReadAll(serverResp.body) diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a4d7505094cd..950d51333471 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -34,6 +34,9 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions if options.All { query.Set("all", "1") } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, 
nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 84a41af0f2ca..6a9fb3f41f53 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -23,7 +23,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return dels, wrapResponseError(err, resp, "image", imageID) + return dels, err } err = json.NewDecoder(resp.body).Decode(&dels) diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 5f40a22a964c..e69fa372258b 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,8 +3,8 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "fmt" "net/url" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -18,7 +18,9 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I var results []registry.SearchResult query := url.Values{} query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 7277d1bbdd29..e9c1ed722ee5 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -5,17 +5,16 @@ import ( "io" "net" "net/http" - "time" "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -48,8 +47,8 @@ type CommonAPIClient interface { type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) 
([]container.ContainerChangeResponseItem, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) @@ -65,16 +64,16 @@ type ContainerAPIClient interface { ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) @@ -107,7 +106,7 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkDisconnect(ctx context.Context, network, container string, force bool) error NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) @@ -174,12 +173,13 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, 
error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error } // SecretAPIClient defines API client methods for secrets diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index ecf20ceb6e46..0f90e2bb9028 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -36,7 +36,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + return networkResource, nil, err } body, err := io.ReadAll(resp.body) diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index e71b16d86929..9d6c6cef0781 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "network", networkID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index b58db528567b..95ab9b1be061 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -17,7 +17,7 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + return swarm.Node{}, nil, err } body, err := io.ReadAll(serverResp.body) diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 03ab87809741..e44436debc3f 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -16,5 +16,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. 
resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "node", nodeID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index de32a617fb01..0d0fc3b7881b 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) @@ -11,7 +10,7 @@ import ( // NodeUpdate updates a Node. func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 77a9abc14198..099ad41846af 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -18,11 +18,18 @@ type Opt func(*Client) error // FromEnv configures the client with values from environment variables. // -// Supported environment variables: -// DOCKER_HOST to set the url to the docker server. -// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// DOCKER_CERT_PATH to load the TLS certificates from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). func FromEnv(c *Client) error { ops := []Opt{ WithTLSClientConfigFromEnv(), @@ -37,13 +44,6 @@ func FromEnv(c *Client) error { return nil } -// WithDialer applies the dialer.DialContext to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. -// Deprecated: use WithDialContext -func WithDialer(dialer *net.Dialer) Opt { - return WithDialContext(dialer.DialContext) -} - // WithDialContext applies the dialer to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { @@ -75,11 +75,11 @@ func WithHost(host string) Opt { } // WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST environment variable. If DOCKER_HOST is not set, the host is -// not modified. +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. 
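These functional options compose at client-construction time. As a point of reference, here is a minimal sketch of typical usage built only from exported API that is known to exist in this package (`NewClientWithOpts`, `FromEnv`, `WithAPIVersionNegotiation`); error handling is deliberately crude:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Honors DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY, then negotiates the API version with the daemon.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("negotiated API version:", ping.APIVersion)
}
```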
func WithHostFromEnv() Opt { return func(c *Client) error { - if host := os.Getenv("DOCKER_HOST"); host != "" { + if host := os.Getenv(EnvOverrideHost); host != "" { return WithHost(host)(c) } return nil @@ -145,12 +145,16 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { // settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. // If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. // -// Supported environment variables: -// DOCKER_CERT_PATH directory to load the TLS certificates (ca.pem, cert.pem, key.pem) from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). func WithTLSClientConfigFromEnv() Opt { return func(c *Client) error { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + dockerCertPath := os.Getenv(EnvOverrideCertPath) if dockerCertPath == "" { return nil } @@ -158,7 +162,7 @@ func WithTLSClientConfigFromEnv() Opt { CAFile: filepath.Join(dockerCertPath, "ca.pem"), CertFile: filepath.Join(dockerCertPath, "cert.pem"), KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", } tlsc, err := tlsconfig.Client(options) if err != nil { @@ -190,10 +194,7 @@ func WithVersion(version string) Opt { // the version is not modified. func WithVersionFromEnv() Opt { return func(c *Client) error { - if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - return WithVersion(version)(c) - } - return nil + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) } } diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index a9af001ef46b..27e8695cb547 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -4,8 +4,10 @@ import ( "context" "net/http" "path" + "strings" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/errdefs" ) @@ -61,6 +63,13 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } + if si := resp.header.Get("Swarm"); si != "" { + parts := strings.SplitN(si, "/", 2) + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(parts[0]), + ControlAvailable: len(parts) == 2 && parts[1] == "manager", + } + } err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 4a90bec51a0c..f09e460660b0 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -17,7 +17,7 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, nil, wrapResponseError(err, resp, "plugin", name) + return nil, nil, err } body, err := io.ReadAll(resp.body) diff --git 
a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index cf1935e2f5ee..2091a054d6ac 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -25,7 +25,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P resp, err := cli.get(ctx, "/plugins", query, nil) defer ensureReaderClosed(resp) if err != nil { - return plugins, wrapResponseError(err, resp, "plugin", "") + return plugins, err } err = json.NewDecoder(resp.body).Decode(&plugins) diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 51ca1040d6d2..4cd66958c3fe 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "plugin", name) + return err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index f0fd4e77afd7..c799095c1227 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -49,6 +49,14 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) @@ -133,7 +141,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings") + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") } // Don't decorate context sentinel errors; users may be comparing to @@ -145,7 +153,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if nErr, ok := err.(*url.Error); ok { if nErr, ok := nErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } @@ -172,10 +180,10 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { // Checks if client is running with elevated privileges if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { - err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.") + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") } else { f.Close() - err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.") + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") } } @@ -238,14 +246,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { continue } req.Header.Set(k, v) } for k, v := range headers { - req.Header[k] = v + req.Header[http.CanonicalHeaderKey(k)] = v } return req } diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index c07c9550d448..5906874b15d2 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -20,7 +20,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + return swarm.Secret{}, nil, err } body, err := io.ReadAll(resp.body) diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index f6c69e57f850..f47f68b6e0c9 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error { } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "secret", id) + return err } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index d082dcef75cc..2e939e8ced7a 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ 
b/vendor/github.com/docker/docker/client/secret_update.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) @@ -14,7 +13,7 @@ func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Ve return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index a07315f71fe2..23024d0f8fbb 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,7 +9,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index c5368bab1e3c..cee020c98bc5 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -22,7 +22,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + return swarm.Service{}, nil, err } body, err := io.ReadAll(serverResp.body) diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 953a2adf5aec..2c46326ebcf3 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "service", serviceID) + return err } diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index c63895f74f25..8014b8625842 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/url" - "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" @@ -35,7 +34,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("rollback", options.Rollback) } - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) if err := validateServiceSpec(service); err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index 56a5bea761e6..9fde7d75ee64 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net/url" 
"strconv" @@ -12,10 +11,10 @@ import ( // SwarmUpdate updates the swarm. func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) - query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index 410fc526e268..dde1f6c59d32 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -17,7 +17,7 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + return swarm.Task{}, nil, err } body, err := io.ReadAll(serverResp.body) diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 92761b3c639e..b3b182437bb4 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -4,18 +4,17 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { - var volume types.Volume +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, err + return vol, err } - err = json.NewDecoder(resp.body).Decode(&volume) - return volume, err + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index 5c5b3f905c54..b3ba4e60461b 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -6,33 +6,33 @@ import ( "encoding/json" "io" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/volume" ) // VolumeInspect returns the information about a specific volume in the docker host. 
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err } // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { if volumeID == "" { - return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} } - var volume types.Volume + var vol volume.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + return vol, nil, err } body, err := io.ReadAll(resp.body) if err != nil { - return volume, nil, err + return vol, nil, err } rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err } diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 942498dde2c7..d8204f8db5d1 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -6,12 +6,12 @@ import ( "net/url" "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { - var volumes volumetypes.VolumeListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { + var volumes volume.ListResponse query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 79decdafab88..1f264383606e 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "volume", volumeID) + return err } diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 000000000000..33bd31e5315c --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. 
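The method implemented just below is gated on API 1.42 and applies only to cluster volumes, which carry a `swarm.Version` for optimistic concurrency. A hedged sketch of intended usage, with a hypothetical volume name:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	vol, err := cli.VolumeInspect(ctx, "my-cluster-volume") // hypothetical name
	if err != nil {
		panic(err)
	}
	if vol.ClusterVolume == nil {
		panic("not a cluster volume; VolumeUpdate only applies to cluster volumes")
	}

	// Updates must present the version observed at inspect time.
	spec := vol.ClusterVolume.Spec
	if err := cli.VolumeUpdate(ctx, vol.Name, vol.ClusterVolume.Version,
		volume.UpdateOptions{Spec: &spec}); err != nil {
		panic(err)
	}
}
```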
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 73576f1c6e44..77bda389d1de 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,82 +1,13 @@ package errdefs // import "github.com/docker/docker/errdefs" import ( - "fmt" "net/http" - - containerderrors "github.com/containerd/containerd/errdefs" - "github.com/docker/distribution/registry/api/errcode" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) -// GetHTTPErrorStatusCode retrieves status code from error message. -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - - // Stop right there - // Are you sure you should be adding a new error class here? Do one of the existing ones work? - - // Note that the below functions are already checking the error causal chain for matches. - switch { - case IsNotFound(err): - statusCode = http.StatusNotFound - case IsInvalidParameter(err): - statusCode = http.StatusBadRequest - case IsConflict(err): - statusCode = http.StatusConflict - case IsUnauthorized(err): - statusCode = http.StatusUnauthorized - case IsUnavailable(err): - statusCode = http.StatusServiceUnavailable - case IsForbidden(err): - statusCode = http.StatusForbidden - case IsNotModified(err): - statusCode = http.StatusNotModified - case IsNotImplemented(err): - statusCode = http.StatusNotImplemented - case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): - statusCode = http.StatusInternalServerError - default: - statusCode = statusCodeFromGRPCError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromContainerdError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromDistributionError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - if e, ok := err.(causer); ok { - return GetHTTPErrorStatusCode(e.Cause()) - } - - logrus.WithFields(logrus.Fields{ - "module": "api", - "error_type": fmt.Sprintf("%T", err), - }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { - return err + return nil } switch statusCode { case http.StatusNotFound: @@ -100,11 +31,6 @@ func FromStatusCode(err error, statusCode int) error { err = System(err) } default: - logrus.WithError(err).WithFields(logrus.Fields{ - "module": "api", - "status_code": statusCode, - }).Debug("FIXME: Got an status-code for which error does not match any expected type!!!") - switch { case statusCode >= 200 && statusCode < 400: // it's a 
client error @@ -118,74 +44,3 @@ func FromStatusCode(err error, statusCode int) error { } return err } - -// statusCodeFromGRPCError returns status code according to gRPC error -func statusCodeFromGRPCError(err error) int { - switch status.Code(err) { - case codes.InvalidArgument: // code 3 - return http.StatusBadRequest - case codes.NotFound: // code 5 - return http.StatusNotFound - case codes.AlreadyExists: // code 6 - return http.StatusConflict - case codes.PermissionDenied: // code 7 - return http.StatusForbidden - case codes.FailedPrecondition: // code 9 - return http.StatusBadRequest - case codes.Unauthenticated: // code 16 - return http.StatusUnauthorized - case codes.OutOfRange: // code 11 - return http.StatusBadRequest - case codes.Unimplemented: // code 12 - return http.StatusNotImplemented - case codes.Unavailable: // code 14 - return http.StatusServiceUnavailable - default: - // codes.Canceled(1) - // codes.Unknown(2) - // codes.DeadlineExceeded(4) - // codes.ResourceExhausted(8) - // codes.Aborted(10) - // codes.Internal(13) - // codes.DataLoss(15) - return http.StatusInternalServerError - } -} - -// statusCodeFromDistributionError returns status code according to registry errcode -// code is loosely based on errcode.ServeJSON() in docker/distribution -func statusCodeFromDistributionError(err error) int { - switch errs := err.(type) { - case errcode.Errors: - if len(errs) < 1 { - return http.StatusInternalServerError - } - if _, ok := errs[0].(errcode.ErrorCoder); ok { - return statusCodeFromDistributionError(errs[0]) - } - case errcode.ErrorCoder: - return errs.ErrorCode().Descriptor().HTTPStatusCode - } - return http.StatusInternalServerError -} - -// statusCodeFromContainerdError returns status code for containerd errors when -// consumed directly (not through gRPC) -func statusCodeFromContainerdError(err error) int { - switch { - case containerderrors.IsInvalidArgument(err): - return http.StatusBadRequest - case containerderrors.IsNotFound(err): - return http.StatusNotFound - case containerderrors.IsAlreadyExists(err): - return http.StatusConflict - case containerderrors.IsFailedPrecondition(err): - return http.StatusPreconditionFailed - case containerderrors.IsUnavailable(err): - return http.StatusServiceUnavailable - case containerderrors.IsNotImplemented(err): - return http.StatusNotImplemented - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/github.com/docker/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/docker/libnetwork/ipamutils/utils.go deleted file mode 100644 index 3fd37cd88403..000000000000 --- a/vendor/github.com/docker/docker/libnetwork/ipamutils/utils.go +++ /dev/null @@ -1,135 +0,0 @@ -// Package ipamutils provides utility functions for ipam management -package ipamutils - -import ( - "fmt" - "net" - "sync" -) - -var ( - // PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 - // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks` - PredefinedLocalScopeDefaultNetworks []*net.IPNet - // PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8 - // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks` - PredefinedGlobalScopeDefaultNetworks []*net.IPNet - mutex sync.Mutex - localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, - {"172.20.0.0/14", 16}, 
{"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, - {"192.168.0.0/16", 20}} - globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}} -) - -// NetworkToSplit represent a network that has to be split in chunks with mask length Size. -// Each subnet in the set is derived from the Base pool. Base is to be passed -// in CIDR format. -// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256 -// 10.10.[0-255].0/24 address pools -type NetworkToSplit struct { - Base string `json:"base"` - Size int `json:"size"` -} - -func init() { - var err error - if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil { - //we are going to panic in case of error as we should never get into this state - panic("InitAddressPools failed to initialize the global scope default address pool") - } - - if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil { - //we are going to panic in case of error as we should never get into this state - panic("InitAddressPools failed to initialize the local scope default address pool") - } -} - -// configDefaultNetworks configures local as well global default pool based on input -func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error { - mutex.Lock() - defer mutex.Unlock() - defaultNetworks, err := splitNetworks(defaultAddressPool) - if err != nil { - return err - } - *result = defaultNetworks - return nil -} - -// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks -func GetGlobalScopeDefaultNetworks() []*net.IPNet { - mutex.Lock() - defer mutex.Unlock() - return PredefinedGlobalScopeDefaultNetworks -} - -// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks -func GetLocalScopeDefaultNetworks() []*net.IPNet { - mutex.Lock() - defer mutex.Unlock() - return PredefinedLocalScopeDefaultNetworks -} - -// ConfigGlobalScopeDefaultNetworks configures global default pool. -// Ideally this will be called from SwarmKit as part of swarm init -func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error { - if defaultAddressPool == nil { - defaultAddressPool = globalScopeDefaultNetworks - } - return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks) -} - -// ConfigLocalScopeDefaultNetworks configures local default pool. -// Ideally this will be called during libnetwork init -func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error { - if defaultAddressPool == nil { - return nil - } - return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks) -} - -// splitNetworks takes a slice of networks, split them accordingly and returns them -func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) { - localPools := make([]*net.IPNet, 0, len(list)) - - for _, p := range list { - _, b, err := net.ParseCIDR(p.Base) - if err != nil { - return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err) - } - ones, _ := b.Mask.Size() - if p.Size <= 0 || p.Size < ones { - return nil, fmt.Errorf("invalid pools size: %d", p.Size) - } - localPools = append(localPools, splitNetwork(p.Size, b)...) 
- } - return localPools, nil -} - -func splitNetwork(size int, base *net.IPNet) []*net.IPNet { - one, bits := base.Mask.Size() - mask := net.CIDRMask(size, bits) - n := 1 << uint(size-one) - s := uint(bits - size) - list := make([]*net.IPNet, 0, n) - - for i := 0; i < n; i++ { - ip := copyIP(base.IP) - addIntToIP(ip, uint(i<<s)) - list = append(list, &net.IPNet{IP: ip, Mask: mask}) - } - return list -} - -func copyIP(from net.IP) net.IP { - ip := make([]byte, len(from)) - copy(ip, from) - return ip -} - -func addIntToIP(array net.IP, ordinal uint) { - for i := len(array) - 1; i >= 0; i-- { - array[i] |= (byte)(ordinal & 0xff) - ordinal >>= 8 - } -} diff --git a/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go index a9d8027886d5..09c2dacbf1ce 100644 --- a/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go +++ b/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go @@ -85,12 +85,6 @@ var ( optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) ) -var lastModified struct { - sync.Mutex - sha256 string - contents []byte -} - // File contains the resolv.conf content and its hash type File struct { Content []byte @@ -115,46 +109,12 @@ func GetSpecific(path string) (*File, error) { return &File{Content: resolv, Hash: hash}, nil } -// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash -// and, if modified since last check, returns the bytes and new hash. -// This feature is used by the resolv.conf updater for containers -func GetIfChanged() (*File, error) { - lastModified.Lock() - defer lastModified.Unlock() - - resolv, err := os.ReadFile(Path()) - if err != nil { - return nil, err - } - newHash, err := hashData(bytes.NewReader(resolv)) - if err != nil { - return nil, err - } - if lastModified.sha256 != newHash { - lastModified.sha256 = newHash - lastModified.contents = resolv - return &File{Content: resolv, Hash: newHash}, nil - } - // nothing changed, so return no data - return nil, nil -} - -// GetLastModified retrieves the last used contents and hash of the host resolv.conf. -// Used by containers updating on restart -func GetLastModified() *File { - lastModified.Lock() - defer lastModified.Unlock() - - return &File{Content: lastModified.contents, Hash: lastModified.sha256} -} - // FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: -// 1. It looks for localhost (127.*|::1) entries in the provided -// resolv.conf, removing local nameserver entries, and, if the resulting -// cleaned config has no defined nameservers left, adds default DNS entries -// 2. Given the caller provides the enable/disable state of IPv6, the filter -// code will remove all IPv6 nameservers if it is not enabled for containers -// +// 1. It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2.
Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) // if IPv6 is not enabled, also clean out any IPv6 address nameserver diff --git a/vendor/github.com/docker/docker/libnetwork/types/types.go b/vendor/github.com/docker/docker/libnetwork/types/types.go deleted file mode 100644 index e4ade05902f7..000000000000 --- a/vendor/github.com/docker/docker/libnetwork/types/types.go +++ /dev/null @@ -1,576 +0,0 @@ -// Package types contains types that are common across libnetwork project -package types - -import ( - "bytes" - "fmt" - "net" - "strings" - - "github.com/ishidawataru/sctp" -) - -// constants for the IP address type -// Deprecated: use the consts defined in github.com/docker/docker/libnetwork/resolvconf -const ( - IP = iota // IPv4 and IPv6 - IPv4 - IPv6 -) - -// EncryptionKey is the libnetwork representation of the key distributed by the lead -// manager. -type EncryptionKey struct { - Subsystem string - Algorithm int32 - Key []byte - LamportTime uint64 -} - -// UUID represents a globally unique ID of various resources like network and endpoint -type UUID string - -// QosPolicy represents a quality of service policy on an endpoint -type QosPolicy struct { - MaxEgressBandwidth uint64 -} - -// TransportPort represents a local Layer 4 endpoint -type TransportPort struct { - Proto Protocol - Port uint16 -} - -// Equal checks if this instance of Transportport is equal to the passed one -func (t *TransportPort) Equal(o *TransportPort) bool { - if t == o { - return true - } - - if o == nil { - return false - } - - if t.Proto != o.Proto || t.Port != o.Port { - return false - } - - return true -} - -// GetCopy returns a copy of this TransportPort structure instance -func (t *TransportPort) GetCopy() TransportPort { - return TransportPort{Proto: t.Proto, Port: t.Port} -} - -// String returns the TransportPort structure in string form -func (t *TransportPort) String() string { - return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) -} - -// PortBinding represents a port binding between the container and the host -type PortBinding struct { - Proto Protocol - IP net.IP - Port uint16 - HostIP net.IP - HostPort uint16 - HostPortEnd uint16 -} - -// HostAddr returns the host side transport address -func (p PortBinding) HostAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case TCP: - return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil - default: - return nil, ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// ContainerAddr returns the container side transport address -func (p PortBinding) ContainerAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil - case TCP: - return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil - default: - return nil, ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// GetCopy returns a copy of this PortBinding structure instance -func (p *PortBinding) GetCopy() PortBinding { - return PortBinding{ - Proto: p.Proto, - IP: GetIPCopy(p.IP), - Port: p.Port, - HostIP: GetIPCopy(p.HostIP), - 
HostPort: p.HostPort, - HostPortEnd: p.HostPortEnd, - } -} - -// String returns the PortBinding structure in string form -func (p *PortBinding) String() string { - ret := fmt.Sprintf("%s/", p.Proto) - if p.IP != nil { - ret += p.IP.String() - } - ret = fmt.Sprintf("%s:%d/", ret, p.Port) - if p.HostIP != nil { - ret += p.HostIP.String() - } - ret = fmt.Sprintf("%s:%d", ret, p.HostPort) - return ret -} - -// Equal checks if this instance of PortBinding is equal to the passed one -func (p *PortBinding) Equal(o *PortBinding) bool { - if p == o { - return true - } - - if o == nil { - return false - } - - if p.Proto != o.Proto || p.Port != o.Port || - p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { - return false - } - - if p.IP != nil { - if !p.IP.Equal(o.IP) { - return false - } - } else { - if o.IP != nil { - return false - } - } - - if p.HostIP != nil { - if !p.HostIP.Equal(o.HostIP) { - return false - } - } else { - if o.HostIP != nil { - return false - } - } - - return true -} - -// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. -type ErrInvalidProtocolBinding string - -func (ipb ErrInvalidProtocolBinding) Error() string { - return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) -} - -const ( - // ICMP is for the ICMP ip protocol - ICMP = 1 - // TCP is for the TCP ip protocol - TCP = 6 - // UDP is for the UDP ip protocol - UDP = 17 - // SCTP is for the SCTP ip protocol - SCTP = 132 -) - -// Protocol represents an IP protocol number -type Protocol uint8 - -func (p Protocol) String() string { - switch p { - case ICMP: - return "icmp" - case TCP: - return "tcp" - case UDP: - return "udp" - case SCTP: - return "sctp" - default: - return fmt.Sprintf("%d", p) - } -} - -// ParseProtocol returns the respective Protocol type for the passed string -func ParseProtocol(s string) Protocol { - switch strings.ToLower(s) { - case "icmp": - return ICMP - case "udp": - return UDP - case "tcp": - return TCP - case "sctp": - return SCTP - default: - return 0 - } -} - -// GetMacCopy returns a copy of the passed MAC address -func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { - if from == nil { - return nil - } - to := make(net.HardwareAddr, len(from)) - copy(to, from) - return to -} - -// GetIPCopy returns a copy of the passed IP address -func GetIPCopy(from net.IP) net.IP { - if from == nil { - return nil - } - to := make(net.IP, len(from)) - copy(to, from) - return to -} - -// GetIPNetCopy returns a copy of the passed IP Network -func GetIPNetCopy(from *net.IPNet) *net.IPNet { - if from == nil { - return nil - } - bm := make(net.IPMask, len(from.Mask)) - copy(bm, from.Mask) - return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} -} - -// GetIPNetCanonical returns the canonical form for the passed network -func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { - if nw == nil { - return nil - } - c := GetIPNetCopy(nw) - c.IP = c.IP.Mask(nw.Mask) - return c -} - -// CompareIPNet returns equal if the two IP Networks are equal -func CompareIPNet(a, b *net.IPNet) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) -} - -// GetMinimalIP returns the address in its shortest form -// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. -// Otherwise ip is returned unchanged. 
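The removed address helpers further below (`GetHostPartIP`, `GetBroadcastIP`) perform plain bitwise arithmetic against the mask. A standard-library sketch of the IPv4 broadcast case, illustrative rather than a drop-in replacement:

```go
package main

import (
	"fmt"
	"net"
)

// broadcastIPv4 mirrors the arithmetic of the removed GetBroadcastIP helper
// for the simple IPv4 case: set every host bit of the network address.
func broadcastIPv4(n *net.IPNet) net.IP {
	ip := n.IP.To4()
	out := make(net.IP, len(ip))
	for i := range ip {
		out[i] = ip[i] | ^n.Mask[i]
	}
	return out
}

func main() {
	_, n, _ := net.ParseCIDR("192.168.1.0/24")
	fmt.Println(broadcastIPv4(n)) // 192.168.1.255
}
```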
-func GetMinimalIP(ip net.IP) net.IP { - if ip != nil && ip.To4() != nil { - return ip.To4() - } - return ip -} - -// IsIPNetValid returns true if the ipnet is a valid network/mask -// combination. Otherwise returns false. -func IsIPNetValid(nw *net.IPNet) bool { - return nw.String() != "0.0.0.0/0" -} - -var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - -// compareIPMask checks if the passed ip and mask are semantically compatible. -// It returns the byte indexes for the address and mask so that caller can -// do bitwise operations without modifying address representation. -func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { - // Find the effective starting of address and mask - if len(ip) == net.IPv6len && ip.To4() != nil { - is = 12 - } - if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { - ms = 12 - } - // Check if address and mask are semantically compatible - if len(ip[is:]) != len(mask[ms:]) { - err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) - } - return -} - -// GetHostPartIP returns the host portion of the ip address identified by the mask. -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) - } - - // Compute host portion - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] &= ^mask[ms+i] - } - - return out, nil -} - -// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) - } - - // Compute broadcast address - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] |= ^mask[ms+i] - } - - return out, nil -} - -// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation -func ParseCIDR(cidr string) (n *net.IPNet, e error) { - var i net.IP - if i, n, e = net.ParseCIDR(cidr); e == nil { - n.IP = i - } - return -} - -const ( - // NEXTHOP indicates a StaticRoute with an IP next hop. - NEXTHOP = iota - - // CONNECTED indicates a StaticRoute with an interface for directly connected peers. - CONNECTED -) - -// StaticRoute is a statically-provisioned IP route. -type StaticRoute struct { - Destination *net.IPNet - - RouteType int // NEXT_HOP or CONNECTED - - // NextHop will be resolved by the kernel (i.e. as a loose hop). 
- NextHop net.IP -} - -// GetCopy returns a copy of this StaticRoute structure -func (r *StaticRoute) GetCopy() *StaticRoute { - d := GetIPNetCopy(r.Destination) - nh := GetIPCopy(r.NextHop) - return &StaticRoute{Destination: d, - RouteType: r.RouteType, - NextHop: nh, - } -} - -// InterfaceStatistics represents the interface's statistics -type InterfaceStatistics struct { - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} - -func (is *InterfaceStatistics) String() string { - return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", - is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) -} - -/****************************** - * Well-known Error Interfaces - ******************************/ - -// MaskableError is an interface for errors which can be ignored by caller -type MaskableError interface { - // Maskable makes implementer into MaskableError type - Maskable() -} - -// RetryError is an interface for errors which might get resolved through retry -type RetryError interface { - // Retry makes implementer into RetryError type - Retry() -} - -// BadRequestError is an interface for errors originated by a bad request -type BadRequestError interface { - // BadRequest makes implementer into BadRequestError type - BadRequest() -} - -// NotFoundError is an interface for errors raised because a needed resource is not available -type NotFoundError interface { - // NotFound makes implementer into NotFoundError type - NotFound() -} - -// ForbiddenError is an interface for errors which denote a valid request that cannot be honored -type ForbiddenError interface { - // Forbidden makes implementer into ForbiddenError type - Forbidden() -} - -// NoServiceError is an interface for errors returned when the required service is not available -type NoServiceError interface { - // NoService makes implementer into NoServiceError type - NoService() -} - -// TimeoutError is an interface for errors raised because of timeout -type TimeoutError interface { - // Timeout makes implementer into TimeoutError type - Timeout() -} - -// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented -type NotImplementedError interface { - // NotImplemented makes implementer into NotImplementedError type - NotImplemented() -} - -// InternalError is an interface for errors raised because of an internal error -type InternalError interface { - // Internal makes implementer into InternalError type - Internal() -} - -/****************************** - * Well-known Error Formatters - ******************************/ - -// BadRequestErrorf creates an instance of BadRequestError -func BadRequestErrorf(format string, params ...interface{}) error { - return badRequest(fmt.Sprintf(format, params...)) -} - -// NotFoundErrorf creates an instance of NotFoundError -func NotFoundErrorf(format string, params ...interface{}) error { - return notFound(fmt.Sprintf(format, params...)) -} - -// ForbiddenErrorf creates an instance of ForbiddenError -func ForbiddenErrorf(format string, params ...interface{}) error { - return forbidden(fmt.Sprintf(format, params...)) -} - -// NoServiceErrorf creates an instance of NoServiceError -func NoServiceErrorf(format string, params ...interface{}) error { - return noService(fmt.Sprintf(format, params...)) -} - -// NotImplementedErrorf 
creates an instance of NotImplementedError -func NotImplementedErrorf(format string, params ...interface{}) error { - return notImpl(fmt.Sprintf(format, params...)) -} - -// TimeoutErrorf creates an instance of TimeoutError -func TimeoutErrorf(format string, params ...interface{}) error { - return timeout(fmt.Sprintf(format, params...)) -} - -// InternalErrorf creates an instance of InternalError -func InternalErrorf(format string, params ...interface{}) error { - return internal(fmt.Sprintf(format, params...)) -} - -// InternalMaskableErrorf creates an instance of InternalError and MaskableError -func InternalMaskableErrorf(format string, params ...interface{}) error { - return maskInternal(fmt.Sprintf(format, params...)) -} - -// RetryErrorf creates an instance of RetryError -func RetryErrorf(format string, params ...interface{}) error { - return retry(fmt.Sprintf(format, params...)) -} - -/*********************** - * Internal Error Types - ***********************/ -type badRequest string - -func (br badRequest) Error() string { - return string(br) -} -func (br badRequest) BadRequest() {} - -type notFound string - -func (nf notFound) Error() string { - return string(nf) -} -func (nf notFound) NotFound() {} - -type forbidden string - -func (frb forbidden) Error() string { - return string(frb) -} -func (frb forbidden) Forbidden() {} - -type noService string - -func (ns noService) Error() string { - return string(ns) -} -func (ns noService) NoService() {} - -type timeout string - -func (to timeout) Error() string { - return string(to) -} -func (to timeout) Timeout() {} - -type notImpl string - -func (ni notImpl) Error() string { - return string(ni) -} -func (ni notImpl) NotImplemented() {} - -type internal string - -func (nt internal) Error() string { - return string(nt) -} -func (nt internal) Internal() {} - -type maskInternal string - -func (mnt maskInternal) Error() string { - return string(mnt) -} -func (mnt maskInternal) Internal() {} -func (mnt maskInternal) Maskable() {} - -type retry string - -func (r retry) Error() string { - return string(r) -} -func (r retry) Retry() {} diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go deleted file mode 100644 index 6274b35a871d..000000000000 --- a/vendor/github.com/docker/docker/opts/address_pools.go +++ /dev/null @@ -1,84 +0,0 @@ -package opts - -import ( - "encoding/csv" - "encoding/json" - "fmt" - "strconv" - "strings" - - types "github.com/docker/docker/libnetwork/ipamutils" -) - -// PoolsOpt is a Value type for parsing the default address pools definitions -type PoolsOpt struct { - Values []*types.NetworkToSplit -} - -// UnmarshalJSON fills values structure info from JSON input -func (p *PoolsOpt) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &(p.Values)) -} - -// Set predefined pools -func (p *PoolsOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - poolsDef := types.NetworkToSplit{} - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case "base": - poolsDef.Base = value - case "size": - size, err := strconv.Atoi(value) - if err != nil { - return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err) - } - 
poolsDef.Size = size - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - p.Values = append(p.Values, &poolsDef) - - return nil -} - -// Type returns the type of this option -func (p *PoolsOpt) Type() string { - return "pool-options" -} - -// String returns a string repr of this option -func (p *PoolsOpt) String() string { - var pools []string - for _, pool := range p.Values { - repr := fmt.Sprintf("%s %d", pool.Base, pool.Size) - pools = append(pools, repr) - } - return strings.Join(pools, ", ") -} - -// Value returns the mounts -func (p *PoolsOpt) Value() []*types.NetworkToSplit { - return p.Values -} - -// Name returns the flag name of this option -func (p *PoolsOpt) Name() string { - return "default-address-pools" -} diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go deleted file mode 100644 index 97e1a8c8a26d..000000000000 --- a/vendor/github.com/docker/docker/opts/env.go +++ /dev/null @@ -1,30 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "os" - "strings" - - "github.com/pkg/errors" -) - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it obtains its value from the current environment -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate whatsoever, it's up to application inside docker -// to validate them or not. -// -// The only validation here is to check if name is empty, per #25099 -func ValidateEnv(val string) (string, error) { - arr := strings.SplitN(val, "=", 2) - if arr[0] == "" { - return "", errors.New("invalid environment variable: " + val) - } - if len(arr) > 1 { - return val, nil - } - if envVal, ok := os.LookupEnv(arr[0]); ok { - return arr[0] + "=" + envVal, nil - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go deleted file mode 100644 index a3123adefe70..000000000000 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ /dev/null @@ -1,183 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" - "net/url" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/pkg/homedir" -) - -const ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. 
- // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = "tcp://" + DefaultHTTPHost + ":2375" - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = "tcp://" + DefaultHTTPHost + ":2376" - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` - // HostGatewayName is the string value that can be passed - // to the IPAddr section in --add-host that is replaced by - // the value of HostGatewayIP daemon config value - HostGatewayName = "host-gateway" -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDaemonHost - if host != "" { - _, err := parseDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for TLS - return val, nil -} - -// ParseHost and set defaults for a Daemon host string. -// defaultToTLS is preferred over defaultToUnixXDG. -func ParseHost(defaultToTLS, defaultToUnixXDG bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else if defaultToUnixXDG { - runtimeDir, err := homedir.GetRuntimeDir() - if err != nil { - return "", err - } - socket := filepath.Join(runtimeDir, "docker.sock") - host = "unix://" + socket - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. -func parseDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. 
It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. - if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - // Skip IPaddr validation for special "host-gateway" string - if arr[1] != HostGatewayName { - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 4b1c8512e238..000000000000 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows -// +build !windows - -package opts // import "github.com/docker/docker/opts" - -const ( - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 - DefaultHTTPHost = "localhost" - - // DefaultHost constant defines the default host string used by docker on other hosts than Windows - DefaultHost = "unix://" + DefaultUnixSocket -) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 576236ba42b0..000000000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,60 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -const ( - // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. 
- // - // On Windows, this mitigates a problem with the default options of running - // a docker client against a local docker daemon on TP5. - // - // What was found that if the default host is "localhost", even if the client - // (and daemon as this is local) is not physically on a network, and the DNS - // cache is flushed (ipconfig /flushdns), then the client will pause for - // exactly one second when connecting to the daemon for calls. For example - // using docker run windowsservercore cmd, the CLI will send a create followed - // by an attach. You see the delay between the attach finishing and the attach - // being seen by the daemon. - // - // Here's some daemon debug logs with additional debug spew put in. The - // AfterWriteJSON log is the very last thing the daemon does as part of the - // create call. The POST /attach is the second CLI call. Notice the second - // time gap. - // - // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" - // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" - // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." - // time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... - // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." - // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." - // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" - // time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" - // time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" - // time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" - // time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" - // ... 1 second gap here.... - // time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" - // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" - // - // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change - // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, - // the Windows networking stack is supposed to resolve "localhost" internally, - // without hitting DNS, or even reading the hosts file (which is why localhost - // is commented out in the hosts file on Windows). - // - // We have validated that working around this using the actual IPv4 localhost - // address does not cause the delay. - // - // This does not occur with the docker client built with 1.4.3 on the same - // Windows build, regardless of whether the daemon is built using 1.5.1 - // or 1.4.3. It does not occur on Linux. We also verified we see the same thing - // on a cross-compiled Windows binary (from Linux). - // - // Final note: This is a mitigation, not a 'real' fix. It is still susceptible - // to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' - // explicitly. - - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
dockerd -H tcp://:8080 - DefaultHTTPHost = "127.0.0.1" - - // DefaultHost constant defines the default host string used by docker on Windows - DefaultHost = "npipe://" + DefaultNamedPipe -) diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go deleted file mode 100644 index cfbff3a9fd61..000000000000 --- a/vendor/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parsable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} - -// Type returns the type of the option -func (o *IPOpt) Type() string { - return "ip" -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go deleted file mode 100644 index 60a093f28c9e..000000000000 --- a/vendor/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,348 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" - "path" - "regexp" - "strings" - - units "github.com/docker/go-units" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - if len(*opts.values) == 0 { - return "" - } - return fmt.Sprintf("%v", *opts.values) -} - -// Set validates if needed the input value and adds it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - *opts.values = append(*opts.values, value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. 
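For context on the option types being deleted below: ListOpts implements the pflag-style Value interface -- each Set runs the configured validator and appends the value, while GetMap collapses the accumulated values into a set. A minimal sketch, compiled against the docker/docker opts package that this diff removes from the vendor tree (the label values are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Each Set runs the validator; invalid values are rejected before
	// they reach the underlying slice.
	labels := opts.NewListOpts(opts.ValidateLabel)
	for _, v := range []string{"com.example.team=infra", "com.example.team=infra"} {
		if err := labels.Set(v); err != nil {
			panic(err)
		}
	}
	fmt.Println(labels.GetAll()) // [com.example.team=infra com.example.team=infra]
	fmt.Println(labels.GetMap()) // map[com.example.team=infra:{}] -- duplicates collapsed
}
```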
-func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return *opts.values -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len(*opts.values) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// WithValidator returns the ListOpts with validator set. -func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { - opts.validator = validator - return opts -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", opts.values) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. 
-func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, -// it does not use the reserved namespaces com.docker.*, io.docker.*, org.dockerproject.* -// and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - - lowered := strings.ToLower(val) - if strings.HasPrefix(lowered, "com.docker.") || strings.HasPrefix(lowered, "io.docker.") || - strings.HasPrefix(lowered, "org.dockerproject.") { - return "", fmt.Errorf( - "label %s is not allowed: the namespaces com.docker.*, io.docker.*, and org.dockerproject.* are reserved for internal use", - val) - } - - return val, nil -} - -// ValidateSingleGenericResource validates that a single entry in the -// generic resource list is valid. 
-// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't -func ValidateSingleGenericResource(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) - } - return val, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) -type MemBytes int64 - -// String returns the string format of the human readable memory bytes -func (m *MemBytes) String() string { - // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. - // We return "0" in case value is 0 here so that the default value is hidden. - // (Sometimes "default 0 B" is actually misleading) - if m.Value() != 0 { - return units.BytesSize(float64(m.Value())) - } - return "0" -} - -// Set sets the value of the MemBytes by passing a string -func (m *MemBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = MemBytes(val) - return err -} - -// Type returns the type -func (m *MemBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemBytes) Value() int64 { - return int64(*m) -} - -// UnmarshalJSON is the customized unmarshaler for MemBytes -func (m *MemBytes) UnmarshalJSON(s []byte) error { - if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { - return fmt.Errorf("invalid size: %q", s) - } - val, err := units.RAMInBytes(string(s[1 : len(s)-1])) - *m = MemBytes(val) - return err -} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go deleted file mode 100644 index 34f30971e400..000000000000 --- a/vendor/github.com/docker/docker/opts/quotedstring.go +++ /dev/null @@ -1,41 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. 
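QuotedString's stripping rule (implemented in trimQuotes just below) is easy to misread: exactly one pair of outer quotes is removed, and only when both ends carry the same quote character. A small sketch against the same pre-removal opts package (the values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var v string
	qs := opts.NewQuotedString(&v)

	qs.Set(`"secret"`) // matching double quotes stripped
	fmt.Println(v)     // secret

	qs.Set(`'it"s'`) // matching single quotes stripped; inner quote kept
	fmt.Println(v)   // it"s

	qs.Set(`"half`) // no matching pair, kept verbatim
	fmt.Println(v)  // "half
}
```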
-type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (s *QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return *s.value -} - -func trimQuotes(value string) string { - if len(value) < 2 { - return value - } - - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go deleted file mode 100644 index 4b9babf0a5c7..000000000000 --- a/vendor/github.com/docker/docker/opts/runtime.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. 
-func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} - -// Type returns the type of the option -func (o *RuntimeOpt) Type() string { - return "runtime" -} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 61cc58d4d329..000000000000 --- a/vendor/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,81 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - - units "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} - -// NamedUlimitOpt defines a named map of Ulimits -type NamedUlimitOpt struct { - name string - UlimitOpt -} - -var _ NamedOption = &NamedUlimitOpt{} - -// NewNamedUlimitOpt creates a new NamedUlimitOpt -func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &NamedUlimitOpt{ - name: name, - UlimitOpt: *NewUlimitOpt(ref), - } -} - -// Name returns the option name -func (o *NamedUlimitOpt) Name() string { - return o.name -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index a9fd1e955205..e9ac1e322e69 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -18,16 +18,31 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/fileutils" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/klauspost/compress/zstd" + "github.com/moby/patternmatcher" + "github.com/moby/sys/sequential" + "github.com/pkg/errors" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. +// +// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not +// proliferate in the codebase. 
The default value 0755 has been selected based on the default umask of 0022, and +// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. +// +// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is +// subject to change in Moby at any time -- image authors who require consistent or known directory permissions +// should explicitly control them by ensuring that header entries exist for any applicable path. +const ImpliedDirectoryMode = 0755 + type ( // Compression is the state represents if compressed or not. Compression int @@ -40,8 +55,7 @@ type ( ExcludePatterns []string Compression Compression NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + IDMap idtools.IdentityMapping ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. @@ -63,12 +77,12 @@ type ( // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error - IDMapping *idtools.IdentityMapping + IDMapping idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} + return &Archiver{Untar: Untar} } // breakoutError is used to differentiate errors related to breaking out @@ -381,7 +395,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi } pipeWriter.Close() - }() return pipeReader } @@ -534,7 +547,7 @@ type tarAppender struct { // for hardlink mapping SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping + IdentityMapping idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the @@ -544,7 +557,7 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { +func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), @@ -659,10 +672,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) + // We use sequential file access to avoid depleting the standby list on + // Windows. On Linux, this equates to a regular os.Open. + file, err := sequential.Open(path) if err != nil { return err } @@ -700,10 +712,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. 
+ file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -729,7 +740,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeLink: - //#nosec G305 -- The target path is checked for path traversal. + // #nosec G305 -- The target path is checked for path traversal. targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { @@ -742,7 +753,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) //#nosec G305 -- The target path is checked for path traversal. + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: @@ -767,7 +778,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err + msg := "failed to Lchown %q for UID %d, GID %d" + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" + } + return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) } } @@ -786,7 +801,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } return err } - } if len(errors) > 0 { @@ -836,12 +850,11 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. 
srcPath = fixVolumePathPrefix(srcPath) - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + pm, err := patternmatcher.New(options.ExcludePatterns) if err != nil { return nil, err } @@ -860,7 +873,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) go func() { ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + options.IDMap, compressWriter, options.ChownOpts, ) @@ -916,7 +929,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) rebaseName := options.RebaseNames[include] var ( - parentMatchInfo []fileutils.MatchInfo + parentMatchInfo []patternmatcher.MatchInfo parentDirs []string ) @@ -955,11 +968,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] } - var matchInfo fileutils.MatchInfo + var matchInfo patternmatcher.MatchInfo if len(parentMatchInfo) != 0 { skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) } else { - skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{}) + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) @@ -1044,8 +1057,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err @@ -1080,22 +1091,13 @@ loop: } } - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) - if err != nil { - return err - } - } + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return err } - //#nosec G305 -- The joined path is checked for path traversal. + // #nosec G305 -- The joined path is checked for path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -1134,7 +1136,7 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMapping, hdr); err != nil { + if err := remapIDs(options.IDMap, hdr); err != nil { return err } @@ -1160,7 +1162,7 @@ loop: } for _, hdr := range dirs { - //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. + // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { @@ -1170,10 +1172,40 @@ loop: return nil } +// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do +// not already exist. 
This is possible as the tar format supports 'implicit' directories, where their existence is
+// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
+// we must both create them and choose metadata like permissions.
+//
+// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
+// on which the daemon is running. This precondition is required because this function assumes an OS-specific path
+// separator when checking that a path is not the root.
+func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
+	// Not the root directory, ensure that the parent directory exists
+	if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+		parent := filepath.Dir(hdr.Name)
+		parentPath := filepath.Join(dest, parent)
+		if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+			// RootPair() is confined inside this loop as most cases will not require a call, so we can tolerate a few
+			// extra function calls in the uncommon case to keep the logic encapsulated -- implied directories are a
+			// niche usage that reduces the portability of an image.
+			rootIDs := options.IDMap.RootPair()
+
+			err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+// identity (uncompressed), gzip, bzip2, xz.
+//
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
 	return untarHandler(tarArchive, dest, options, true)
@@ -1221,8 +1253,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMapping.UIDs(),
-		GIDMaps: archiver.IDMapping.GIDs(),
+		IDMap: archiver.IDMapping,
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1235,8 +1266,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMapping.UIDs(),
-		GIDMaps: archiver.IDMapping.GIDs(),
+		IDMap: archiver.IDMapping,
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1343,11 +1373,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 }
 
 // IdentityMapping returns the IdentityMapping of the archiver. 
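To see the new implied-directory handling end to end, the sketch below builds a tar whose only entry is a/b/file.txt and unpacks it with this package: createImpliedDirectories materializes a/ and a/b/ with ImpliedDirectoryMode because the archive carries no headers for them. The temp-dir setup is illustrative, and the sketch assumes enough privilege to chown (the daemon's normal context) plus the common 022 umask:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// One file entry and no directory entries: a/ and a/b/ are implied.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "a/b/file.txt", Mode: 0o600, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	tw.Write(body)
	tw.Close()

	dest, err := os.MkdirTemp("", "untar-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dest)

	if err := archive.Untar(&buf, dest, &archive.TarOptions{}); err != nil {
		panic(err)
	}

	fi, err := os.Stat(filepath.Join(dest, "a", "b"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%o\n", fi.Mode().Perm()) // 755, i.e. ImpliedDirectoryMode
}
```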
-func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { +func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { return archiver.IDMapping } -func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { +func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index a0f25942c14f..7f7242be50e6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -246,7 +246,6 @@ func (info *FileInfo) path() string { } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) if oldInfo == nil { @@ -319,7 +318,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } - } // Changes add changes to file information. @@ -394,10 +392,10 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { +func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + ta := newTarAppender(idMap, writer, nil) // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 801b844b786c..f3111b79b1b2 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -29,8 +29,8 @@ var ( // clean path already ends in the separator, then another is not added. 
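Why PreserveTrailingDotOrSeparator exists: filepath.Clean erases a trailing "/." even though, for copy semantics, "src/." (copy the directory's contents) and "src" (copy the directory itself) mean different things. A minimal sketch against this vendored revision, whose signature still takes an explicit separator byte (the path is illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	orig := "/data/src/."
	cleaned := filepath.Clean(orig) // "/data/src" -- the meaningful "/." is gone
	restored := archive.PreserveTrailingDotOrSeparator(cleaned, orig, '/')
	fmt.Println(restored) // "/data/src/."
}
```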
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) + cleanedPath = strings.ReplaceAll(cleanedPath, "/", string(sep)) + originalPath = strings.ReplaceAll(originalPath, "/", string(sep)) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { if !hasTrailingPathSeparator(cleanedPath, sep) { @@ -303,7 +303,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } - } // RebaseArchiveEntries rewrites the given srcContent archive replacing diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 6174bc2af43a..8eeccb608b74 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -9,7 +9,6 @@ import ( "runtime" "strings" - "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/sirupsen/logrus" @@ -32,7 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) @@ -74,20 +72,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } } - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return 0, err } // Skip AUFS metadata dirs @@ -192,7 +180,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMapping, srcHdr); err != nil { + if err := remapIDs(options.IDMap, srcHdr); err != nil { return 0, err } @@ -241,13 +229,8 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp dest = filepath.Clean(dest) // We need to be able to set any perms - if runtime.GOOS != "windows" { - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) - } + restore := overrideUmask(0) + defer restore() if decompress { decompLayer, err := DecompressStream(layer) diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go new file mode 100644 index 000000000000..d7f806445e80 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -0,0 +1,22 @@ +//go:build !windows +// +build !windows + +package archive + +import "golang.org/x/sys/unix" + +// overrideUmask sets current process's file mode creation mask to newmask +// and returns a function to restore it. 
+// +// WARNING for readers stumbling upon this code. Changing umask in a multi- +// threaded environment isn't safe. Don't use this without understanding the +// risks, and don't export this function for others to use (we shouldn't even +// be using this ourself). +// +// FIXME(thaJeztah): we should get rid of these hacks if possible. +func overrideUmask(newMask int) func() { + oldMask := unix.Umask(newMask) + return func() { + unix.Umask(oldMask) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go new file mode 100644 index 000000000000..d28f5b2dfdd9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go @@ -0,0 +1,6 @@ +package archive + +// overrideUmask is a no-op on windows. +func overrideUmask(newmask int) func() { + return func() {} +} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go index 85435694cff7..032db82cea82 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -17,8 +17,8 @@ import ( // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content +// - ./foo.txt with content "hello world" +// - ./empty with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go index 427abee7e82f..0620157df998 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -20,10 +20,7 @@ func init() { } // NewArchiver returns a new Archiver which uses chrootarchive.Untar -func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver { - if idMapping == nil { - idMapping = &idtools.IdentityMapping{} - } +func NewArchiver(idMapping idtools.IdentityMapping) *archive.Archiver { return &archive.Archiver{ Untar: Untar, IDMapping: idMapping, @@ -33,7 +30,7 @@ func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver { // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. +// identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { return untarHandler(tarArchive, dest, options, true, dest) } @@ -76,8 +73,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions // If dest is inside a root then directory is created within chroot by extractor. // This case is only currently used by cp. 
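The save/restore idiom used by the new overrideUmask helper, in isolation: clear the process umask so created entries keep exactly the permissions requested, then restore the old mask. As the WARNING above says, umask is process-wide state, so this is only safe while no other goroutine is creating files. A sketch for non-Windows platforms (the file name is illustrative):

```go
//go:build !windows

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	old := unix.Umask(0) // mask off nothing
	defer unix.Umask(old)

	name := filepath.Join(os.TempDir(), "umask-demo")
	_ = os.Remove(name) // make sure O_CREATE actually creates the file
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY, 0o666)
	if err != nil {
		panic(err)
	}
	f.Close()
	defer os.Remove(name)

	fi, err := os.Stat(name)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%o\n", fi.Mode().Perm()) // 666, not 644: the usual 022 umask was bypassed
}
```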
if dest == root { - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMapping.RootPair() + rootIDs := options.IDMap.RootPair() dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go index e1bf74d1d5ed..fcc02f675e9d 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -16,7 +16,7 @@ import ( "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) type applyLayerResponse struct { @@ -42,11 +42,8 @@ func applyLayer() { } // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } + oldmask := unix.Umask(0) + defer unix.Umask(oldmask) if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { fatal(err) diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index 15ed874e7751..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index d6307953595f..000000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,542 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - "unicode/utf8" -) - -// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. -var escapeBytes [8]byte - -// shouldEscape reports whether a rune should be escaped as part of the regex. -// -// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. -// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator -// on Windows. -// -// Adapted from regexp::QuoteMeta in go stdlib. -// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 -func shouldEscape(b rune) bool { - return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 -} - -func init() { - for _, b := range []byte(`.+()|{}$`) { - escapeBytes[b%8] |= 1 << (b / 8) - } -} - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' 
{ - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently. -// -// Deprecated: This implementation is buggy (it only checks a single parent dir -// against the pattern) and will be removed soon. Use either -// MatchesOrParentMatches or MatchesUsingParentResults instead. -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !pattern.exclusion - } - } - - return matched, nil -} - -// MatchesOrParentMatches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently. -func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - - if match { - matched = !pattern.exclusion - } - } - - return matched, nil -} - -// MatchesUsingParentResult returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. 
The functionality is -// the same as Matches, but as an optimization, the caller keeps track of -// whether the parent directory matched. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResult is not safe to call concurrently. -// -// Deprecated: this function does not behave correctly in some cases (see -// https://github.com/docker/buildx/issues/850). -// -// Use MatchesUsingParentResults instead. -func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { - matched := parentMatched - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !pattern.exclusion - } - } - return matched, nil -} - -// MatchInfo tracks information about parent dir matches while traversing a -// filesystem. -type MatchInfo struct { - parentMatched []bool -} - -// MatchesUsingParentResults returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller passes in -// intermediate results from matching the parent directory. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResults is not safe to call concurrently. -func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { - parentMatched := parentMatchInfo.parentMatched - if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { - return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") - } - - file = filepath.FromSlash(file) - matched := false - - matchInfo := MatchInfo{ - parentMatched: make([]bool, len(pm.patterns)), - } - for i, pattern := range pm.patterns { - match := false - // If the parent matched this pattern, we don't need to recheck. - if len(parentMatched) != 0 { - match = parentMatched[i] - } - - if !match { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - var err error - match, err = pattern.match(file) - if err != nil { - return false, matchInfo, err - } - - // If the zero value of MatchInfo was passed in, we don't have - // any information about the parent dir's match results, and we - // apply the same logic as MatchesOrParentMatches. - if !match && len(parentMatched) == 0 { - if parentPath := filepath.Dir(file); parentPath != "." { - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - // Check to see if the pattern matches one of our parent dirs.
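- // For example, given file "a/b/c.go" and pattern "a/b": the file itself
- // does not match, but this walk checks "a", then "a/b", which does.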
- for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - } - } - matchInfo.parentMatched[i] = match - - if match { - matched = !pattern.exclusion - } - } - return matched, matchInfo, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. -type Pattern struct { - matchType matchType - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -type matchType int - -const ( - unknownMatch matchType = iota - exactMatch - prefixMatch - suffixMatch - regexpMatch -) - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - if p.matchType == unknownMatch { - if err := p.compile(string(os.PathSeparator)); err != nil { - return false, filepath.ErrBadPattern - } - } - - switch p.matchType { - case exactMatch: - return path == p.cleanedPattern, nil - case prefixMatch: - // strip trailing ** - return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil - case suffixMatch: - // strip leading ** - suffix := p.cleanedPattern[2:] - if strings.HasSuffix(path, suffix) { - return true, nil - } - // **/foo matches "foo" - return suffix[0] == os.PathSeparator && path == suffix[1:], nil - case regexpMatch: - return p.regexp.MatchString(path), nil - } - - return false, nil -} - -func (p *Pattern) compile(sl string) error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - escSL := sl - if sl == `\` { - escSL += `\` - } - - p.matchType = exactMatch - for i := 0; scan.Peek() != scanner.EOF; i++ { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - if p.matchType == exactMatch { - p.matchType = prefixMatch - } else { - regStr += ".*" - p.matchType = regexpMatch - } - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - p.matchType = regexpMatch - } - - if i == 0 { - p.matchType = suffixMatch - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - p.matchType = regexpMatch - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - p.matchType = regexpMatch - } else if shouldEscape(ch) { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. 
Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - p.matchType = regexpMatch - } else { - regStr += `\` - } - } else if ch == '[' || ch == ']' { - regStr += string(ch) - p.matchType = regexpMatch - } else { - regStr += string(ch) - } - } - - if p.matchType != regexpMatch { - return nil - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - p.matchType = regexpMatch - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// This implementation is buggy (it only checks a single parent dir against the -// pattern) and will be removed soon. Use MatchesOrParentMatches instead. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - -// MatchesOrParentMatches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func MatchesOrParentMatches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.MatchesOrParentMatches(file) -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. 
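-// With isDir true this is equivalent to os.MkdirAll(path, 0755); otherwise it
-// creates the parent directories and then an empty 0755 file, touch-style.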
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index e40cc271b3bf..000000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index f782b4266aad..000000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build linux || freebsd -// +build linux freebsd - -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "fmt" - "os" - - "github.com/sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 3f1ebb65678e..000000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 25a57b231e01..1e0a89004a98 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -108,70 +108,72 @@ type Identity struct { SID string } -// IdentityMapping contains a mappings of UIDs and GIDs -type IdentityMapping struct { - uids []IDMap - gids []IDMap +// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. +func (id Identity) Chown(name string) error { + return os.Chown(name, id.UID, id.GID) } -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { - return &IdentityMapping{uids: uids, gids: gids} +// IdentityMapping contains a mappings of UIDs and GIDs. 
+// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) +func (i IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. // Remapping is only performed if the ids aren't already the remapped root ids -func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { +func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = toHost(pair.UID, i.UIDMaps) if err != nil { return target, err } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = toHost(pair.GID, i.GIDMaps) } return target, err } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) +func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.UIDMaps) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := toContainer(pair.GID, i.GIDMaps) return uid, gid, err } // Empty returns true if there are no id mappings -func (i *IdentityMapping) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 } -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) UIDs() []IDMap { - return i.uids +// UIDs returns the mapping for UID. +// +// Deprecated: reference the UIDMaps field directly. +func (i IdentityMapping) UIDs() []IDMap { + return i.UIDMaps } -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) GIDs() []IDMap { - return i.gids +// GIDs returns the mapping for GID. +// +// Deprecated: reference the GIDMaps field directly. 
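+// For example, callers that previously used m.UIDs() and m.GIDs() can read
+// m.UIDMaps and m.GIDMaps directly.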
+func (i IdentityMapping) GIDs() []IDMap { + return i.GIDMaps } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index ceec0339b567..03846b0307e4 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "os/exec" "path/filepath" "strconv" "sync" @@ -30,6 +31,10 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting // chown the full directory path if it exists var paths []string + path, err := filepath.Abs(path) + if err != nil { + return err + } stat, err := system.Stat(path) if err == nil { @@ -195,7 +200,7 @@ func callGetent(database, key string) (io.Reader, error) { } out, err := execCmd(getentCmd, database, key) if err != nil { - exitCode, errC := system.GetExitCode(err) + exitCode, errC := getExitCode(err) if errC != nil { return nil, err } @@ -209,11 +214,22 @@ func callGetent(database, key string) (io.Reader, error) { default: return nil, err } - } return bytes.NewReader(out), nil } +// getExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + // setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. @@ -240,24 +256,37 @@ func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair +// +// Deprecated: Use LoadIdentityMapping. 
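+//
+// A minimal migration sketch (the username is illustrative):
+//
+//	m, err := LoadIdentityMapping("someuser") // returns IdentityMapping by value
+//	if err != nil { /* handle the error */ }
+//	root := m.RootPair() // host uid/gid that container root maps to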
func NewIdentityMapping(name string) (*IdentityMapping, error) { + m, err := LoadIdentityMapping(name) + if err != nil { + return nil, err + } + return &m, err +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { - return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) + return IdentityMapping{}, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } - return &IdentityMapping{ - uids: subuidRanges, - gids: subgidRanges, + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, }, nil } diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index bf7ae0564ba5..3ad9255df27f 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -88,7 +88,6 @@ func addUser(name string) error { } func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := parseSubuid(name) diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index cf8d04b1b201..000000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,283 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time is always the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` is -// an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation.
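-//
-// A typical progress message on the wire looks like (values are illustrative):
-//
-//	{"status":"Downloading","progressDetail":{"current":512,"total":1024},"id":"abc123def456"}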
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. 
- Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the -// entire current line when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
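- // For example, the final {"status":"Downloaded newer image for ..."} line
- // of a pull has no ID, so it lands here and resets the bookkeeping.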
- ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd988..000000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index 5fe071d6284e..000000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid // import "github.com/docker/docker/pkg/stringid" - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "regexp" - "strconv" - "strings" -) - -const shortLen = 12 - -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - b := make([]byte, 32) - for { - if _, err := rand.Read(b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go deleted file mode 100644 index 4ba8fe35bfd9..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ /dev/null @@ -1,19 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. 
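-//
-// For example, after err := exec.Command("sh", "-c", "exit 3").Run(),
-// GetExitCode(err) returns (3, nil).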
-func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 000000000000..ce5990c914f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +package system + +import ( + "os" + "path/filepath" + "strings" +) + +// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. +// +// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 +// as absolute as it doesn't start with a drive-letter/colon combination. However, +// in docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go new file mode 100644 index 000000000000..b2ee006314a9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use os.Create or github.com/moby/sys/sequential.Create() +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. +// +// Deprecated: use os.Open or github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile() +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. +// +// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 8b991201a98f..380112940495 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -3,10 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "os" - "path/filepath" -) +import "os" // MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { @@ -18,50 +15,3 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. 
- -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return os.CreateTemp(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 8f79dc8fe06c..e3fa9f731ce6 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -2,13 +2,8 @@ package system // import "github.com/docker/docker/pkg/system" import ( "os" - "path/filepath" "regexp" - "strconv" - "strings" - "sync" "syscall" - "time" "unsafe" "golang.org/x/sys/windows" @@ -121,172 +116,3 @@ func mkdirWithACL(name string, sddl string) error { } return nil } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) { - return true - } - return false -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. 
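-// These sequential-access helpers now live in github.com/moby/sys/sequential;
-// the filesys_deprecated.go shims added above delegate to that package
-// (e.g. sequential.Create in place of CreateSequential).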
- -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index cd060eff24de..02a7377c1fe3 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -6,8 +6,6 @@ import ( "os" "strconv" "strings" - - units "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a @@ -42,7 +40,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { if err != nil { continue } - bytes := int64(size) * units.KiB + // Convert to KiB + bytes := int64(size) * 1024 switch parts[0] { case "MemTotal:": @@ -56,7 +55,6 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { case "SwapFree:": meminfo.SwapFree = bytes } - } if memAvailable != -1 { meminfo.MemFree = memAvailable diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go index 6ed93f2fe268..124d2c502dcf 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -27,7 +27,7 @@ type memorystatusex struct { } // ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. +// MemInfo type. 
func ReadMemInfo() (*MemInfo, error) { msi := &memorystatusex{ dwLength: 64, diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index 4d81906b9d24..818b20efee7a 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -13,7 +13,6 @@ func DefaultPathEnv(os string) string { return "" } return defaultUnixPathEnv - } // PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go index d2ab9c3d7e03..1c2c6a3096ee 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -33,6 +33,7 @@ func IsProcessZombie(pid int) (bool, error) { statPath := fmt.Sprintf("/proc/%d/stat", pid) dataBytes, err := os.ReadFile(statPath) if err != nil { + // TODO(thaJeztah) should we ignore os.IsNotExist() here? ("/proc/<pid>/stat" will be gone if the process exited) return false, err } data := string(dataBytes) diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go deleted file mode 100644 index f2d81597c9dc..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ /dev/null @@ -1,79 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "time" - - "github.com/moby/sys/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone; we can just retry the remove operation. -// -// This should not return an `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 50 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error.
- if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go deleted file mode 100644 index ed9c5dcb8ae9..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/rm_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -import "os" - -// EnsureRemoveAll is an alias to os.RemoveAll on Windows -var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index b2456cb88704..0ff3af2fa174 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -20,12 +20,12 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) + return s.mode } // Mtim returns file's last modification time. func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) + return s.mtim } // Stat takes a path to a file and returns diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index ef782d7ac813..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "golang.org/x/sys/windows" - -const ( - // Deprecated: use github.com/docker/pkg/idtools.SeTakeOwnershipPrivilege - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" - // Deprecated: use github.com/docker/pkg/idtools.ContainerAdministratorSidString - ContainerAdministratorSidString = "S-1-5-93-2-1" - // Deprecated: use github.com/docker/pkg/idtools.ContainerUserSidString - ContainerUserSidString = "S-1-5-93-2-2" -) - -// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa -const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION - -// IsWindowsClient returns true if the SKU is client. It returns false on -// Windows server, or if an error occurred when making the GetVersionExW -// syscall. -func IsWindowsClient() bool { - ver := windows.RtlGetVersion() - return ver != nil && ver.ProductType == verNTWorkstation -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index d4a15cbedc33..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. 
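-//
-// Typical save/restore usage (mirroring the chrootarchive applyLayer change above):
-//
-//	oldmask, _ := Umask(0) // clear the mask so any permissions can be set
-//	defer Umask(oldmask)   // restore the previous mask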
-func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index fc62388c3891..000000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json index 80213ddaca1a..f361066a2f7a 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -48,6 +48,10 @@ "subArchitectures": [ "SCMP_ARCH_S390" ] + }, + { + "architecture": "SCMP_ARCH_RISCV64", + "subArchitectures": null } ], "syscalls": [ @@ -127,6 +131,7 @@ "ftruncate64", "futex", "futex_time64", + "futex_waitv", "futimesat", "getcpu", "getcwd", @@ -183,6 +188,9 @@ "io_uring_setup", "ipc", "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", @@ -200,6 +208,7 @@ "madvise", "membarrier", "memfd_create", + "memfd_secret", "mincore", "mkdir", "mkdirat", @@ -239,6 +248,9 @@ "pidfd_send_signal", "pipe", "pipe2", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", "poll", "ppoll", "ppoll_time64", @@ -247,6 +259,7 @@ "preadv", "preadv2", "prlimit64", + "process_mrelease", "pselect6", "pselect6_time64", "pwrite64", @@ -343,7 +356,6 @@ "signalfd4", "sigprocmask", "sigreturn", - "socket", "socketcall", "socketpair", "splice", @@ -407,6 +419,19 @@ "minKernel": "4.8" } }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 40, + "op": "SCMP_CMP_NE" + } + ] + }, { "names": [ "personality" @@ -540,6 +565,17 @@ ] } }, + { + "names": [ + "riscv_flush_icache" + ], + "action": "SCMP_ACT_ALLOW", + "includes": { + "arches": [ + "riscv64" + ] + } + }, { "names": [ "open_by_handle_at" @@ -563,11 +599,13 @@ "fspick", "lookup_dcookie", "mount", + "mount_setattr", "move_mount", "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", + "quotactl_fd", "setdomainname", "sethostname", "setns", @@ -720,7 +758,8 @@ "names": [ "settimeofday", "stime", - "clock_settime" + "clock_settime", + "clock_settime64" ], "action": "SCMP_ACT_ALLOW", "includes": { @@ -763,6 +802,28 @@ "CAP_SYSLOG" ] } + }, + { + "names": [ + "bpf" + ], + "action": "SCMP_ACT_ALLOW", + "includes": { + "caps": [ + "CAP_BPF" + ] + } + }, + { + "names": [ + "perf_event_open" + ], + "action": "SCMP_ACT_ALLOW", + "includes": { + "caps": [ + "CAP_PERFMON" + ] + } } ] } \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default_linux.go b/vendor/github.com/docker/docker/profiles/seccomp/default_linux.go index e51f1018aabe..1ee7d7a808b0 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/default_linux.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/default_linux.go @@ -1,6 +1,3 @@ -//go:build seccomp -// +build seccomp - package seccomp // import "github.com/docker/docker/profiles/seccomp" import ( @@ -38,6 +35,10 @@ func arches() []Architecture { Arch: specs.ArchS390X, SubArches: []specs.Arch{specs.ArchS390}, }, + { + Arch: 
specs.ArchRISCV64, + SubArches: nil, + }, } } @@ -122,6 +123,7 @@ func DefaultProfile() *Seccomp { "ftruncate64", "futex", "futex_time64", + "futex_waitv", "futimesat", "getcpu", "getcwd", @@ -178,6 +180,9 @@ func DefaultProfile() *Seccomp { "io_uring_setup", "ipc", "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", "lchown", "lchown32", "lgetxattr", @@ -195,6 +200,7 @@ func DefaultProfile() *Seccomp { "madvise", "membarrier", "memfd_create", + "memfd_secret", "mincore", "mkdir", "mkdirat", @@ -234,6 +240,9 @@ func DefaultProfile() *Seccomp { "pidfd_send_signal", "pipe", "pipe2", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", "poll", "ppoll", "ppoll_time64", @@ -242,6 +251,7 @@ func DefaultProfile() *Seccomp { "preadv", "preadv2", "prlimit64", + "process_mrelease", "pselect6", "pselect6_time64", "pwrite64", @@ -338,7 +348,6 @@ func DefaultProfile() *Seccomp { "signalfd4", "sigprocmask", "sigreturn", - "socket", "socketcall", "socketpair", "splice", @@ -405,6 +414,19 @@ func DefaultProfile() *Seccomp { MinKernel: &KernelVersion{4, 8}, }, }, + { + LinuxSyscall: specs.LinuxSyscall{ + Names: []string{"socket"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: unix.AF_VSOCK, + Op: specs.OpNotEqual, + }, + }, + }, + }, { LinuxSyscall: specs.LinuxSyscall{ Names: []string{"personality"}, @@ -533,6 +555,17 @@ func DefaultProfile() *Seccomp { Arches: []string{"s390", "s390x"}, }, }, + { + LinuxSyscall: specs.LinuxSyscall{ + Names: []string{ + "riscv_flush_icache", + }, + Action: specs.ActAllow, + }, + Includes: &Filter{ + Arches: []string{"riscv64"}, + }, + }, { LinuxSyscall: specs.LinuxSyscall{ Names: []string{ @@ -557,11 +590,13 @@ func DefaultProfile() *Seccomp { "fspick", "lookup_dcookie", "mount", + "mount_setattr", "move_mount", "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", + "quotactl_fd", "setdomainname", "sethostname", "setns", @@ -711,6 +746,7 @@ func DefaultProfile() *Seccomp { "settimeofday", "stime", "clock_settime", + "clock_settime64", }, Action: specs.ActAllow, }, @@ -753,6 +789,28 @@ func DefaultProfile() *Seccomp { Caps: []string{"CAP_SYSLOG"}, }, }, + { + LinuxSyscall: specs.LinuxSyscall{ + Names: []string{ + "bpf", + }, + Action: specs.ActAllow, + }, + Includes: &Filter{ + Caps: []string{"CAP_BPF"}, + }, + }, + { + LinuxSyscall: specs.LinuxSyscall{ + Names: []string{ + "perf_event_open", + }, + Action: specs.ActAllow, + }, + Includes: &Filter{ + Caps: []string{"CAP_PERFMON"}, + }, + }, } errnoRet := uint(unix.EPERM) diff --git a/vendor/github.com/docker/docker/profiles/seccomp/kernel_linux.go b/vendor/github.com/docker/docker/profiles/seccomp/kernel_linux.go index 558eabda3880..9f62697d68f6 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/kernel_linux.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/kernel_linux.go @@ -1,7 +1,6 @@ package seccomp import ( - "bytes" "fmt" "sync" @@ -22,7 +21,7 @@ func getKernelVersion() (*KernelVersion, error) { return } // Remove the \x00 from the release for Atoi to parse correctly - currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) + currentKernelVersion, kernelVersionError = parseRelease(unix.ByteSliceToString(uts.Release[:])) }) return currentKernelVersion, kernelVersionError } diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go deleted file mode 100644 index 
d337695e10b5..000000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build linux && !seccomp -// +build linux,!seccomp - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -// DefaultProfile returns a nil pointer on unsupported systems. -func DefaultProfile() *Seccomp { - return nil -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/config.go b/vendor/github.com/docker/docker/testutil/daemon/config.go deleted file mode 100644 index 1bf182ae2890..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/config.go +++ /dev/null @@ -1,72 +0,0 @@ -package daemon - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "gotest.tools/v3/assert" -) - -// ConfigConstructor defines a swarm config constructor -type ConfigConstructor func(*swarm.Config) - -// CreateConfig creates a config given the specified spec -func (d *Daemon) CreateConfig(t testing.TB, configSpec swarm.ConfigSpec) string { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - scr, err := cli.ConfigCreate(context.Background(), configSpec) - assert.NilError(t, err) - return scr.ID -} - -// ListConfigs returns the list of the current swarm configs -func (d *Daemon) ListConfigs(t testing.TB) []swarm.Config { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - configs, err := cli.ConfigList(context.Background(), types.ConfigListOptions{}) - assert.NilError(t, err) - return configs -} - -// GetConfig returns a swarm config identified by the specified id -func (d *Daemon) GetConfig(t testing.TB, id string) *swarm.Config { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - config, _, err := cli.ConfigInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - return &config -} - -// DeleteConfig removes the swarm config identified by the specified id -func (d *Daemon) DeleteConfig(t testing.TB, id string) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.ConfigRemove(context.Background(), id) - assert.NilError(t, err) -} - -// UpdateConfig updates the swarm config identified by the specified id -// Currently, only label update is supported. 
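A quick aside on the seccomp profile changes above, before the deleted testutil files continue: the `socket` syscall is removed from the unconditional allowlist and re-added as a conditional rule that permits it only when the address-family argument is not `AF_VSOCK` (the literal `40` in the JSON). A minimal sketch of that rule using the runtime-spec types — the package name is illustrative, but the constants and field names are the ones the vendored Go profile uses:

```go
package profile

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/sys/unix"
)

// socketRule allows socket(2) unless argument 0 (the address family)
// equals AF_VSOCK (40), matching the JSON rule's SCMP_CMP_NE operator.
var socketRule = specs.LinuxSyscall{
	Names:  []string{"socket"},
	Action: specs.ActAllow,
	Args: []specs.LinuxSeccompArg{{
		Index: 0,
		Value: unix.AF_VSOCK,
		Op:    specs.OpNotEqual,
	}},
}
```

The effect is that containers keep ordinary networking while `socket(AF_VSOCK, ...)` falls through to the profile's default errno action.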
-func (d *Daemon) UpdateConfig(t testing.TB, id string, f ...ConfigConstructor) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - config := d.GetConfig(t, id) - for _, fn := range f { - fn(config) - } - - err := cli.ConfigUpdate(context.Background(), config.ID, config.Version, config.Spec) - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/container.go b/vendor/github.com/docker/docker/testutil/daemon/container.go deleted file mode 100644 index 8e88e7b202b8..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/container.go +++ /dev/null @@ -1,36 +0,0 @@ -package daemon - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "gotest.tools/v3/assert" -) - -// ActiveContainers returns the list of ids of the currently running containers -func (d *Daemon) ActiveContainers(t testing.TB) []string { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - assert.NilError(t, err) - - ids := make([]string, len(containers)) - for i, c := range containers { - ids[i] = c.ID - } - return ids -} - -// FindContainerIP returns the ip of the specified container -func (d *Daemon) FindContainerIP(t testing.TB, id string) string { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - i, err := cli.ContainerInspect(context.Background(), id) - assert.NilError(t, err) - return i.NetworkSettings.IPAddress -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/daemon.go b/vendor/github.com/docker/docker/testutil/daemon/daemon.go deleted file mode 100644 index 9d9fa5e57e5f..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/daemon.go +++ /dev/null @@ -1,867 +0,0 @@ -package daemon // import "github.com/docker/docker/testutil/daemon" - -import ( - "context" - "encoding/json" - "net/http" - "os" - "os/exec" - "os/user" - "path/filepath" - "strconv" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/testutil/request" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "gotest.tools/v3/assert" -) - -// LogT is the subset of the testing.TB interface used by the daemon. -type LogT interface { - Logf(string, ...interface{}) -} - -// nopLog is a no-op implementation of LogT that is used in daemons created by -// NewDaemon (where no testing.TB is available). 
-type nopLog struct{} - -func (nopLog) Logf(string, ...interface{}) {} - -const ( - defaultDockerdBinary = "dockerd" - defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock" - defaultDockerdRootlessBinary = "dockerd-rootless.sh" - defaultUnixSocket = "/var/run/docker.sock" - defaultTLSHost = "localhost:2376" -) - -var errDaemonNotStarted = errors.New("daemon not started") - -// SockRoot holds the path of the default docker integration daemon socket -var SockRoot = filepath.Join(os.TempDir(), "docker-integration") - -type clientConfig struct { - transport *http.Transport - scheme string - addr string -} - -// Daemon represents a Docker daemon for the testing framework -type Daemon struct { - Root string - Folder string - Wait chan error - UseDefaultHost bool - UseDefaultTLSHost bool - - id string - logFile *os.File - cmd *exec.Cmd - storageDriver string - userlandProxy bool - defaultCgroupNamespaceMode string - execRoot string - experimental bool - init bool - dockerdBinary string - log LogT - pidFile string - args []string - containerdSocket string - rootlessUser *user.User - rootlessXDGRuntimeDir string - - // swarm related field - swarmListenAddr string - SwarmPort int // FIXME(vdemeester) should probably not be exported - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 - OOMScoreAdjust int - // cached information - CachedInfo types.Info -} - -// NewDaemon returns a Daemon instance to be used for testing. -// The daemon will not automatically start. -// The daemon will modify and create files under workingDir. -func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) { - storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") - - if err := os.MkdirAll(SockRoot, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot) - } - - id := "d" + stringid.TruncateID(stringid.GenerateRandomID()) - dir := filepath.Join(workingDir, id) - daemonFolder, err := filepath.Abs(dir) - if err != nil { - return nil, err - } - daemonRoot := filepath.Join(daemonFolder, "root") - if err := os.MkdirAll(daemonRoot, 0755); err != nil { - return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot) - } - - userlandProxy := true - if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { - if val, err := strconv.ParseBool(env); err != nil { - userlandProxy = val - } - } - d := &Daemon{ - id: id, - Folder: daemonFolder, - Root: daemonRoot, - storageDriver: storageDriver, - userlandProxy: userlandProxy, - // dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation) - execRoot: filepath.Join(os.TempDir(), "dxr", id), - dockerdBinary: defaultDockerdBinary, - swarmListenAddr: defaultSwarmListenAddr, - SwarmPort: DefaultSwarmPort, - log: nopLog{}, - containerdSocket: defaultContainerdSocket, - } - - for _, op := range ops { - op(d) - } - - if d.rootlessUser != nil { - if err := os.Chmod(SockRoot, 0777); err != nil { - return nil, err - } - uid, err := strconv.Atoi(d.rootlessUser.Uid) - if err != nil { - return nil, err - } - gid, err := strconv.Atoi(d.rootlessUser.Gid) - if err != nil { - return nil, err - } - if err := os.Chown(d.Folder, uid, gid); err != nil { - return nil, err - } - if err := os.Chown(d.Root, uid, gid); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil { - return nil, err - } - if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil { - return nil, err - } - if err := os.MkdirAll(d.execRoot, 0700); err != nil { - 
return nil, err - } - if err := os.Chown(d.execRoot, uid, gid); err != nil { - return nil, err - } - d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun") - if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil { - return nil, err - } - if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil { - return nil, err - } - d.containerdSocket = "" - } - - return d, nil -} - -// New returns a Daemon instance to be used for testing. -// This will create a directory such as d123456789 in the folder specified by -// $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. -// The daemon will not automatically start. -func New(t testing.TB, ops ...Option) *Daemon { - t.Helper() - dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") - if dest == "" { - dest = os.Getenv("DEST") - } - dest = filepath.Join(dest, t.Name()) - - assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") - - if os.Getenv("DOCKER_ROOTLESS") != "" { - if os.Getenv("DOCKER_REMAP_ROOT") != "" { - t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently") - } - if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { - if val, err := strconv.ParseBool(env); err == nil && !val { - t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false") - } - } - ops = append(ops, WithRootlessUser("unprivilegeduser")) - } - ops = append(ops, WithOOMScoreAdjust(-500)) - - d, err := NewDaemon(dest, ops...) - assert.NilError(t, err, "could not create daemon at %q", dest) - if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary { - t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary) - } - - return d -} - -// BinaryPath returns the path to the dockerd binary. -func (d *Daemon) BinaryPath() (string, error) { - dockerdBinary, err := exec.LookPath(d.dockerdBinary) - if err != nil { - return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) - } - return dockerdBinary, nil -} - -// ContainersNamespace returns the containerd namespace used for containers. -func (d *Daemon) ContainersNamespace() string { - return d.id -} - -// RootDir returns the root directory of the daemon. -func (d *Daemon) RootDir() string { - return d.Root -} - -// ID returns the generated id of the daemon -func (d *Daemon) ID() string { - return d.id -} - -// StorageDriver returns the configured storage driver of the daemon -func (d *Daemon) StorageDriver() string { - return d.storageDriver -} - -// Sock returns the socket path of the daemon -func (d *Daemon) Sock() string { - return "unix://" + d.sockPath() -} - -func (d *Daemon) sockPath() string { - return filepath.Join(SockRoot, d.id+".sock") -} - -// LogFileName returns the path of the daemon's log file -func (d *Daemon) LogFileName() string { - return d.logFile.Name() -} - -// ReadLogFile returns the content of the daemon log file -func (d *Daemon) ReadLogFile() ([]byte, error) { - _ = d.logFile.Sync() - return os.ReadFile(d.logFile.Name()) -} - -// NewClientT creates a new client based on the daemon's socket path -func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client { - t.Helper() - - c, err := d.NewClient(extraOpts...)
- assert.NilError(t, err, "[%s] could not create daemon client", d.id) - return c -} - -// NewClient creates a new client based on the daemon's socket path -func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) { - clientOpts := []client.Opt{ - client.FromEnv, - client.WithHost(d.Sock()), - } - clientOpts = append(clientOpts, extraOpts...) - - return client.NewClientWithOpts(clientOpts...) -} - -// Cleanup cleans the daemon files: exec root (network namespaces, ...), swarmkit files -func (d *Daemon) Cleanup(t testing.TB) { - t.Helper() - cleanupMount(t, d) - cleanupRaftDir(t, d) - cleanupDaemonStorage(t, d) - cleanupNetworkNamespace(t, d) -} - -// Start starts the daemon and returns once it is ready to receive requests. -func (d *Daemon) Start(t testing.TB, args ...string) { - t.Helper() - if err := d.StartWithError(args...); err != nil { - d.DumpStackAndQuit() // in case the daemon is stuck - t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err) - } -} - -// StartWithError starts the daemon and returns once it is ready to receive requests. -// It returns an error in case it couldn't start. -func (d *Daemon) StartWithError(args ...string) error { - logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - return errors.Wrapf(err, "[%s] failed to create logfile", d.id) - } - - return d.StartWithLogFile(logFile, args...) -} - -// StartWithLogFile will start the daemon and attach its streams to a given file. -func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { - d.handleUserns() - dockerdBinary, err := d.BinaryPath() - if err != nil { - return err - } - - if d.pidFile == "" { - d.pidFile = filepath.Join(d.Folder, "docker.pid") - } - - d.args = []string{} - if d.rootlessUser != nil { - if d.dockerdBinary != defaultDockerdBinary { - return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary) - } - dockerdBinary = "sudo" - d.args = append(d.args, - "-u", d.rootlessUser.Username, - "-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir, - "-E", "HOME="+d.rootlessUser.HomeDir, - "-E", "PATH="+os.Getenv("PATH"), - "--", - defaultDockerdRootlessBinary, - ) - } - - d.args = append(d.args, - "--data-root", d.Root, - "--exec-root", d.execRoot, - "--pidfile", d.pidFile, - "--userland-proxy="+strconv.FormatBool(d.userlandProxy), - "--containerd-namespace", d.id, - "--containerd-plugins-namespace", d.id+"p", - ) - if d.containerdSocket != "" { - d.args = append(d.args, "--containerd", d.containerdSocket) - } - - if d.defaultCgroupNamespaceMode != "" { - d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode) - } - if d.experimental { - d.args = append(d.args, "--experimental") - } - if d.init { - d.args = append(d.args, "--init") - } - if !(d.UseDefaultHost || d.UseDefaultTLSHost) { - d.args = append(d.args, "--host", d.Sock()) - } - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - d.args = append(d.args, "--userns-remap", root) - } - - // If we don't explicitly set the log-level or debug flag (-D) then - // turn on debug mode - foundLog := false - foundSd := false - for _, a := range providedArgs { - if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { - foundLog = true - } - if strings.Contains(a, "--storage-driver") { - foundSd = true - } - } - if !foundLog { - d.args = append(d.args, "--debug") - } - if d.storageDriver != "" &&
!foundSd { - d.args = append(d.args, "--storage-driver", d.storageDriver) - } - - d.args = append(d.args, providedArgs...) - d.cmd = exec.Command(dockerdBinary, d.args...) - d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") - d.cmd.Stdout = out - d.cmd.Stderr = out - d.logFile = out - if d.rootlessUser != nil { - // sudo requires this for propagating signals - setsid(d.cmd) - } - - if err := d.cmd.Start(); err != nil { - return errors.Wrapf(err, "[%s] could not start daemon container", d.id) - } - - wait := make(chan error, 1) - - go func() { - ret := d.cmd.Wait() - d.log.Logf("[%s] exiting daemon", d.id) - // If we send before logging, we might accidentally log _after_ the test is done. - // As of Go 1.12, this incurs a panic instead of silently being dropped. - wait <- ret - close(wait) - }() - - d.Wait = wait - - clientConfig, err := d.getClientConfig() - if err != nil { - return err - } - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest(http.MethodGet, "/_ping", nil) - if err != nil { - return errors.Wrapf(err, "[%s] could not create new request", d.id) - } - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - // make sure daemon is ready to receive requests - for i := 0; ; i++ { - d.log.Logf("[%s] waiting for daemon to start", d.id) - - select { - case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id) - case err := <-d.Wait: - return errors.Wrapf(err, "[%s] daemon exited during startup", d.id) - default: - rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second) - defer rcancel() - - resp, err := client.Do(req.WithContext(rctx)) - if err != nil { - if i > 2 { // don't log the first couple, this ends up just being noise - d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err) - } - - select { - case <-ctx.Done(): - case <-time.After(500 * time.Millisecond): - } - continue - } - - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) - } - d.log.Logf("[%s] daemon started\n", d.id) - d.Root, err = d.queryRootDir() - if err != nil { - return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id) - } - return nil - } - } -} - -// StartWithBusybox will first start the daemon with Daemon.Start() -// then save the busybox image from the main daemon and load it into this Daemon instance. -func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) { - t.Helper() - d.Start(t, arg...) 
- d.LoadBusybox(t) -} - -// Kill will send a SIGKILL to the daemon -func (d *Daemon) Kill() error { - if d.cmd == nil || d.Wait == nil { - return errDaemonNotStarted - } - - defer func() { - d.logFile.Close() - d.cmd = nil - }() - - if err := d.cmd.Process.Kill(); err != nil { - return err - } - - if d.pidFile != "" { - _ = os.Remove(d.pidFile) - } - return nil -} - -// Pid returns the pid of the daemon -func (d *Daemon) Pid() int { - return d.cmd.Process.Pid -} - -// Interrupt stops the daemon by sending it an Interrupt signal -func (d *Daemon) Interrupt() error { - return d.Signal(os.Interrupt) -} - -// Signal sends the specified signal to the daemon if running -func (d *Daemon) Signal(signal os.Signal) error { - if d.cmd == nil || d.Wait == nil { - return errDaemonNotStarted - } - return d.cmd.Process.Signal(signal) -} - -// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its -// stack to its log file and exit. -// This is used primarily for gathering debug information on test timeout -func (d *Daemon) DumpStackAndQuit() { - if d.cmd == nil || d.cmd.Process == nil { - return - } - SignalDaemonDump(d.cmd.Process.Pid) -} - -// Stop will send a SIGINT every second and wait for the daemon to stop. -// If it times out, a SIGKILL is sent. -// Stop will not delete the daemon directory. If a purged daemon is needed, -// instantiate a new one with NewDaemon. -// If an error occurs while stopping the daemon, the test will fail. -func (d *Daemon) Stop(t testing.TB) { - t.Helper() - err := d.StopWithError() - if err != nil { - if err != errDaemonNotStarted { - t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err) - } else { - t.Logf("[%s] daemon is not started", d.id) - } - } -} - -// StopWithError will send a SIGINT every second and wait for the daemon to stop. -// If it times out, a SIGKILL is sent. -// Stop will not delete the daemon directory. If a purged daemon is needed, -// instantiate a new one with NewDaemon.
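The StopWithError implementation that follows drives an interrupt-then-kill escalation: SIGINT first, a grace period for shutdown hooks, repeated interrupts, then SIGKILL. As a condensed, standalone sketch of the same pattern (the names and the single timeout are illustrative simplifications, not the vendored API):

```go
package procutil

import (
	"os"
	"os/exec"
	"time"
)

// stopGracefully interrupts a started child process, waits up to
// patience for it to exit on its own, then falls back to SIGKILL.
func stopGracefully(cmd *exec.Cmd, wait <-chan error, patience time.Duration) error {
	if err := cmd.Process.Signal(os.Interrupt); err != nil {
		return err
	}
	select {
	case err := <-wait: // exited on its own; wait carries cmd.Wait()'s result
		return err
	case <-time.After(patience):
		return cmd.Process.Kill()
	}
}
```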
-func (d *Daemon) StopWithError() (err error) { - if d.cmd == nil || d.Wait == nil { - return errDaemonNotStarted - } - defer func() { - if err != nil { - d.log.Logf("[%s] error while stopping daemon: %v", d.id, err) - } else { - d.log.Logf("[%s] daemon stopped", d.id) - if d.pidFile != "" { - _ = os.Remove(d.pidFile) - } - } - if err := d.logFile.Close(); err != nil { - d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err) - } - d.cmd = nil - }() - - i := 1 - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - tick := ticker.C - - d.log.Logf("[%s] stopping daemon", d.id) - - if err := d.cmd.Process.Signal(os.Interrupt); err != nil { - if strings.Contains(err.Error(), "os: process already finished") { - return errDaemonNotStarted - } - return errors.Wrapf(err, "[%s] could not send signal", d.id) - } - -out1: - for { - select { - case err := <-d.Wait: - return err - case <-time.After(20 * time.Second): - // time for stopping jobs and running onShutdown hooks - d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id) - break out1 - } - } - -out2: - for { - select { - case err := <-d.Wait: - return err - case <-tick: - i++ - if i > 5 { - d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i) - break out2 - } - d.log.Logf("[%s] attempt #%d/5: daemon is still running with pid %d", d.id, i, d.cmd.Process.Pid) - if err := d.cmd.Process.Signal(os.Interrupt); err != nil { - return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i) - } - } - } - - if err := d.cmd.Process.Kill(); err != nil { - d.log.Logf("[%s] failed to kill daemon: %v", d.id, err) - return err - } - - return nil -} - -// Restart will restart the daemon by first stopping it and then starting it. -// If an error occurs while starting the daemon, the test will fail. -func (d *Daemon) Restart(t testing.TB, args ...string) { - t.Helper() - d.Stop(t) - d.Start(t, args...) -} - -// RestartWithError will restart the daemon by first stopping it and then starting it. -func (d *Daemon) RestartWithError(arg ...string) error { - if err := d.StopWithError(); err != nil { - return err - } - return d.StartWithError(arg...)
-} - -func (d *Daemon) handleUserns() { - // in the case of tests running a user namespace-enabled daemon, we have resolved - // d.Root to be the actual final path of the graph dir after the "uid.gid" of - // remapped root is added--we need to subtract it from the path before calling - // start or else we will continue making subdirectories rather than truly restarting - // with the same location/root: - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - d.Root = filepath.Dir(d.Root) - } -} - -// ReloadConfig asks the daemon to reload its configuration -func (d *Daemon) ReloadConfig() error { - if d.cmd == nil || d.cmd.Process == nil { - return errors.New("daemon is not running") - } - - errCh := make(chan error, 1) - started := make(chan struct{}) - go func() { - _, body, err := request.Get("/events", request.Host(d.Sock())) - close(started) - if err != nil { - errCh <- err - return - } - defer body.Close() - dec := json.NewDecoder(body) - for { - var e events.Message - if err := dec.Decode(&e); err != nil { - errCh <- err - return - } - if e.Type != events.DaemonEventType { - continue - } - if e.Action != "reload" { - continue - } - close(errCh) // notify that we are done - return - } - }() - - <-started - if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { - return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id) - } - select { - case err := <-errCh: - if err != nil { - return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id) - } - case <-time.After(30 * time.Second): - return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id) - } - return nil -} - -// LoadBusybox image into the daemon -func (d *Daemon) LoadBusybox(t testing.TB) { - t.Helper() - clientHost, err := client.NewClientWithOpts(client.FromEnv) - assert.NilError(t, err, "[%s] failed to create client", d.id) - defer clientHost.Close() - - ctx := context.Background() - reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) - assert.NilError(t, err, "[%s] failed to download busybox", d.id) - defer reader.Close() - - c := d.NewClientT(t) - defer c.Close() - - resp, err := c.ImageLoad(ctx, reader, true) - assert.NilError(t, err, "[%s] failed to load busybox", d.id) - defer resp.Body.Close() -} - -func (d *Daemon) getClientConfig() (*clientConfig, error) { - var ( - transport *http.Transport - scheme string - addr string - proto string - ) - if d.UseDefaultTLSHost { - option := &tlsconfig.Options{ - CAFile: "fixtures/https/ca.pem", - CertFile: "fixtures/https/client-cert.pem", - KeyFile: "fixtures/https/client-key.pem", - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - transport = &http.Transport{ - TLSClientConfig: tlsConfig, - } - addr = defaultTLSHost - scheme = "https" - proto = "tcp" - } else if d.UseDefaultHost { - addr = defaultUnixSocket - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } else { - addr = d.sockPath() - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } - - if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { - return nil, err - } - transport.DisableKeepAlives = true - if proto == "unix" { - addr = filepath.Base(addr) - } - return &clientConfig{ - transport: transport, - scheme: scheme, - addr: addr, - }, nil -} - -func (d *Daemon) queryRootDir() (string, error) { - // update daemon root by asking /info endpoint (to support user - // namespaced daemon with root remapped uid.gid directory) - clientConfig, err := 
d.getClientConfig() - if err != nil { - return "", err - } - - c := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest(http.MethodGet, "/info", nil) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/json") - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - - resp, err := c.Do(req) - if err != nil { - return "", err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - return resp.Body.Close() - }) - - type Info struct { - DockerRootDir string - } - var b []byte - var i Info - b, err = request.ReadBody(body) - if err == nil && resp.StatusCode == http.StatusOK { - // read the docker root dir - if err = json.Unmarshal(b, &i); err == nil { - return i.DockerRootDir, nil - } - } - return "", err -} - -// Info returns the info struct for this daemon -func (d *Daemon) Info(t testing.TB) types.Info { - t.Helper() - c := d.NewClientT(t) - info, err := c.Info(context.Background()) - assert.NilError(t, err) - assert.NilError(t, c.Close()) - return info -} - -// cleanupRaftDir removes swarmkit wal files if present -func cleanupRaftDir(t testing.TB, d *Daemon) { - t.Helper() - for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} { - dir := filepath.Join(d.Root, "swarm/raft", p) - if err := os.RemoveAll(dir); err != nil { - t.Logf("[%s] error removing %v: %v", d.id, dir, err) - } - } -} - -// cleanupDaemonStorage removes the daemon's storage directory. -// -// Note that we don't delete the whole directory, as some files (e.g. daemon -// logs) are collected for inclusion in the "bundles" that are stored as Jenkins -// artifacts. -// -// We currently do not include container logs in the bundles, so this also -// removes the "containers" sub-directory. -func cleanupDaemonStorage(t testing.TB, d *Daemon) { - t.Helper() - dirs := []string{ - "builder", - "buildkit", - "containers", - "image", - "network", - "plugins", - "tmp", - "trust", - "volumes", - // note: this assumes storage-driver name matches the subdirectory, - // which is currently true, but not guaranteed. 
- d.storageDriver, - } - - for _, p := range dirs { - dir := filepath.Join(d.Root, p) - if err := os.RemoveAll(dir); err != nil { - t.Logf("[%s] error removing %v: %v", d.id, dir, err) - } - } -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/daemon_freebsd.go b/vendor/github.com/docker/docker/testutil/daemon/daemon_freebsd.go deleted file mode 100644 index 0d182d4fb9a7..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/daemon_freebsd.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build freebsd -// +build freebsd - -package daemon // import "github.com/docker/docker/testutil/daemon" - -import ( - "testing" - - "gotest.tools/v3/assert" -) - -func cleanupNetworkNamespace(_ testing.TB, _ *Daemon) {} - -// CgroupNamespace returns the cgroup namespace the daemon is running in -func (d *Daemon) CgroupNamespace(t testing.TB) string { - assert.Assert(t, false, "cgroup namespaces are not supported on FreeBSD") - return "" -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/daemon_linux.go b/vendor/github.com/docker/docker/testutil/daemon/daemon_linux.go deleted file mode 100644 index 720c52a4364e..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/daemon_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -package daemon // import "github.com/docker/docker/testutil/daemon" - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "golang.org/x/sys/unix" - "gotest.tools/v3/assert" -) - -func cleanupNetworkNamespace(t testing.TB, d *Daemon) { - t.Helper() - // Cleanup network namespaces in the exec root of this - // daemon because this exec root is specific to this - // daemon instance and has no chance of getting - // cleaned up when a new daemon is instantiated with a - // new exec root. - netnsPath := filepath.Join(d.execRoot, "netns") - filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { - if err := unix.Unmount(path, unix.MNT_DETACH); err != nil && err != unix.EINVAL && err != unix.ENOENT { - t.Logf("[%s] unmount of %s failed: %v", d.id, path, err) - } - os.Remove(path) - return nil - }) -} - -// CgroupNamespace returns the cgroup namespace the daemon is running in -func (d *Daemon) CgroupNamespace(t testing.TB) string { - link, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/cgroup", d.Pid())) - assert.NilError(t, err) - - return strings.TrimSpace(link) -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/daemon_unix.go b/vendor/github.com/docker/docker/testutil/daemon/daemon_unix.go deleted file mode 100644 index 5ad7812b0440..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/daemon_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !windows -// +build !windows - -package daemon // import "github.com/docker/docker/testutil/daemon" - -import ( - "os/exec" - "syscall" - "testing" - - "github.com/moby/sys/mount" - "golang.org/x/sys/unix" -) - -// cleanupMount unmounts the daemon root directory, or logs a message if -// unmounting failed. 
-func cleanupMount(t testing.TB, d *Daemon) { - t.Helper() - if err := mount.Unmount(d.Root); err != nil { - d.log.Logf("[%s] unable to unmount daemon root (%s): %v", d.id, d.Root, err) - } -} - -// SignalDaemonDump sends a signal to the daemon to write a dump file -func SignalDaemonDump(pid int) { - unix.Kill(pid, unix.SIGQUIT) -} - -func signalDaemonReload(pid int) error { - return unix.Kill(pid, unix.SIGHUP) -} - -func setsid(cmd *exec.Cmd) { - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.SysProcAttr.Setsid = true -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/daemon_windows.go b/vendor/github.com/docker/docker/testutil/daemon/daemon_windows.go deleted file mode 100644 index be94b5283839..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/daemon_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -package daemon - -import ( - "fmt" - "os/exec" - "strconv" - "testing" - - "golang.org/x/sys/windows" - "gotest.tools/v3/assert" -) - -// SignalDaemonDump sends a signal to the daemon to write a dump file -func SignalDaemonDump(pid int) { - ev, _ := windows.UTF16PtrFromString("Global\\docker-daemon-" + strconv.Itoa(pid)) - h2, err := windows.OpenEvent(0x0002, false, ev) - if h2 == 0 || err != nil { - return - } - windows.PulseEvent(h2) -} - -func signalDaemonReload(pid int) error { - return fmt.Errorf("daemon reload not supported") -} - -func cleanupMount(_ testing.TB, _ *Daemon) {} - -func cleanupNetworkNamespace(_ testing.TB, _ *Daemon) {} - -// CgroupNamespace returns the cgroup namespace the daemon is running in -func (d *Daemon) CgroupNamespace(t testing.TB) string { - assert.Assert(t, false) - return "cgroup namespaces are not supported on Windows" -} - -func setsid(cmd *exec.Cmd) { -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/doc.go b/vendor/github.com/docker/docker/testutil/daemon/doc.go deleted file mode 100644 index add30e3cfb4a..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package daemon launches dockerd for testing purposes. 
-package daemon // import "github.com/docker/docker/testutil/daemon" diff --git a/vendor/github.com/docker/docker/testutil/daemon/node.go b/vendor/github.com/docker/docker/testutil/daemon/node.go deleted file mode 100644 index 89d0817b0030..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/node.go +++ /dev/null @@ -1,81 +0,0 @@ -package daemon - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "gotest.tools/v3/assert" -) - -// NodeConstructor defines a swarm node constructor -type NodeConstructor func(*swarm.Node) - -// GetNode returns a swarm node identified by the specified id -func (d *Daemon) GetNode(t testing.TB, id string, errCheck ...func(error) bool) *swarm.Node { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - node, _, err := cli.NodeInspectWithRaw(context.Background(), id) - if err != nil { - for _, f := range errCheck { - if f(err) { - return nil - } - } - } - assert.NilError(t, err, "[%s] (*Daemon).GetNode: NodeInspectWithRaw(%q) failed", d.id, id) - assert.Check(t, node.ID == id) - return &node -} - -// RemoveNode removes the specified node -func (d *Daemon) RemoveNode(t testing.TB, id string, force bool) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - options := types.NodeRemoveOptions{ - Force: force, - } - err := cli.NodeRemove(context.Background(), id, options) - assert.NilError(t, err) -} - -// UpdateNode updates a swarm node with the specified node constructor -func (d *Daemon) UpdateNode(t testing.TB, id string, f ...NodeConstructor) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - for i := 0; ; i++ { - node := d.GetNode(t, id) - for _, fn := range f { - fn(node) - } - - err := cli.NodeUpdate(context.Background(), node.ID, node.Version, node.Spec) - if i < 10 && err != nil && strings.Contains(err.Error(), "update out of sequence") { - time.Sleep(100 * time.Millisecond) - continue - } - assert.NilError(t, err) - return - } -} - -// ListNodes returns the list of the current swarm nodes -func (d *Daemon) ListNodes(t testing.TB) []swarm.Node { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{}) - assert.NilError(t, err) - - return nodes -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/ops.go b/vendor/github.com/docker/docker/testutil/daemon/ops.go deleted file mode 100644 index c977dcef44f7..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/ops.go +++ /dev/null @@ -1,124 +0,0 @@ -package daemon - -import ( - "os/user" - - "github.com/docker/docker/testutil/environment" -) - -// Option is used to configure a daemon. -type Option func(*Daemon) - -// WithContainerdSocket sets the --containerd option on the daemon. -// Use an empty string to remove the option. -// -// If unset the --containerd option will be used with a default value. -func WithContainerdSocket(socket string) Option { - return func(d *Daemon) { - d.containerdSocket = socket - } -} - -// WithDefaultCgroupNamespaceMode sets the default cgroup namespace mode for the daemon -func WithDefaultCgroupNamespaceMode(mode string) Option { - return func(d *Daemon) { - d.defaultCgroupNamespaceMode = mode - } -} - -// WithTestLogger causes the daemon to log certain actions to the provided test. 
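The ops.go file being deleted here is a textbook instance of Go's functional-options pattern: each `With*` helper returns a closure that mutates the `Daemon` before it starts, and the constructor applies defaults first and options second (the remaining `With*` options continue below). A minimal self-contained version of the idea, with all names hypothetical:

```go
package opts

// Server stands in for Daemon; Option mirrors daemon.Option:
// a function that tweaks the struct in place before use.
type Server struct {
	addr  string
	debug bool
}

type Option func(*Server)

func WithAddr(addr string) Option { return func(s *Server) { s.addr = addr } }
func WithDebug() Option           { return func(s *Server) { s.debug = true } }

// NewServer applies defaults first, then each caller-supplied option,
// so later options win over defaults and over earlier options.
func NewServer(options ...Option) *Server {
	s := &Server{addr: ":8080"}
	for _, o := range options {
		o(s)
	}
	return s
}
```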
-func WithTestLogger(t LogT) Option { - return func(d *Daemon) { - d.log = t - } -} - -// WithExperimental sets the daemon in experimental mode -func WithExperimental() Option { - return func(d *Daemon) { - d.experimental = true - } -} - -// WithInit sets the daemon init -func WithInit() Option { - return func(d *Daemon) { - d.init = true - } -} - -// WithDockerdBinary sets the dockerd binary to the specified one -func WithDockerdBinary(dockerdBinary string) Option { - return func(d *Daemon) { - d.dockerdBinary = dockerdBinary - } -} - -// WithSwarmPort sets the swarm port to use for swarm mode -func WithSwarmPort(port int) Option { - return func(d *Daemon) { - d.SwarmPort = port - } -} - -// WithSwarmListenAddr sets the swarm listen addr to use for swarm mode -func WithSwarmListenAddr(listenAddr string) Option { - return func(d *Daemon) { - d.swarmListenAddr = listenAddr - } -} - -// WithSwarmDefaultAddrPool sets the swarm default address pool to use for swarm mode -func WithSwarmDefaultAddrPool(defaultAddrPool []string) Option { - return func(d *Daemon) { - d.DefaultAddrPool = defaultAddrPool - } -} - -// WithSwarmDefaultAddrPoolSubnetSize sets the subnet length mask of swarm default address pool to use for swarm mode -func WithSwarmDefaultAddrPoolSubnetSize(subnetSize uint32) Option { - return func(d *Daemon) { - d.SubnetSize = subnetSize - } -} - -// WithSwarmDataPathPort sets the swarm datapath port to use for swarm mode -func WithSwarmDataPathPort(datapathPort uint32) Option { - return func(d *Daemon) { - d.DataPathPort = datapathPort - } -} - -// WithEnvironment sets options from testutil/environment.Execution struct -func WithEnvironment(e environment.Execution) Option { - return func(d *Daemon) { - if e.DaemonInfo.ExperimentalBuild { - d.experimental = true - } - } -} - -// WithStorageDriver sets store driver option -func WithStorageDriver(driver string) Option { - return func(d *Daemon) { - d.storageDriver = driver - } -} - -// WithRootlessUser sets the daemon to be rootless -func WithRootlessUser(username string) Option { - return func(d *Daemon) { - u, err := user.Lookup(username) - if err != nil { - panic(err) - } - d.rootlessUser = u - } -} - -// WithOOMScoreAdjust sets OOM score for the daemon -func WithOOMScoreAdjust(score int) Option { - return func(d *Daemon) { - d.OOMScoreAdjust = score - } -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/plugin.go b/vendor/github.com/docker/docker/testutil/daemon/plugin.go deleted file mode 100644 index 98aa6063a902..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/plugin.go +++ /dev/null @@ -1,75 +0,0 @@ -package daemon - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "gotest.tools/v3/poll" -) - -// PluginIsRunning provides a poller to check if the specified plugin is running -func (d *Daemon) PluginIsRunning(t testing.TB, name string) func(poll.LogT) poll.Result { - return withClient(t, d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { - if plugin.Enabled { - return poll.Success() - } - return poll.Continue("plugin %q is not enabled", name) - })) -} - -// PluginIsNotRunning provides a poller to check if the specified plugin is not running -func (d *Daemon) PluginIsNotRunning(t testing.TB, name string) func(poll.LogT) poll.Result { - return withClient(t, d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { - if !plugin.Enabled { - return poll.Success() - } - return 
poll.Continue("plugin %q is enabled", name) - })) -} - -// PluginIsNotPresent provides a poller to check if the specified plugin is not present -func (d *Daemon) PluginIsNotPresent(t testing.TB, name string) func(poll.LogT) poll.Result { - return withClient(t, d, func(c client.APIClient, t poll.LogT) poll.Result { - _, _, err := c.PluginInspectWithRaw(context.Background(), name) - if client.IsErrNotFound(err) { - return poll.Success() - } - if err != nil { - return poll.Error(err) - } - return poll.Continue("plugin %q exists", name) - }) -} - -// PluginReferenceIs provides a poller to check if the specified plugin has the specified reference -func (d *Daemon) PluginReferenceIs(t testing.TB, name, expectedRef string) func(poll.LogT) poll.Result { - return withClient(t, d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { - if plugin.PluginReference == expectedRef { - return poll.Success() - } - return poll.Continue("plugin %q reference is not %q", name, expectedRef) - })) -} - -func withPluginInspect(name string, f func(*types.Plugin, poll.LogT) poll.Result) func(client.APIClient, poll.LogT) poll.Result { - return func(c client.APIClient, t poll.LogT) poll.Result { - plugin, _, err := c.PluginInspectWithRaw(context.Background(), name) - if client.IsErrNotFound(err) { - return poll.Continue("plugin %q not found", name) - } - if err != nil { - return poll.Error(err) - } - return f(plugin, t) - } - -} - -func withClient(t testing.TB, d *Daemon, f func(client.APIClient, poll.LogT) poll.Result) func(poll.LogT) poll.Result { - return func(pt poll.LogT) poll.Result { - c := d.NewClientT(t) - return f(c, pt) - } -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/secret.go b/vendor/github.com/docker/docker/testutil/daemon/secret.go deleted file mode 100644 index 099fdf33f118..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/secret.go +++ /dev/null @@ -1,74 +0,0 @@ -package daemon - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "gotest.tools/v3/assert" -) - -// SecretConstructor defines a swarm secret constructor -type SecretConstructor func(*swarm.Secret) - -// CreateSecret creates a secret given the specified spec -func (d *Daemon) CreateSecret(t testing.TB, secretSpec swarm.SecretSpec) string { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - scr, err := cli.SecretCreate(context.Background(), secretSpec) - assert.NilError(t, err) - - return scr.ID -} - -// ListSecrets returns the list of the current swarm secrets -func (d *Daemon) ListSecrets(t testing.TB) []swarm.Secret { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - secrets, err := cli.SecretList(context.Background(), types.SecretListOptions{}) - assert.NilError(t, err) - return secrets -} - -// GetSecret returns a swarm secret identified by the specified id -func (d *Daemon) GetSecret(t testing.TB, id string) *swarm.Secret { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - secret, _, err := cli.SecretInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - return &secret -} - -// DeleteSecret removes the swarm secret identified by the specified id -func (d *Daemon) DeleteSecret(t testing.TB, id string) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.SecretRemove(context.Background(), id) - assert.NilError(t, err) -} - -// UpdateSecret updates the swarm secret identified by the specified id -// Currently, only label update is 
supported. -func (d *Daemon) UpdateSecret(t testing.TB, id string, f ...SecretConstructor) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - secret := d.GetSecret(t, id) - for _, fn := range f { - fn(secret) - } - - err := cli.SecretUpdate(context.Background(), secret.ID, secret.Version, secret.Spec) - - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/service.go b/vendor/github.com/docker/docker/testutil/daemon/service.go deleted file mode 100644 index 0fb49b5f5b39..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/service.go +++ /dev/null @@ -1,118 +0,0 @@ -package daemon - -import ( - "context" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "gotest.tools/v3/assert" -) - -// ServiceConstructor defines a swarm service constructor function -type ServiceConstructor func(*swarm.Service) - -func (d *Daemon) createServiceWithOptions(t testing.TB, opts types.ServiceCreateOptions, f ...ServiceConstructor) string { - t.Helper() - var service swarm.Service - for _, fn := range f { - fn(&service) - } - - cli := d.NewClientT(t) - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - res, err := cli.ServiceCreate(ctx, service.Spec, opts) - assert.NilError(t, err) - return res.ID -} - -// CreateService creates a swarm service given the specified service constructor -func (d *Daemon) CreateService(t testing.TB, f ...ServiceConstructor) string { - t.Helper() - return d.createServiceWithOptions(t, types.ServiceCreateOptions{}, f...) -} - -// GetService returns the swarm service corresponding to the specified id -func (d *Daemon) GetService(t testing.TB, id string) *swarm.Service { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - service, _, err := cli.ServiceInspectWithRaw(context.Background(), id, types.ServiceInspectOptions{}) - assert.NilError(t, err) - return &service -} - -// GetServiceTasks returns the swarm tasks for the specified service -func (d *Daemon) GetServiceTasks(t testing.TB, service string, additionalFilters ...filters.KeyValuePair) []swarm.Task { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filterArgs.Add("service", service) - for _, filter := range additionalFilters { - filterArgs.Add(filter.Key, filter.Value) - } - - options := types.TaskListOptions{ - Filters: filterArgs, - } - - tasks, err := cli.TaskList(context.Background(), options) - assert.NilError(t, err) - return tasks -} - -// UpdateService updates a swarm service with the specified service constructor -func (d *Daemon) UpdateService(t testing.TB, service *swarm.Service, f ...ServiceConstructor) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - for _, fn := range f { - fn(service) - } - - _, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - assert.NilError(t, err) -} - -// RemoveService removes the specified service -func (d *Daemon) RemoveService(t testing.TB, id string) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.ServiceRemove(context.Background(), id) - assert.NilError(t, err) -} - -// ListServices returns the list of the current swarm services -func (d *Daemon) ListServices(t testing.TB) []swarm.Service { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() 
- - services, err := cli.ServiceList(context.Background(), types.ServiceListOptions{}) - assert.NilError(t, err) - return services -} - -// GetTask returns the swarm task identified by the specified id -func (d *Daemon) GetTask(t testing.TB, id string) swarm.Task { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - task, _, err := cli.TaskInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - return task -} diff --git a/vendor/github.com/docker/docker/testutil/daemon/swarm.go b/vendor/github.com/docker/docker/testutil/daemon/swarm.go deleted file mode 100644 index 8746a0e8a6fc..000000000000 --- a/vendor/github.com/docker/docker/testutil/daemon/swarm.go +++ /dev/null @@ -1,201 +0,0 @@ -package daemon - -import ( - "context" - "fmt" - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/pkg/errors" - "gotest.tools/v3/assert" -) - -const ( - // DefaultSwarmPort is the default port used for swarm in the tests - DefaultSwarmPort = 2477 - defaultSwarmListenAddr = "0.0.0.0" -) - -var ( - startArgs = []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} -) - -// StartNode (re)starts the daemon -func (d *Daemon) StartNode(t testing.TB) { - t.Helper() - d.Start(t, startArgs...) -} - -// StartNodeWithBusybox starts daemon to be used as a swarm node, and loads the busybox image -func (d *Daemon) StartNodeWithBusybox(t testing.TB) { - t.Helper() - d.StartWithBusybox(t, startArgs...) -} - -// RestartNode restarts a daemon to be used as a swarm node -func (d *Daemon) RestartNode(t testing.TB) { - t.Helper() - // avoid networking conflicts - d.Stop(t) - d.Start(t, startArgs...) -} - -// StartAndSwarmInit starts the daemon (with busybox) and initializes the swarm -func (d *Daemon) StartAndSwarmInit(t testing.TB) { - d.StartNodeWithBusybox(t) - d.SwarmInit(t, swarm.InitRequest{}) -} - -// StartAndSwarmJoin starts the daemon (with busybox) and joins the specified swarm as a worker or manager -func (d *Daemon) StartAndSwarmJoin(t testing.TB, leader *Daemon, manager bool) { - t.Helper() - d.StartNodeWithBusybox(t) - - tokens := leader.JoinTokens(t) - token := tokens.Worker - if manager { - token = tokens.Manager - } - t.Logf("[%s] joining swarm manager [%s]@%s, swarm listen addr %s", d.id, leader.id, leader.SwarmListenAddr(), d.SwarmListenAddr()) - d.SwarmJoin(t, swarm.JoinRequest{ - RemoteAddrs: []string{leader.SwarmListenAddr()}, - JoinToken: token, - }) -} - -// SpecConstructor defines a swarm spec constructor -type SpecConstructor func(*swarm.Spec) - -// SwarmListenAddr returns the listen-addr used for the daemon -func (d *Daemon) SwarmListenAddr() string { - return fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) -} - -// NodeID returns the swarm mode node ID -func (d *Daemon) NodeID() string { - return d.CachedInfo.Swarm.NodeID -} - -// SwarmInit initializes a new swarm cluster. -func (d *Daemon) SwarmInit(t testing.TB, req swarm.InitRequest) { - t.Helper() - if req.ListenAddr == "" { - req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) - } - if req.DefaultAddrPool == nil { - req.DefaultAddrPool = d.DefaultAddrPool - req.SubnetSize = d.SubnetSize - } - if d.DataPathPort > 0 { - req.DataPathPort = d.DataPathPort - } - cli := d.NewClientT(t) - defer cli.Close() - _, err := cli.SwarmInit(context.Background(), req) - assert.NilError(t, err, "initializing swarm") - d.CachedInfo = d.Info(t) -} - -// SwarmJoin joins a daemon to an existing cluster.
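For context on what SwarmInit and the SwarmJoin helper below automate, this is roughly what a single-node swarm bootstrap looks like against the plain Docker API client — a hedged sketch, not the vendored helper itself:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Initialize a single-node swarm, then read back the join tokens,
	// mirroring what SwarmInit plus JoinTokens do for the test daemon.
	if _, err := cli.SwarmInit(ctx, swarm.InitRequest{ListenAddr: "0.0.0.0:2377"}); err != nil {
		panic(err)
	}
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("worker join token:", sw.JoinTokens.Worker)
}
```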
-func (d *Daemon) SwarmJoin(t testing.TB, req swarm.JoinRequest) { - t.Helper() - if req.ListenAddr == "" { - req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) - } - cli := d.NewClientT(t) - defer cli.Close() - err := cli.SwarmJoin(context.Background(), req) - assert.NilError(t, err, "[%s] joining swarm", d.id) - d.CachedInfo = d.Info(t) -} - -// SwarmLeave forces the daemon to leave the current cluster. -// -// The passed-in testing.TB is only used to validate that the client was successfully created. -// Some tests rely on error checking the result of the actual leave, so allow -// the error to be returned. -func (d *Daemon) SwarmLeave(t testing.TB, force bool) error { - cli := d.NewClientT(t) - defer cli.Close() - return cli.SwarmLeave(context.Background(), force) -} - -// SwarmInfo returns the swarm information of the daemon -func (d *Daemon) SwarmInfo(t testing.TB) swarm.Info { - t.Helper() - cli := d.NewClientT(t) - info, err := cli.Info(context.Background()) - assert.NilError(t, err, "get swarm info") - return info.Swarm -} - -// SwarmUnlock tries to unlock a locked swarm -// -// The passed-in testing.TB is only used to validate that the client was successfully created. -// Some tests rely on error checking the result of the actual unlock, so allow -// the error to be returned. -func (d *Daemon) SwarmUnlock(t testing.TB, req swarm.UnlockRequest) error { - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.SwarmUnlock(context.Background(), req) - if err != nil { - err = errors.Wrap(err, "unlocking swarm") - } - return err -} - -// GetSwarm returns the current swarm object -func (d *Daemon) GetSwarm(t testing.TB) swarm.Swarm { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - return sw -} - -// UpdateSwarm updates the current swarm object with the specified spec constructors -func (d *Daemon) UpdateSwarm(t testing.TB, f ...SpecConstructor) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - sw := d.GetSwarm(t) - for _, fn := range f { - fn(&sw.Spec) - } - - err := cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, swarm.UpdateFlags{}) - assert.NilError(t, err) -} - -// RotateTokens updates the swarm to rotate its join tokens -func (d *Daemon) RotateTokens(t testing.TB) { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - - flags := swarm.UpdateFlags{ - RotateManagerToken: true, - RotateWorkerToken: true, - } - - err = cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, flags) - assert.NilError(t, err) -} - -// JoinTokens returns the current swarm join tokens -func (d *Daemon) JoinTokens(t testing.TB) swarm.JoinTokens { - t.Helper() - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - return sw.JoinTokens -} diff --git a/vendor/github.com/docker/docker/testutil/environment/clean.go b/vendor/github.com/docker/docker/testutil/environment/clean.go deleted file mode 100644 index 415615e8774e..000000000000 --- a/vendor/github.com/docker/docker/testutil/environment/clean.go +++ /dev/null @@ -1,185 +0,0 @@ -package environment // import "github.com/docker/docker/testutil/environment" - -import ( - "context" - "regexp" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" - "gotest.tools/v3/assert" -) - -// Clean the
environment, preserving protected objects (images, containers, ...) -// and removing everything else. It's meant to run after any tests so that they don't -// depend on each other. -func (e *Execution) Clean(t testing.TB) { - t.Helper() - client := e.APIClient() - - platform := e.OSType - if (platform != "windows") || (platform == "windows" && e.DaemonInfo.Isolation == "hyperv") { - unpauseAllContainers(t, client) - } - deleteAllContainers(t, client, e.protectedElements.containers) - deleteAllImages(t, client, e.protectedElements.images) - deleteAllVolumes(t, client, e.protectedElements.volumes) - deleteAllNetworks(t, client, platform, e.protectedElements.networks) - if platform == "linux" { - deleteAllPlugins(t, client, e.protectedElements.plugins) - } -} - -func unpauseAllContainers(t testing.TB, client client.ContainerAPIClient) { - t.Helper() - ctx := context.Background() - containers := getPausedContainers(ctx, t, client) - if len(containers) > 0 { - for _, container := range containers { - err := client.ContainerUnpause(ctx, container.ID) - assert.Check(t, err, "failed to unpause container %s", container.ID) - } - } -} - -func getPausedContainers(ctx context.Context, t testing.TB, client client.ContainerAPIClient) []types.Container { - t.Helper() - filter := filters.NewArgs() - filter.Add("status", "paused") - containers, err := client.ContainerList(ctx, types.ContainerListOptions{ - Filters: filter, - All: true, - }) - assert.Check(t, err, "failed to list containers") - return containers -} - -var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) - -func deleteAllContainers(t testing.TB, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) { - t.Helper() - ctx := context.Background() - containers := getAllContainers(ctx, t, apiclient) - if len(containers) == 0 { - return - } - - for _, container := range containers { - if _, ok := protectedContainers[container.ID]; ok { - continue - } - err := apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ - Force: true, - RemoveVolumes: true, - }) - if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) || isErrNotFoundSwarmClassic(err) { - continue - } - assert.Check(t, err, "failed to remove %s", container.ID) - } -} - -func getAllContainers(ctx context.Context, t testing.TB, client client.ContainerAPIClient) []types.Container { - t.Helper() - containers, err := client.ContainerList(ctx, types.ContainerListOptions{ - All: true, - }) - assert.Check(t, err, "failed to list containers") - return containers -} - -func deleteAllImages(t testing.TB, apiclient client.ImageAPIClient, protectedImages map[string]struct{}) { - t.Helper() - images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{}) - assert.Check(t, err, "failed to list images") - - ctx := context.Background() - for _, image := range images { - tags := tagsFromImageSummary(image) - if len(tags) == 0 { - removeImage(ctx, t, apiclient, image.ID) - continue - } - for _, tag := range tags { - if _, ok := protectedImages[tag]; !ok { - removeImage(ctx, t, apiclient, tag) - } - } - } -} - -func removeImage(ctx context.Context, t testing.TB, apiclient client.ImageAPIClient, ref string) { - t.Helper() - _, err := apiclient.ImageRemove(ctx, ref, types.ImageRemoveOptions{ - Force: true, - }) - if client.IsErrNotFound(err) { - return - } - assert.Check(t, err, "failed to remove image %s", ref) -} - -func deleteAllVolumes(t
testing.TB, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) { - t.Helper() - volumes, err := c.VolumeList(context.Background(), filters.Args{}) - assert.Check(t, err, "failed to list volumes") - - for _, v := range volumes.Volumes { - if _, ok := protectedVolumes[v.Name]; ok { - continue - } - err := c.VolumeRemove(context.Background(), v.Name, true) - // Docker EE may list volumes that no longer exist. - if isErrNotFoundSwarmClassic(err) { - continue - } - assert.Check(t, err, "failed to remove volume %s", v.Name) - } -} - -func deleteAllNetworks(t testing.TB, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) { - t.Helper() - networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) - assert.Check(t, err, "failed to list networks") - - for _, n := range networks { - if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { - continue - } - if _, ok := protectedNetworks[n.ID]; ok { - continue - } - if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { - // nat is a pre-defined network on Windows and cannot be removed - continue - } - err := c.NetworkRemove(context.Background(), n.ID) - assert.Check(t, err, "failed to remove network %s", n.ID) - } -} - -func deleteAllPlugins(t testing.TB, c client.PluginAPIClient, protectedPlugins map[string]struct{}) { - t.Helper() - plugins, err := c.PluginList(context.Background(), filters.Args{}) - // Docker EE does not allow cluster-wide plugin management. - if client.IsErrNotImplemented(err) { - return - } - assert.Check(t, err, "failed to list plugins") - - for _, p := range plugins { - if _, ok := protectedPlugins[p.Name]; ok { - continue - } - err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true}) - assert.Check(t, err, "failed to remove plugin %s", p.ID) - } -} - -// Swarm classic aggregates node errors and returns a 500 so we need to check -// the error string instead of just IsErrNotFound(). 
-func isErrNotFoundSwarmClassic(err error) bool { - return err != nil && strings.Contains(strings.ToLower(err.Error()), "no such") -} diff --git a/vendor/github.com/docker/docker/testutil/environment/environment.go b/vendor/github.com/docker/docker/testutil/environment/environment.go deleted file mode 100644 index 8af8ca8d6f1a..000000000000 --- a/vendor/github.com/docker/docker/testutil/environment/environment.go +++ /dev/null @@ -1,223 +0,0 @@ -package environment // import "github.com/docker/docker/testutil/environment" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" - "github.com/docker/docker/testutil/fixtures/load" - "github.com/pkg/errors" - "gotest.tools/v3/assert" -) - -// Execution contains information about the current test execution and daemon -// under test -type Execution struct { - client client.APIClient - DaemonInfo types.Info - OSType string - PlatformDefaults PlatformDefaults - protectedElements protectedElements -} - -// PlatformDefaults are defaults values for the platform of the daemon under test -type PlatformDefaults struct { - BaseImage string - VolumesConfigPath string - ContainerStoragePath string -} - -// New creates a new Execution struct -// This is configured using the env client (see client.FromEnv) -func New() (*Execution, error) { - c, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - return nil, errors.Wrapf(err, "failed to create client") - } - return FromClient(c) -} - -// FromClient creates a new Execution environment from the passed in client -func FromClient(c *client.Client) (*Execution, error) { - info, err := c.Info(context.Background()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get info from daemon") - } - - osType := getOSType(info) - - return &Execution{ - client: c, - DaemonInfo: info, - OSType: osType, - PlatformDefaults: getPlatformDefaults(info, osType), - protectedElements: newProtectedElements(), - }, nil -} - -func getOSType(info types.Info) string { - // Docker EE does not set the OSType so allow the user to override this value. - userOsType := os.Getenv("TEST_OSTYPE") - if userOsType != "" { - return userOsType - } - return info.OSType -} - -func getPlatformDefaults(info types.Info, osType string) PlatformDefaults { - volumesPath := filepath.Join(info.DockerRootDir, "volumes") - containersPath := filepath.Join(info.DockerRootDir, "containers") - - switch osType { - case "linux": - return PlatformDefaults{ - BaseImage: "scratch", - VolumesConfigPath: toSlash(volumesPath), - ContainerStoragePath: toSlash(containersPath), - } - case "windows": - baseImage := "microsoft/windowsservercore" - if overrideBaseImage := os.Getenv("WINDOWS_BASE_IMAGE"); overrideBaseImage != "" { - baseImage = overrideBaseImage - if overrideBaseImageTag := os.Getenv("WINDOWS_BASE_IMAGE_TAG"); overrideBaseImageTag != "" { - baseImage = baseImage + ":" + overrideBaseImageTag - } - } - fmt.Println("INFO: Windows Base image is ", baseImage) - return PlatformDefaults{ - BaseImage: baseImage, - VolumesConfigPath: filepath.FromSlash(volumesPath), - ContainerStoragePath: filepath.FromSlash(containersPath), - } - default: - panic(fmt.Sprintf("unknown OSType for daemon: %s", osType)) - } -} - -// Make sure in context of daemon, not the local platform. Note we can't -// use filepath.FromSlash or ToSlash here as they are a no-op on Unix. 
-func toSlash(path string) string { - return strings.Replace(path, `\`, `/`, -1) -} - -// IsLocalDaemon is true if the daemon under test is on the same -// host as the test process. -// -// Deterministically working out the environment in which CI is running -// to evaluate whether the daemon is local or remote is not possible through -// a build tag. -// -// For example Windows to Linux CI under Jenkins tests the 64-bit -// Windows binary build with the daemon build tag, but calls a remote -// Linux daemon. -// -// We can't just say if Windows then assume the daemon is local as at -// some point, we will be testing the Windows CLI against a Windows daemon. -// -// Similarly, it will be perfectly valid to also run CLI tests from -// a Linux CLI (built with the daemon tag) against a Windows daemon. -func (e *Execution) IsLocalDaemon() bool { - return os.Getenv("DOCKER_REMOTE_DAEMON") == "" -} - -// IsRemoteDaemon is true if the daemon under test is on different host -// as the test process. -func (e *Execution) IsRemoteDaemon() bool { - return !e.IsLocalDaemon() -} - -// DaemonAPIVersion returns the negotiated daemon api version -func (e *Execution) DaemonAPIVersion() string { - version, err := e.APIClient().ServerVersion(context.TODO()) - if err != nil { - return "" - } - return version.APIVersion -} - -// Print the execution details to stdout -// TODO: print everything -func (e *Execution) Print() { - if e.IsLocalDaemon() { - fmt.Println("INFO: Testing against a local daemon") - } else { - fmt.Println("INFO: Testing against a remote daemon") - } -} - -// APIClient returns an APIClient connected to the daemon under test -func (e *Execution) APIClient() client.APIClient { - return e.client -} - -// IsUserNamespace returns whether the user namespace remapping is enabled -func (e *Execution) IsUserNamespace() bool { - root := os.Getenv("DOCKER_REMAP_ROOT") - return root != "" -} - -// RuntimeIsWindowsContainerd returns whether containerd runtime is used on Windows -func (e *Execution) RuntimeIsWindowsContainerd() bool { - return os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME") == "1" -} - -// IsRootless returns whether the rootless mode is enabled -func (e *Execution) IsRootless() bool { - return os.Getenv("DOCKER_ROOTLESS") != "" -} - -// IsUserNamespaceInKernel returns whether the kernel supports user namespaces -func (e *Execution) IsUserNamespaceInKernel() bool { - if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { - /* - * This kernel-provided file only exists if user namespaces are - * supported - */ - return false - } - - // We need extra check on redhat based distributions - if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { - defer f.Close() - b := make([]byte, 1) - _, _ = f.Read(b) - return string(b) != "N" - } - - return true -} - -// HasExistingImage checks whether there is an image with the given reference. -// Note that this is done by filtering and then checking whether there were any -// results -- so ambiguous references might result in false-positives. 
-func (e *Execution) HasExistingImage(t testing.TB, reference string) bool { - client := e.APIClient() - filter := filters.NewArgs() - filter.Add("dangling", "false") - filter.Add("reference", reference) - imageList, err := client.ImageList(context.Background(), types.ImageListOptions{ - All: true, - Filters: filter, - }) - assert.NilError(t, err, "failed to list images") - - return len(imageList) > 0 -} - -// EnsureFrozenImagesLinux loads frozen test images into the daemon -// if they aren't already loaded -func EnsureFrozenImagesLinux(testEnv *Execution) error { - if testEnv.OSType == "linux" { - err := load.FrozenImagesLinux(testEnv.APIClient(), frozenImages...) - if err != nil { - return errors.Wrap(err, "error loading frozen images") - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/testutil/environment/protect.go b/vendor/github.com/docker/docker/testutil/environment/protect.go deleted file mode 100644 index d790106128fb..000000000000 --- a/vendor/github.com/docker/docker/testutil/environment/protect.go +++ /dev/null @@ -1,222 +0,0 @@ -package environment - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - dclient "github.com/docker/docker/client" - "gotest.tools/v3/assert" -) - -var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:bullseye-slim"} - -type protectedElements struct { - containers map[string]struct{} - images map[string]struct{} - networks map[string]struct{} - plugins map[string]struct{} - volumes map[string]struct{} -} - -func newProtectedElements() protectedElements { - return protectedElements{ - containers: map[string]struct{}{}, - images: map[string]struct{}{}, - networks: map[string]struct{}{}, - plugins: map[string]struct{}{}, - volumes: map[string]struct{}{}, - } -} - -// ProtectAll protects the existing environment (containers, images, networks, -// volumes, and, on Linux, plugins) from being cleaned up at the end of test -// runs -func ProtectAll(t testing.TB, testEnv *Execution) { - t.Helper() - ProtectContainers(t, testEnv) - ProtectImages(t, testEnv) - ProtectNetworks(t, testEnv) - ProtectVolumes(t, testEnv) - if testEnv.OSType == "linux" { - ProtectPlugins(t, testEnv) - } -} - -// ProtectContainer adds the specified container(s) to be protected in case of -// clean -func (e *Execution) ProtectContainer(t testing.TB, containers ...string) { - t.Helper() - for _, container := range containers { - e.protectedElements.containers[container] = struct{}{} - } -} - -// ProtectContainers protects existing containers from being cleaned up at the -// end of test runs -func ProtectContainers(t testing.TB, testEnv *Execution) { - t.Helper() - containers := getExistingContainers(t, testEnv) - testEnv.ProtectContainer(t, containers...) 
-} - -func getExistingContainers(t testing.TB, testEnv *Execution) []string { - t.Helper() - client := testEnv.APIClient() - containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{ - All: true, - }) - assert.NilError(t, err, "failed to list containers") - - var containers []string - for _, container := range containerList { - containers = append(containers, container.ID) - } - return containers -} - -// ProtectImage adds the specified image(s) to be protected in case of clean -func (e *Execution) ProtectImage(t testing.TB, images ...string) { - t.Helper() - for _, image := range images { - e.protectedElements.images[image] = struct{}{} - } -} - -// ProtectImages protects existing images and on linux frozen images from being -// cleaned up at the end of test runs -func ProtectImages(t testing.TB, testEnv *Execution) { - t.Helper() - images := getExistingImages(t, testEnv) - - if testEnv.OSType == "linux" { - images = append(images, frozenImages...) - } - testEnv.ProtectImage(t, images...) -} - -func getExistingImages(t testing.TB, testEnv *Execution) []string { - t.Helper() - client := testEnv.APIClient() - filter := filters.NewArgs() - filter.Add("dangling", "false") - imageList, err := client.ImageList(context.Background(), types.ImageListOptions{ - All: true, - Filters: filter, - }) - assert.NilError(t, err, "failed to list images") - - var images []string - for _, image := range imageList { - images = append(images, tagsFromImageSummary(image)...) - } - return images -} - -func tagsFromImageSummary(image types.ImageSummary) []string { - var result []string - for _, tag := range image.RepoTags { - if tag != ":" { - result = append(result, tag) - } - } - for _, digest := range image.RepoDigests { - if digest != "@" { - result = append(result, digest) - } - } - return result -} - -// ProtectNetwork adds the specified network(s) to be protected in case of -// clean -func (e *Execution) ProtectNetwork(t testing.TB, networks ...string) { - t.Helper() - for _, network := range networks { - e.protectedElements.networks[network] = struct{}{} - } -} - -// ProtectNetworks protects existing networks from being cleaned up at the end -// of test runs -func ProtectNetworks(t testing.TB, testEnv *Execution) { - t.Helper() - networks := getExistingNetworks(t, testEnv) - testEnv.ProtectNetwork(t, networks...) -} - -func getExistingNetworks(t testing.TB, testEnv *Execution) []string { - t.Helper() - client := testEnv.APIClient() - networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{}) - assert.NilError(t, err, "failed to list networks") - - var networks []string - for _, network := range networkList { - networks = append(networks, network.ID) - } - return networks -} - -// ProtectPlugin adds the specified plugin(s) to be protected in case of clean -func (e *Execution) ProtectPlugin(t testing.TB, plugins ...string) { - t.Helper() - for _, plugin := range plugins { - e.protectedElements.plugins[plugin] = struct{}{} - } -} - -// ProtectPlugins protects existing plugins from being cleaned up at the end of -// test runs -func ProtectPlugins(t testing.TB, testEnv *Execution) { - t.Helper() - plugins := getExistingPlugins(t, testEnv) - testEnv.ProtectPlugin(t, plugins...) -} - -func getExistingPlugins(t testing.TB, testEnv *Execution) []string { - t.Helper() - client := testEnv.APIClient() - pluginList, err := client.PluginList(context.Background(), filters.Args{}) - // Docker EE does not allow cluster-wide plugin management. 
- if dclient.IsErrNotImplemented(err) { - return []string{} - } - assert.NilError(t, err, "failed to list plugins") - - var plugins []string - for _, plugin := range pluginList { - plugins = append(plugins, plugin.Name) - } - return plugins -} - -// ProtectVolume adds the specified volume(s) to be protected in case of clean -func (e *Execution) ProtectVolume(t testing.TB, volumes ...string) { - t.Helper() - for _, volume := range volumes { - e.protectedElements.volumes[volume] = struct{}{} - } -} - -// ProtectVolumes protects existing volumes from being cleaned up at the end of -// test runs -func ProtectVolumes(t testing.TB, testEnv *Execution) { - t.Helper() - volumes := getExistingVolumes(t, testEnv) - testEnv.ProtectVolume(t, volumes...) -} - -func getExistingVolumes(t testing.TB, testEnv *Execution) []string { - t.Helper() - client := testEnv.APIClient() - volumeList, err := client.VolumeList(context.Background(), filters.Args{}) - assert.NilError(t, err, "failed to list volumes") - - var volumes []string - for _, volume := range volumeList.Volumes { - volumes = append(volumes, volume.Name) - } - return volumes -} diff --git a/vendor/github.com/docker/docker/testutil/fixtures/load/frozen.go b/vendor/github.com/docker/docker/testutil/fixtures/load/frozen.go deleted file mode 100644 index 6ea591b4f224..000000000000 --- a/vendor/github.com/docker/docker/testutil/fixtures/load/frozen.go +++ /dev/null @@ -1,196 +0,0 @@ -package load // import "github.com/docker/docker/testutil/fixtures/load" - -import ( - "bufio" - "bytes" - "context" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/moby/term" - "github.com/pkg/errors" -) - -const frozenImgDir = "/docker-frozen-images" - -// FrozenImagesLinux loads the frozen image set for the integration suite -// If the images are not available locally it will download them -// TODO: This loads whatever is in the frozen image dir, regardless of what -// images were passed in. 
If the images need to be downloaded, then it will respect -// the passed in images -func FrozenImagesLinux(client client.APIClient, images ...string) error { - var loadImages []struct{ srcName, destName string } - for _, img := range images { - if !imageExists(client, img) { - srcName := img - // hello-world:latest gets re-tagged as hello-world:frozen - // there are some tests that use hello-world:latest specifically so it pulls - // the image and hello-world:frozen is used for when we just want a super - // small image - if img == "hello-world:frozen" { - srcName = "hello-world:latest" - } - loadImages = append(loadImages, struct{ srcName, destName string }{ - srcName: srcName, - destName: img, - }) - } - } - if len(loadImages) == 0 { - // everything is loaded, we're done - return nil - } - - ctx := context.Background() - fi, err := os.Stat(frozenImgDir) - if err != nil || !fi.IsDir() { - srcImages := make([]string, 0, len(loadImages)) - for _, img := range loadImages { - srcImages = append(srcImages, img.srcName) - } - if err := pullImages(ctx, client, srcImages); err != nil { - return errors.Wrap(err, "error pulling image list") - } - } else { - if err := loadFrozenImages(ctx, client); err != nil { - return err - } - } - - for _, img := range loadImages { - if img.srcName != img.destName { - if err := client.ImageTag(ctx, img.srcName, img.destName); err != nil { - return errors.Wrapf(err, "failed to tag %s as %s", img.srcName, img.destName) - } - if _, err := client.ImageRemove(ctx, img.srcName, types.ImageRemoveOptions{}); err != nil { - return errors.Wrapf(err, "failed to remove %s", img.srcName) - } - } - } - return nil -} - -func imageExists(client client.APIClient, name string) bool { - _, _, err := client.ImageInspectWithRaw(context.Background(), name) - return err == nil -} - -func loadFrozenImages(ctx context.Context, client client.APIClient) error { - tar, err := exec.LookPath("tar") - if err != nil { - return errors.Wrap(err, "could not find tar binary") - } - tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") - out, err := tarCmd.StdoutPipe() - if err != nil { - return errors.Wrap(err, "error getting stdout pipe for tar command") - } - - errBuf := bytes.NewBuffer(nil) - tarCmd.Stderr = errBuf - tarCmd.Start() - defer tarCmd.Wait() - - resp, err := client.ImageLoad(ctx, out, true) - if err != nil { - return errors.Wrap(err, "failed to load frozen images") - } - defer resp.Body.Close() - fd, isTerminal := term.GetFdInfo(os.Stdout) - return jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stdout, fd, isTerminal, nil) -} - -func pullImages(ctx context.Context, client client.APIClient, images []string) error { - cwd, err := os.Getwd() - if err != nil { - return errors.Wrap(err, "error getting path to dockerfile") - } - dockerfile := os.Getenv("DOCKERFILE") - if dockerfile == "" { - dockerfile = "Dockerfile" - } - dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) - pullRefs, err := readFrozenImageList(dockerfilePath, images) - if err != nil { - return errors.Wrap(err, "error reading frozen image list") - } - - var wg sync.WaitGroup - chErr := make(chan error, len(images)) - for tag, ref := range pullRefs { - wg.Add(1) - go func(tag, ref string) { - defer wg.Done() - if err := pullTagAndRemove(ctx, client, ref, tag); err != nil { - chErr <- err - return - } - }(tag, ref) - } - wg.Wait() - close(chErr) - return <-chErr -} - -func pullTagAndRemove(ctx context.Context, client client.APIClient, ref string, tag string) error { - resp, err := 
client.ImagePull(ctx, ref, types.ImagePullOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to pull %s", ref) - } - defer resp.Close() - fd, isTerminal := term.GetFdInfo(os.Stdout) - if err := jsonmessage.DisplayJSONMessagesStream(resp, os.Stdout, fd, isTerminal, nil); err != nil { - return err - } - - if err := client.ImageTag(ctx, ref, tag); err != nil { - return errors.Wrapf(err, "failed to tag %s as %s", ref, tag) - } - _, err = client.ImageRemove(ctx, ref, types.ImageRemoveOptions{}) - return errors.Wrapf(err, "failed to remove %s", ref) - -} - -func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { - f, err := os.Open(dockerfilePath) - if err != nil { - return nil, errors.Wrap(err, "error reading dockerfile") - } - defer f.Close() - ls := make(map[string]string) - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := strings.Fields(scanner.Text()) - if len(line) < 3 { - continue - } - if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { - continue - } - - for scanner.Scan() { - img := strings.TrimSpace(scanner.Text()) - img = strings.TrimSuffix(img, "\\") - img = strings.TrimSpace(img) - split := strings.Split(img, "@") - if len(split) < 2 { - break - } - - for _, i := range images { - if split[0] == i { - ls[i] = img - break - } - } - } - } - return ls, nil -} diff --git a/vendor/github.com/docker/docker/testutil/request/npipe.go b/vendor/github.com/docker/docker/testutil/request/npipe.go deleted file mode 100644 index e827ad6b8006..000000000000 --- a/vendor/github.com/docker/docker/testutil/request/npipe.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !windows -// +build !windows - -package request - -import ( - "net" - "time" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - panic("npipe protocol only supported on Windows") -} diff --git a/vendor/github.com/docker/docker/testutil/request/npipe_windows.go b/vendor/github.com/docker/docker/testutil/request/npipe_windows.go deleted file mode 100644 index 9741ae64c653..000000000000 --- a/vendor/github.com/docker/docker/testutil/request/npipe_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package request - -import ( - "net" - "time" - - winio "github.com/Microsoft/go-winio" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(path, &timeout) -} diff --git a/vendor/github.com/docker/docker/testutil/request/ops.go b/vendor/github.com/docker/docker/testutil/request/ops.go deleted file mode 100644 index be4e502ccb6b..000000000000 --- a/vendor/github.com/docker/docker/testutil/request/ops.go +++ /dev/null @@ -1,77 +0,0 @@ -package request - -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "strings" -) - -// Options defines request options, like request modifiers and which host to target -type Options struct { - host string - requestModifiers []func(*http.Request) error -} - -// Host creates a modifier that sets the specified host as the request URL host -func Host(host string) func(*Options) { - return func(o *Options) { - o.host = host - } -} - -// With adds a request modifier to the options -func With(f func(*http.Request) error) func(*Options) { - return func(o *Options) { - o.requestModifiers = append(o.requestModifiers, f) - } -} - -// Method creates a modifier that sets the specified string as the request method -func Method(method string) func(*Options) { - return With(func(req *http.Request) error { - req.Method = method - return nil - }) -} - -// RawString sets 
the specified string as body for the request -func RawString(content string) func(*Options) { - return RawContent(io.NopCloser(strings.NewReader(content))) -} - -// RawContent sets the specified reader as body for the request -func RawContent(reader io.ReadCloser) func(*Options) { - return With(func(req *http.Request) error { - req.Body = reader - return nil - }) -} - -// ContentType sets the specified Content-Type request header -func ContentType(contentType string) func(*Options) { - return With(func(req *http.Request) error { - req.Header.Set("Content-Type", contentType) - return nil - }) -} - -// JSON sets the Content-Type request header to json -func JSON(o *Options) { - ContentType("application/json")(o) -} - -// JSONBody creates a modifier that encodes the specified data to a JSON string and set it as request body. It also sets -// the Content-Type header of the request. -func JSONBody(data interface{}) func(*Options) { - return With(func(req *http.Request) error { - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return err - } - req.Body = io.NopCloser(jsonData) - req.Header.Set("Content-Type", "application/json") - return nil - }) -} diff --git a/vendor/github.com/docker/docker/testutil/request/request.go b/vendor/github.com/docker/docker/testutil/request/request.go deleted file mode 100644 index d5f559c66637..000000000000 --- a/vendor/github.com/docker/docker/testutil/request/request.go +++ /dev/null @@ -1,215 +0,0 @@ -package request // import "github.com/docker/docker/testutil/request" - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "testing" - "time" - - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/testutil/environment" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "gotest.tools/v3/assert" -) - -// NewAPIClient returns a docker API client configured from environment variables -func NewAPIClient(t testing.TB, ops ...client.Opt) client.APIClient { - t.Helper() - ops = append([]client.Opt{client.FromEnv}, ops...) - clt, err := client.NewClientWithOpts(ops...) - assert.NilError(t, err) - return clt -} - -// DaemonTime provides the current time on the daemon host -func DaemonTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) time.Time { - t.Helper() - if testEnv.IsLocalDaemon() { - return time.Now() - } - - info, err := client.Info(ctx) - assert.NilError(t, err) - - dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) - assert.NilError(t, err, "invalid time format in GET /info response") - return dt -} - -// DaemonUnixTime returns the current time on the daemon host with nanoseconds precision. -// It return the time formatted how the client sends timestamps to the server. -func DaemonUnixTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) string { - t.Helper() - dt := DaemonTime(ctx, t, client, testEnv) - return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond())) -} - -// Post creates and execute a POST request on the specified host and endpoint, with the specified request modifiers -func Post(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, append(modifiers, Method(http.MethodPost))...) 
-} - -// Delete creates and execute a DELETE request on the specified host and endpoint, with the specified request modifiers -func Delete(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, append(modifiers, Method(http.MethodDelete))...) -} - -// Get creates and execute a GET request on the specified host and endpoint, with the specified request modifiers -func Get(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, modifiers...) -} - -// Head creates and execute a HEAD request on the specified host and endpoint, with the specified request modifiers -func Head(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, append(modifiers, Method(http.MethodHead))...) -} - -// Do creates and execute a request on the specified endpoint, with the specified request modifiers -func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - opts := &Options{ - host: DaemonHost(), - } - for _, mod := range modifiers { - mod(opts) - } - req, err := newRequest(endpoint, opts) - if err != nil { - return nil, nil, err - } - client, err := newHTTPClient(opts.host) - if err != nil { - return nil, nil, err - } - resp, err := client.Do(req) - var body io.ReadCloser - if resp != nil { - body = ioutils.NewReadCloserWrapper(resp.Body, func() error { - defer resp.Body.Close() - return nil - }) - } - return resp, body, err -} - -// ReadBody read the specified ReadCloser content and returns it -func ReadBody(b io.ReadCloser) ([]byte, error) { - defer b.Close() - return io.ReadAll(b) -} - -// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers -func newRequest(endpoint string, opts *Options) (*http.Request, error) { - hostURL, err := client.ParseHostURL(opts.host) - if err != nil { - return nil, errors.Wrapf(err, "failed parsing url %q", opts.host) - } - req, err := http.NewRequest(http.MethodGet, endpoint, nil) - if err != nil { - return nil, errors.Wrap(err, "failed to create request") - } - - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - req.URL.Scheme = "https" - } else { - req.URL.Scheme = "http" - } - req.URL.Host = hostURL.Host - - for _, config := range opts.requestModifiers { - if err := config(req); err != nil { - return nil, err - } - } - - return req, nil -} - -// newHTTPClient creates an http client for the specific host -// TODO: Share more code with client.defaultHTTPClient -func newHTTPClient(host string) (*http.Client, error) { - // FIXME(vdemeester) 10*time.Second timeout of SockRequest… ? - hostURL, err := client.ParseHostURL(host) - if err != nil { - return nil, err - } - transport := new(http.Transport) - if hostURL.Scheme == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. 
- tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - transport = &http.Transport{TLSClientConfig: tlsConfig} - } - transport.DisableKeepAlives = true - err = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) - return &http.Client{Transport: transport}, err -} - -func getTLSConfig() (*tls.Config, error) { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") - - if dockerCertPath == "" { - return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") - } - - option := &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - - return tlsConfig, nil -} - -// DaemonHost return the daemon host string for this test execution -func DaemonHost() string { - daemonURLStr := client.DefaultDockerHost - if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { - daemonURLStr = daemonHostVar - } - return daemonURLStr -} - -// SockConn opens a connection on the specified socket -func SockConn(timeout time.Duration, daemon string) (net.Conn, error) { - daemonURL, err := url.Parse(daemon) - if err != nil { - return nil, errors.Wrapf(err, "could not parse url %q", daemon) - } - - var c net.Conn - switch daemonURL.Scheme { - case "npipe": - return npipeDial(daemonURL.Path, timeout) - case "unix": - return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) - case "tcp": - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. - tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - dialer := &net.Dialer{Timeout: timeout} - return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) - } - return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) - default: - return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) - } -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 85f6ab07155b..c245a89513f8 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -2,7 +2,6 @@ package units import ( "fmt" - "regexp" "strconv" "strings" ) @@ -26,16 +25,17 @@ const ( PiB = 1024 * TiB ) -type unitMap map[string]int64 +type unitMap map[byte]int64 var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} ) -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { i := 0 @@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. 
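// Editor's aside, a hedged sketch: the rewritten parser below is reachable
// through this package's public helpers, where RAMInBytes applies the
// binary (1024-based) unit map and FromHumanSize the decimal (1000-based)
// one. The expected outputs assume the new behavior of rejecting negative
// sizes and unknown suffixes:
//
//	package main
//
//	import (
//		"fmt"
//
//		units "github.com/docker/go-units"
//	)
//
//	func main() {
//		n, _ := units.RAMInBytes("32GiB") // binary: 32 * 1024^3
//		fmt.Println(n)                    // 34359738368
//
//		m, _ := units.FromHumanSize("1.5MB") // decimal: 1.5 * 1000^2
//		fmt.Println(m)                       // 1500000
//
//		if _, err := units.RAMInBytes("-1KiB"); err != nil {
//			fmt.Println(err) // invalid size: '-1KiB'
//		}
//	}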
func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } - size, err := strconv.ParseFloat(matches[1], 64) + size, err := strconv.ParseFloat(num, 64) if err != nil { return -1, err } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). + switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix } return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) } diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f0ae9..ab5931181317 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. 
There are implementations for the following logging libraries:
 
 - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
 - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
 - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
 - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
 - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
 - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
 - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
 - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
 - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
 
 ## FAQ
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index b23ab9679a84..7accdb0c4003 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -351,15 +351,15 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
 	if v, ok := value.(logr.Marshaler); ok {
 		// Replace the value with what the type wants to get logged.
 		// That then gets handled below via reflection.
-		value = v.MarshalLog()
+		value = invokeMarshaler(v)
 	}
 
 	// Handle types that want to format themselves.
 	switch v := value.(type) {
 	case fmt.Stringer:
-		value = v.String()
+		value = invokeStringer(v)
 	case error:
-		value = v.Error()
+		value = invokeError(v)
 	}
 
 	// Handling the most common types without reflect is a small perf win.
@@ -408,8 +408,9 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
 			if i > 0 {
 				buf.WriteByte(',')
 			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
 			// arbitrary keys might need escaping
-			buf.WriteString(prettyString(v[i].(string)))
+			buf.WriteString(prettyString(k))
 			buf.WriteByte(':')
 			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
 		}
@@ -596,6 +597,33 @@ func isEmpty(v reflect.Value) bool {
 	return false
 }
 
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
 // Caller represents the original call site for a log line, after considering
 // logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
 // Line fields will always be provided, while the Func field is optional.
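// Editor's aside, a hedged sketch: the invokeMarshaler/invokeStringer/
// invokeError helpers added above make funcr tolerate values whose
// MarshalLog(), String(), or Error() panics; the panic is recovered and
// rendered as a "<panic: ...>" placeholder instead of aborting the log
// call. A minimal, self-contained demonstration against funcr's public
// New API:

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

// explosive panics from String(), as a misbehaving fmt.Stringer might.
type explosive struct{}

func (explosive) String() string { panic("boom") }

func main() {
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// Prints roughly: "level"=0 "msg"="stringer survived" "bad"="<panic: boom>"
	log.Info("stringer survived", "bad", explosive{})
}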
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index c05482a20319..c3b56b3d2c5e 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -115,6 +115,15 @@ limitations under the License.
 // may be any Go value, but how the value is formatted is determined by the
 // LogSink implementation.
 //
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// Calling methods with the null logger (Logger{}) as instance will crash
+// because it has no LogSink. Therefore this null logger should never be passed
+// around. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
 // Key Naming Conventions
 //
 // Keys are not strictly required to conform to any specification or regex, but
diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml
index b791a74213c2..b16d040fa89e 100644
--- a/vendor/github.com/gofrs/flock/.travis.yml
+++ b/vendor/github.com/gofrs/flock/.travis.yml
@@ -1,7 +1,7 @@
 language: go
 go:
-  - 1.10.x
-  - 1.11.x
+  - 1.14.x
+  - 1.15.x
 script: go test -v -check.vv -race ./...
 sudo: false
 notifications:
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml
index 6848e94bf886..909b4bf7cb4e 100644
--- a/vendor/github.com/gofrs/flock/appveyor.yml
+++ b/vendor/github.com/gofrs/flock/appveyor.yml
@@ -7,7 +7,7 @@ clone_folder: 'c:\gopath\src\github.com\gofrs\flock'
 
 environment:
   GOPATH: 'c:\gopath'
-  GOVERSION: '1.11'
+  GOVERSION: '1.15'
 
 init:
   - git config --global core.autocrlf input
diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go
index 2fd16033763d..95c784ca504b 100644
--- a/vendor/github.com/gofrs/flock/flock.go
+++ b/vendor/github.com/gofrs/flock/flock.go
@@ -19,6 +19,7 @@ package flock
 import (
 	"context"
 	"os"
+	"runtime"
 	"sync"
 	"time"
 )
@@ -116,7 +117,15 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
 func (f *Flock) setFh() error {
 	// open a new os.File instance
 	// create it if it doesn't exist, and open the file read-only.
-	fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600))
+	flags := os.O_CREATE
+	if runtime.GOOS == "aix" {
+		// AIX cannot perform a write-lock (i.e. exclusive) on a
+		// read-only file.
+		flags |= os.O_RDWR
+	} else {
+		flags |= os.O_RDONLY
+	}
+	fh, err := os.OpenFile(f.path, flags, os.FileMode(0600))
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go
new file mode 100644
index 000000000000..7277c1b6b265
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_aix.go
@@ -0,0 +1,281 @@
+// Copyright 2019 Tim Heckman. All rights reserved. Use of this source code is
+// governed by the BSD 3-Clause license that can be found in the LICENSE file.
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code implements the filelock API using POSIX 'fcntl' locks, which attach
+// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
+// files prematurely when the same file is opened through different descriptors,
+// +// This code is adapted from the Go package: +// cmd/go/internal/lockedfile/internal/filelock + +//+build aix + +package flock + +import ( + "errors" + "io" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type lockType int16 + +const ( + readLock lockType = unix.F_RDLCK + writeLock lockType = unix.F_WRLCK +) + +type cmdType int + +const ( + tryLock cmdType = unix.F_SETLK + waitLock cmdType = unix.F_SETLKW +) + +type inode = uint64 + +type inodeLock struct { + owner *Flock + queue []<-chan *Flock +} + +var ( + mu sync.Mutex + inodes = map[*Flock]inode{} + locks = map[inode]inodeLock{} +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, writeLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, readLock) +} + +func (f *Flock) lock(locked *bool, flag lockType) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, err := f.doLock(waitLock, flag, true); err != nil { + return err + } + + *locked = true + return nil +} + +func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.fh.Stat() + if err != nil { + return false, err + } + ino := inode(fi.Sys().(*syscall.Stat_t).Ino) + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return false, &os.PathError{ + Path: f.Path(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + + inodes[f] = ino + + var wait chan *Flock + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else if !blocking { + // Already owned: cannot take the lock. + mu.Unlock() + return false, nil + } else { + // Already owned: add a channel to wait on. 
+ wait = make(chan *Flock) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + err = setlkw(f.fh.Fd(), cmd, lt) + + if err != nil { + f.doUnlock() + if cmd == tryLock && err == unix.EACCES { + return false, nil + } + return false, err + } + + return true, nil +} + +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + if err := f.doUnlock(); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +func (f *Flock) doUnlock() (err error) { + var owner *Flock + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner == f { + err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK) + } + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, writeLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, readLock) +} + +func (f *Flock) try(locked *bool, flag lockType) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + haslock, err := f.doLock(tryLock, flag, false) + if err != nil { + return false, err + } + + *locked = haslock + return haslock, nil +} + +// setlkw calls FcntlFlock with cmd for the entire file indicated by fd. +func setlkw(fd uintptr, cmd cmdType, lt lockType) error { + for { + err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. 
+ }) + if err != unix.EINTR { + return err + } + } +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go index 366a60ca6d3b..c315a3e29084 100644 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. -// +build !windows +// +build !aix,!windows package flock diff --git a/vendor/github.com/gogo/protobuf/plugin/compare/compare.go b/vendor/github.com/gogo/protobuf/plugin/compare/compare.go new file mode 100644 index 000000000000..9ab40ef1508c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/compare/compare.go @@ -0,0 +1,580 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
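// Editor's aside, a hedged sketch closing out the gofrs/flock changes above:
// whichever backend is compiled in (flock(2) on most Unixes, the fcntl-based
// flock_aix.go implementation on AIX, LockFileEx on Windows), the package API
// is identical. A minimal non-blocking usage example; the lock-file path is
// illustrative only:

package main

import (
	"fmt"

	"github.com/gofrs/flock"
)

func main() {
	f := flock.New("/tmp/example.lock")

	locked, err := f.TryLock() // non-blocking exclusive lock
	if err != nil {
		panic(err)
	}
	if locked {
		fmt.Println("acquired", f.Path())
		_ = f.Unlock()
	}
}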
+ +package compare + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type plugin struct { + *generator.Generator + generator.PluginImports + fmtPkg generator.Single + bytesPkg generator.Single + sortkeysPkg generator.Single + protoPkg generator.Single +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "compare" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.fmtPkg = p.NewImport("fmt") + p.bytesPkg = p.NewImport("bytes") + p.sortkeysPkg = p.NewImport("github.com/gogo/protobuf/sortkeys") + p.protoPkg = p.NewImport("github.com/gogo/protobuf/proto") + + for _, msg := range file.Messages() { + if msg.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasCompare(file.FileDescriptorProto, msg.DescriptorProto) { + p.generateMessage(file, msg) + } + } +} + +func (p *plugin) generateNullableField(fieldname string) { + p.P(`if this.`, fieldname, ` != nil && that1.`, fieldname, ` != nil {`) + p.In() + p.P(`if *this.`, fieldname, ` != *that1.`, fieldname, `{`) + p.In() + p.P(`if *this.`, fieldname, ` < *that1.`, fieldname, `{`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` != nil {`) + p.In() + p.P(`return 1`) + p.Out() + p.P(`} else if that1.`, fieldname, ` != nil {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) +} + +func (p *plugin) generateMsgNullAndTypeCheck(ccTypeName string) { + p.P(`if that == nil {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return 0`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`that1, ok := that.(*`, ccTypeName, `)`) + p.P(`if !ok {`) + p.In() + p.P(`that2, ok := that.(`, ccTypeName, `)`) + p.P(`if ok {`) + p.In() + p.P(`that1 = &that2`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`return 1`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`if that1 == nil {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return 0`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`} else if this == nil {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) +} + +func (p *plugin) generateField(file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + fieldname := p.GetOneOfFieldName(message, field) + repeated := field.IsRepeated() + ctype := gogoproto.IsCustomType(field) + nullable := gogoproto.IsNullable(field) + // oneof := field.OneofIndex != nil + if !repeated { + if ctype { + if nullable { + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`return 1`) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` == nil {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`} else if c := this.`, fieldname, `.Compare(*that1.`, fieldname, `); c != 0 {`) + } else { + p.P(`if c := this.`, fieldname, `.Compare(that1.`, fieldname, `); c != 0 {`) + } + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else { + if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if c := this.`, fieldname, `.Compare(that1.`, fieldname, `); c != 0 {`) + } else { + p.P(`if c := 
this.`, fieldname, `.Compare(&that1.`, fieldname, `); c != 0 {`) + } + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else if field.IsBytes() { + p.P(`if c := `, p.bytesPkg.Use(), `.Compare(this.`, fieldname, `, that1.`, fieldname, `); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else if field.IsString() { + if nullable && !proto3 { + p.generateNullableField(fieldname) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + p.In() + p.P(`if this.`, fieldname, ` < that1.`, fieldname, `{`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } + } else if field.IsBool() { + if nullable && !proto3 { + p.P(`if this.`, fieldname, ` != nil && that1.`, fieldname, ` != nil {`) + p.In() + p.P(`if *this.`, fieldname, ` != *that1.`, fieldname, `{`) + p.In() + p.P(`if !*this.`, fieldname, ` {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` != nil {`) + p.In() + p.P(`return 1`) + p.Out() + p.P(`} else if that1.`, fieldname, ` != nil {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + p.In() + p.P(`if !this.`, fieldname, ` {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } + } else { + if nullable && !proto3 { + p.generateNullableField(fieldname) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + p.In() + p.P(`if this.`, fieldname, ` < that1.`, fieldname, `{`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } + } + } + } else { + p.P(`if len(this.`, fieldname, `) != len(that1.`, fieldname, `) {`) + p.In() + p.P(`if len(this.`, fieldname, `) < len(that1.`, fieldname, `) {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + p.P(`for i := range this.`, fieldname, ` {`) + p.In() + if ctype { + p.P(`if c := this.`, fieldname, `[i].Compare(that1.`, fieldname, `[i]); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else { + if p.IsMap(field) { + m := p.GoMapType(nil, field) + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + mapValue := m.ValueAliasField + if mapValue.IsMessage() || p.IsGroup(mapValue) { + if nullable && valuegoTyp == valuegoAliasTyp { + p.P(`if c := this.`, fieldname, `[i].Compare(that1.`, fieldname, `[i]); c != 0 {`) + } else { + // Compare() has a pointer receiver, but map value is a value type + a := `this.` + fieldname + `[i]` + b := `that1.` + fieldname + `[i]` + if valuegoTyp != valuegoAliasTyp { + // cast back to the type that has the generated methods on it + a = `(` + valuegoTyp + `)(` + a + `)` + b = `(` + valuegoTyp + `)(` + b + `)` + } + p.P(`a := `, a) + p.P(`b := `, b) + if nullable { + p.P(`if c := a.Compare(b); c != 0 {`) + } else { + p.P(`if c := (&a).Compare(&b); c != 0 {`) + } + } + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else if mapValue.IsBytes() { + p.P(`if c := `, p.bytesPkg.Use(), `.Compare(this.`, fieldname, `[i], that1.`, fieldname, `[i]); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else if mapValue.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + p.In() + p.P(`if this.`, fieldname, `[i] < that1.`, fieldname, `[i] {`) + p.In() + p.P(`return -1`) 
+ p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + p.In() + p.P(`if this.`, fieldname, `[i] < that1.`, fieldname, `[i] {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } + } else if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if c := this.`, fieldname, `[i].Compare(that1.`, fieldname, `[i]); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else { + p.P(`if c := this.`, fieldname, `[i].Compare(&that1.`, fieldname, `[i]); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } + } else if field.IsBytes() { + p.P(`if c := `, p.bytesPkg.Use(), `.Compare(this.`, fieldname, `[i], that1.`, fieldname, `[i]); c != 0 {`) + p.In() + p.P(`return c`) + p.Out() + p.P(`}`) + } else if field.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + p.In() + p.P(`if this.`, fieldname, `[i] < that1.`, fieldname, `[i] {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } else if field.IsBool() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + p.In() + p.P(`if !this.`, fieldname, `[i] {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + p.In() + p.P(`if this.`, fieldname, `[i] < that1.`, fieldname, `[i] {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`}`) + p.P(`return 1`) + p.Out() + p.P(`}`) + } + } + p.Out() + p.P(`}`) + } +} + +func (p *plugin) generateMessage(file *generator.FileDescriptor, message *generator.Descriptor) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) Compare(that interface{}) int {`) + p.In() + p.generateMsgNullAndTypeCheck(ccTypeName) + oneofs := make(map[string]struct{}) + + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if oneof { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`return 1`) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` == nil {`) + p.In() + p.P(`return -1`) + p.Out() + p.P(`} else {`) + p.In() + + // Generate two type switches in order to compare the + // types of the oneofs. If they are of the same type + // call Compare, otherwise return 1 or -1. 
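+			//
+			// As a sketch (type names are illustrative only, not taken from
+			// this file), for a oneof field `Value` with members M_Foo and
+			// M_Bar the emitted code reads:
+			//
+			//	thisType := -1
+			//	switch this.Value.(type) {
+			//	case *M_Foo:
+			//		thisType = 0
+			//	case *M_Bar:
+			//		thisType = 1
+			//	default:
+			//		panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Value))
+			//	}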
+			p.P(`thisType := -1`)
+			p.P(`switch this.`, fieldname, `.(type) {`)
+			for i, subfield := range message.Field {
+				// guard against fields outside any oneof, whose OneofIndex is nil
+				if subfield.OneofIndex != nil && *subfield.OneofIndex == *field.OneofIndex {
+					ccTypeName := p.OneOfTypeName(message, subfield)
+					p.P(`case *`, ccTypeName, `:`)
+					p.In()
+					p.P(`thisType = `, i)
+					p.Out()
+				}
+			}
+			p.P(`default:`)
+			p.In()
+			p.P(`panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.`, fieldname, `))`)
+			p.Out()
+			p.P(`}`)
+
+			p.P(`that1Type := -1`)
+			p.P(`switch that1.`, fieldname, `.(type) {`)
+			for i, subfield := range message.Field {
+				// guard against fields outside any oneof, whose OneofIndex is nil
+				if subfield.OneofIndex != nil && *subfield.OneofIndex == *field.OneofIndex {
+					ccTypeName := p.OneOfTypeName(message, subfield)
+					p.P(`case *`, ccTypeName, `:`)
+					p.In()
+					p.P(`that1Type = `, i)
+					p.Out()
+				}
+			}
+			p.P(`default:`)
+			p.In()
+			p.P(`panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.`, fieldname, `))`)
+			p.Out()
+			p.P(`}`)
+
+			p.P(`if thisType == that1Type {`)
+			p.In()
+			p.P(`if c := this.`, fieldname, `.Compare(that1.`, fieldname, `); c != 0 {`)
+			p.In()
+			p.P(`return c`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`} else if thisType < that1Type {`)
+			p.In()
+			p.P(`return -1`)
+			p.Out()
+			p.P(`} else if thisType > that1Type {`)
+			p.In()
+			p.P(`return 1`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`}`)
+		} else {
+			p.generateField(file, message, field)
+		}
+	}
+	if message.DescriptorProto.HasExtension() {
+		if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) {
+			p.P(`thismap := `, p.protoPkg.Use(), `.GetUnsafeExtensionsMap(this)`)
+			p.P(`thatmap := `, p.protoPkg.Use(), `.GetUnsafeExtensionsMap(that1)`)
+			p.P(`extkeys := make([]int32, 0, len(thismap)+len(thatmap))`)
+			p.P(`for k, _ := range thismap {`)
+			p.In()
+			p.P(`extkeys = append(extkeys, k)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`for k, _ := range thatmap {`)
+			p.In()
+			p.P(`if _, ok := thismap[k]; !ok {`)
+			p.In()
+			p.P(`extkeys = append(extkeys, k)`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`}`)
+			p.P(p.sortkeysPkg.Use(), `.Int32s(extkeys)`)
+			p.P(`for _, k := range extkeys {`)
+			p.In()
+			p.P(`if v, ok := thismap[k]; ok {`)
+			p.In()
+			p.P(`if v2, ok := thatmap[k]; ok {`)
+			p.In()
+			p.P(`if c := v.Compare(&v2); c != 0 {`)
+			p.In()
+			p.P(`return c`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`} else {`)
+			p.In()
+			p.P(`return 1`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`} else {`)
+			p.In()
+			p.P(`return -1`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`}`)
+		} else {
+			fieldname := "XXX_extensions"
+			p.P(`if c := `, p.bytesPkg.Use(), `.Compare(this.`, fieldname, `, that1.`, fieldname, `); c != 0 {`)
+			p.In()
+			p.P(`return c`)
+			p.Out()
+			p.P(`}`)
+		}
+	}
+	if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) {
+		fieldname := "XXX_unrecognized"
+		p.P(`if c := `, p.bytesPkg.Use(), `.Compare(this.`, fieldname, `, that1.`, fieldname, `); c != 0 {`)
+		p.In()
+		p.P(`return c`)
+		p.Out()
+		p.P(`}`)
+	}
+	p.P(`return 0`)
+	p.Out()
+	p.P(`}`)
+
+	// Generate Compare methods for oneof fields
+	m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto)
+	for _, field := range m.Field {
+		oneof := field.OneofIndex != nil
+		if !oneof {
+			continue
+		}
+		ccTypeName := p.OneOfTypeName(message, field)
+		p.P(`func (this *`, ccTypeName, `) Compare(that interface{}) int {`)
+		p.In()
+
+		p.generateMsgNullAndTypeCheck(ccTypeName)
+		vanity.TurnOffNullableForNativeTypes(field)
+		p.generateField(file, message, field)
+
+		p.P(`return 0`)
+		p.Out()
+		p.P(`}`)
+	}
+}
+
+func init() {
+	generator.RegisterPlugin(NewPlugin())
+}
diff --git
a/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go b/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go new file mode 100644 index 000000000000..4fbdbc633cd4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go @@ -0,0 +1,118 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package compare + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + unsafePkg := imports.NewImport("unsafe") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.HasCompare(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + hasUnsafe := gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) || + gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) + p.P(`func Test`, ccTypeName, `Compare(t *`, testingPkg.Use(), `.T) {`) + p.In() + if hasUnsafe { + p.P(`var bigendian uint32 = 0x01020304`) + p.P(`if *(*byte)(`, unsafePkg.Use(), `.Pointer(&bigendian)) == 1 {`) + p.In() + p.P(`t.Skip("unsafe does not work on big endian architectures")`) + p.Out() + p.P(`}`) + } + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, 
false)`)
+			p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`)
+			p.P(`if err != nil {`)
+			p.In()
+			p.P(`panic(err)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`msg := &`, ccTypeName, `{}`)
+			p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`)
+			p.In()
+			p.P(`panic(err)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`if c := p.Compare(msg); c != 0 {`)
+			p.In()
+			p.P(`t.Fatalf("%#v !Compare %#v, since %d", msg, p, c)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`p2 := NewPopulated`, ccTypeName, `(popr, false)`)
+			p.P(`c := p.Compare(p2)`)
+			p.P(`c2 := p2.Compare(p)`)
+			p.P(`if c != (-1 * c2) {`)
+			p.In()
+			p.P(`t.Errorf("p.Compare(p2) = %d", c)`)
+			p.P(`t.Errorf("p2.Compare(p) = %d", c2)`)
+			p.P(`t.Errorf("p = %#v", p)`)
+			p.P(`t.Errorf("p2 = %#v", p2)`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`}`)
+		}
+
+	}
+	return used
+}
+
+func init() {
+	testgen.RegisterTestPlugin(NewTest)
+}
diff --git a/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go b/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go
new file mode 100644
index 000000000000..486f28771929
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go
@@ -0,0 +1,133 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The defaultcheck plugin is used to check that the nullable extension is not used incorrectly.
+For instance:
+An error is caused if a non-nullable field (nullable = false):
+  - has a default value,
+  - is an enum which does not start at zero,
+  - is used for an extension,
+  - is used for a native proto3 type.
+
+A warning is given if a non-nullable field is a repeated native type or a
+bytes type, since nullable=false has no effect there.
+
+An error is also caused if a field with a default value is used in a message:
+  - which is a face,
+  - which does not have getters.
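+
+For example (a hypothetical field, shown only to illustrate the first rule
+above), the following declaration would be rejected, because a non-nullable
+field cannot carry a default value:
+
+	optional int64 f = 1 [default = 3, (gogoproto.nullable) = false];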
+
+It is enabled by the following extensions:
+
+  - nullable
+
+For incorrect usage of nullable with tests see:
+
+  github.com/gogo/protobuf/test/nullableconflict
+
+*/
+package defaultcheck
+
+import (
+	"fmt"
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"os"
+)
+
+type plugin struct {
+	*generator.Generator
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "defaultcheck"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func (p *plugin) Generate(file *generator.FileDescriptor) {
+	proto3 := gogoproto.IsProto3(file.FileDescriptorProto)
+	for _, msg := range file.Messages() {
+		getters := gogoproto.HasGoGetters(file.FileDescriptorProto, msg.DescriptorProto)
+		face := gogoproto.IsFace(file.FileDescriptorProto, msg.DescriptorProto)
+		for _, field := range msg.GetField() {
+			if len(field.GetDefaultValue()) > 0 {
+				if !getters {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot have a default value and not have a getter method", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+					os.Exit(1)
+				}
+				if face {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot have a default value and be in a face", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+					os.Exit(1)
+				}
+			}
+			if gogoproto.IsNullable(field) {
+				continue
+			}
+			if len(field.GetDefaultValue()) > 0 {
+				fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be non-nullable and have a default value", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+				os.Exit(1)
+			}
+			if !field.IsMessage() && !gogoproto.IsCustomType(field) {
+				if field.IsRepeated() {
+					fmt.Fprintf(os.Stderr, "WARNING: field %v.%v is a repeated non-nullable native type, nullable=false has no effect\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+				} else if proto3 {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v is a native type and in proto3 syntax with nullable=false there exists conflicting implementations when encoding zero values", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+					os.Exit(1)
+				}
+				if field.IsBytes() {
+					fmt.Fprintf(os.Stderr, "WARNING: field %v.%v is a non-nullable bytes type, nullable=false has no effect\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+				}
+			}
+			if !field.IsEnum() {
+				continue
+			}
+			enum := p.ObjectNamed(field.GetTypeName()).(*generator.EnumDescriptor)
+			if len(enum.Value) == 0 || enum.Value[0].GetNumber() != 0 {
+				fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be non-nullable and be an enum type %v which does not start with zero", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name), enum.GetName())
+				os.Exit(1)
+			}
+		}
+	}
+	for _, e := range file.GetExtension() {
+		if !gogoproto.IsNullable(e) {
+			fmt.Fprintf(os.Stderr, "ERROR: extended field %v cannot be non-nullable", generator.CamelCase(e.GetName()))
+			os.Exit(1)
+		}
+	}
+}
+
+func (p *plugin) GenerateImports(*generator.FileDescriptor) {}
+
+func init() {
+	generator.RegisterPlugin(NewPlugin())
+}
diff --git a/vendor/github.com/gogo/protobuf/plugin/description/description.go b/vendor/github.com/gogo/protobuf/plugin/description/description.go
new file mode 100644
index 000000000000..f72efba61282
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/plugin/description/description.go
@@ -0,0 +1,201 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The description (experimental) plugin generates a Description method for each message. +The Description method returns a populated google_protobuf.FileDescriptorSet struct. +This contains the description of the files used to generate this message. + +It is enabled by the following extensions: + + - description + - description_all + +The description plugin also generates a test given it is enabled using one of the following extensions: + + - testgen + - testgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + message B { + option (gogoproto.description) = true; + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +given to the description plugin, will generate the following code: + + func (this *B) Description() (desc *google_protobuf.FileDescriptorSet) { + return ExampleDescription() + } + +and the following test code: + + func TestDescription(t *testing9.T) { + ExampleDescription() + } + +The hope is to use this struct in some way instead of reflect. +This package is subject to change, since a use has not been figured out yet. 
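+
+As a sketch of one possible (hypothetical) use, a caller could walk the
+returned descriptor set instead of using reflection:
+
+	desc := (&B{}).Description()
+	for _, f := range desc.GetFile() {
+		fmt.Println(f.GetName())
+	}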
+ +*/ +package description + +import ( + "bytes" + "compress/gzip" + "fmt" + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type plugin struct { + *generator.Generator + generator.PluginImports +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "description" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + used := false + localName := generator.FileName(file) + + p.PluginImports = generator.NewPluginImports(p.Generator) + descriptorPkg := p.NewImport("github.com/gogo/protobuf/protoc-gen-gogo/descriptor") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + gzipPkg := p.NewImport("compress/gzip") + bytesPkg := p.NewImport("bytes") + ioutilPkg := p.NewImport("io/ioutil") + + for _, message := range file.Messages() { + if !gogoproto.HasDescription(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + used = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) Description() (desc *`, descriptorPkg.Use(), `.FileDescriptorSet) {`) + p.In() + p.P(`return `, localName, `Description()`) + p.Out() + p.P(`}`) + } + + if used { + + p.P(`func `, localName, `Description() (desc *`, descriptorPkg.Use(), `.FileDescriptorSet) {`) + p.In() + //Don't generate SourceCodeInfo, since it will create too much code. + + ss := make([]*descriptor.SourceCodeInfo, 0) + for _, f := range p.Generator.AllFiles().GetFile() { + ss = append(ss, f.SourceCodeInfo) + f.SourceCodeInfo = nil + } + b, err := proto.Marshal(p.Generator.AllFiles()) + if err != nil { + panic(err) + } + for i, f := range p.Generator.AllFiles().GetFile() { + f.SourceCodeInfo = ss[i] + } + p.P(`d := &`, descriptorPkg.Use(), `.FileDescriptorSet{}`) + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + p.P("var gzipped = []byte{") + p.In() + p.P("// ", len(b), " bytes of a gzipped FileDescriptorSet") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + p.P(s) + + b = b[n:] + } + p.Out() + p.P("}") + p.P(`r := `, bytesPkg.Use(), `.NewReader(gzipped)`) + p.P(`gzipr, err := `, gzipPkg.Use(), `.NewReader(r)`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`ungzipped, err := `, ioutilPkg.Use(), `.ReadAll(gzipr)`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(ungzipped, d); err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`return d`) + p.Out() + p.P(`}`) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go b/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go new file mode 100644 index 000000000000..babcd311da42 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go @@ -0,0 +1,73 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package description + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + for _, message := range file.Messages() { + if !gogoproto.HasDescription(file.FileDescriptorProto, message.DescriptorProto) || + !gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + used = true + } + + if used { + localName := generator.FileName(file) + p.P(`func Test`, localName, `Description(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(localName, `Description()`) + p.Out() + p.P(`}`) + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go b/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go new file mode 100644 index 000000000000..bc68efe12c7d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go @@ -0,0 +1,200 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The embedcheck plugin is used to check that embed is not used incorrectly.
+For instance:
+An embedded message has a generated string method, but it is a member of a message which does not.
+This causes a warning.
+An error is caused by a namespace conflict.
+
+It is enabled by the following extensions:
+
+  - embed
+  - embed_all
+
+For incorrect usage of embed with tests see:
+
+  github.com/gogo/protobuf/test/embedconflict
+
+*/
+package embedcheck
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+)
+
+type plugin struct {
+	*generator.Generator
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "embedcheck"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+var overwriters []map[string]gogoproto.EnableFunc = []map[string]gogoproto.EnableFunc{
+	{
+		"stringer": gogoproto.IsStringer,
+	},
+	{
+		"gostring": gogoproto.HasGoString,
+	},
+	{
+		"equal": gogoproto.HasEqual,
+	},
+	{
+		"verboseequal": gogoproto.HasVerboseEqual,
+	},
+	{
+		"size":       gogoproto.IsSizer,
+		"protosizer": gogoproto.IsProtoSizer,
+	},
+	{
+		"unmarshaler":        gogoproto.IsUnmarshaler,
+		"unsafe_unmarshaler": gogoproto.IsUnsafeUnmarshaler,
+	},
+	{
+		"marshaler":        gogoproto.IsMarshaler,
+		"unsafe_marshaler": gogoproto.IsUnsafeMarshaler,
+	},
+}
+
+func (p *plugin) Generate(file *generator.FileDescriptor) {
+	for _, msg := range file.Messages() {
+		for _, os := range overwriters {
+			possible := true
+			for _, overwriter := range os {
+				if overwriter(file.FileDescriptorProto, msg.DescriptorProto) {
+					possible = false
+				}
+			}
+			if possible {
+				p.checkOverwrite(msg, os)
+			}
+		}
+		p.checkNameSpace(msg)
+		for _, field := range msg.GetField() {
+			if gogoproto.IsEmbed(field) && gogoproto.IsCustomName(field) {
+				fmt.Fprintf(os.Stderr, "ERROR: field %v with custom name %v cannot be embedded", *field.Name, gogoproto.GetCustomName(field))
+				os.Exit(1)
+			}
+		}
+		p.checkRepeated(msg)
+	}
+	for _, e := range file.GetExtension() {
+		if gogoproto.IsEmbed(e) {
+			fmt.Fprintf(os.Stderr, "ERROR: extended field %v cannot be embedded", generator.CamelCase(*e.Name))
+			os.Exit(1)
+		}
+	}
+}
+
+func (p *plugin) checkNameSpace(message *generator.Descriptor) map[string]bool {
+	ccTypeName := generator.CamelCaseSlice(message.TypeName())
+	names := make(map[string]bool)
+	for _, field := range message.Field {
+		fieldname := generator.CamelCase(*field.Name)
+		if field.IsMessage() && gogoproto.IsEmbed(field) {
+			desc := p.ObjectNamed(field.GetTypeName())
+			moreNames := p.checkNameSpace(desc.(*generator.Descriptor))
+			for another := range moreNames {
+				if names[another] {
+					fmt.Fprintf(os.Stderr, "ERROR: duplicate
embedded fieldname %v in type %v\n", fieldname, ccTypeName) + os.Exit(1) + } + names[another] = true + } + } else { + if names[fieldname] { + fmt.Fprintf(os.Stderr, "ERROR: duplicate embedded fieldname %v in type %v\n", fieldname, ccTypeName) + os.Exit(1) + } + names[fieldname] = true + } + } + return names +} + +func (p *plugin) checkOverwrite(message *generator.Descriptor, enablers map[string]gogoproto.EnableFunc) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + names := []string{} + for name := range enablers { + names = append(names, name) + } + for _, field := range message.Field { + if field.IsMessage() && gogoproto.IsEmbed(field) { + fieldname := generator.CamelCase(*field.Name) + desc := p.ObjectNamed(field.GetTypeName()) + msg := desc.(*generator.Descriptor) + for errStr, enabled := range enablers { + if enabled(msg.File().FileDescriptorProto, msg.DescriptorProto) { + fmt.Fprintf(os.Stderr, "WARNING: found non-%v %v with embedded %v %v\n", names, ccTypeName, errStr, fieldname) + } + } + p.checkOverwrite(msg, enablers) + } + } +} + +func (p *plugin) checkRepeated(message *generator.Descriptor) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + for _, field := range message.Field { + if !gogoproto.IsEmbed(field) { + continue + } + if field.IsBytes() { + fieldname := generator.CamelCase(*field.Name) + fmt.Fprintf(os.Stderr, "ERROR: found embedded bytes field %s in message %s\n", fieldname, ccTypeName) + os.Exit(1) + } + if !field.IsRepeated() { + continue + } + fieldname := generator.CamelCase(*field.Name) + fmt.Fprintf(os.Stderr, "ERROR: found repeated embedded field %s in message %s\n", fieldname, ccTypeName) + os.Exit(1) + } +} + +func (p *plugin) GenerateImports(*generator.FileDescriptor) {} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go b/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go new file mode 100644 index 000000000000..04d6e547fc35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go @@ -0,0 +1,104 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The enumstringer (experimental) plugin generates a String method for each enum.
+
+It is enabled by the following extensions:
+
+  - enum_stringer
+  - enum_stringer_all
+
+This package is subject to change.
+
+*/
+package enumstringer
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+)
+
+type enumstringer struct {
+	*generator.Generator
+	generator.PluginImports
+	atleastOne bool
+	localName  string
+}
+
+func NewEnumStringer() *enumstringer {
+	return &enumstringer{}
+}
+
+func (p *enumstringer) Name() string {
+	return "enumstringer"
+}
+
+func (p *enumstringer) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func (p *enumstringer) Generate(file *generator.FileDescriptor) {
+	p.PluginImports = generator.NewPluginImports(p.Generator)
+	p.atleastOne = false
+
+	p.localName = generator.FileName(file)
+
+	strconvPkg := p.NewImport("strconv")
+
+	for _, enum := range file.Enums() {
+		if !gogoproto.IsEnumStringer(file.FileDescriptorProto, enum.EnumDescriptorProto) {
+			continue
+		}
+		if gogoproto.IsGoEnumStringer(file.FileDescriptorProto, enum.EnumDescriptorProto) {
+			panic("Go enum stringer conflicts with new enumstringer plugin: please use gogoproto.goproto_enum_stringer or gogoproto.goproto_enum_stringer_all and set it to false")
+		}
+		p.atleastOne = true
+		ccTypeName := generator.CamelCaseSlice(enum.TypeName())
+		p.P("func (x ", ccTypeName, ") String() string {")
+		p.In()
+		p.P(`s, ok := `, ccTypeName, `_name[int32(x)]`)
+		p.P(`if ok {`)
+		p.In()
+		p.P(`return s`)
+		p.Out()
+		p.P(`}`)
+		p.P(`return `, strconvPkg.Use(), `.Itoa(int(x))`)
+		p.Out()
+		p.P(`}`)
+	}
+
+	if !p.atleastOne {
+		return
+	}
+
+}
+
+func init() {
+	generator.RegisterPlugin(NewEnumStringer())
+}
diff --git a/vendor/github.com/gogo/protobuf/plugin/equal/equal.go b/vendor/github.com/gogo/protobuf/plugin/equal/equal.go
new file mode 100644
index 000000000000..6358fc99ad11
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/plugin/equal/equal.go
@@ -0,0 +1,694 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The equal plugin generates an Equal and a VerboseEqual method for each message.
+These equal methods are quite obvious.
+The only difference is that VerboseEqual returns a non-nil error if it is not equal.
+This error contains more detail on exactly which part of the message was not equal to the other message.
+The idea is that this is useful for debugging.
+
+Equal is enabled using the following extensions:
+
+  - equal
+  - equal_all
+
+While VerboseEqual is enabled using the following extensions:
+
+  - verbose_equal
+  - verbose_equal_all
+
+The equal plugin also generates a test given it is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.equal_all) = true;
+  option (gogoproto.verbose_equal_all) = true;
+
+  message B {
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+given to the equal plugin, will generate the following code:
+
+	func (this *B) VerboseEqual(that interface{}) error {
+		if that == nil {
+			if this == nil {
+				return nil
+			}
+			return fmt2.Errorf("that == nil && this != nil")
+		}
+
+		that1, ok := that.(*B)
+		if !ok {
+			return fmt2.Errorf("that is not of type *B")
+		}
+		if that1 == nil {
+			if this == nil {
+				return nil
+			}
+			return fmt2.Errorf("that is type *B but is nil && this != nil")
+		} else if this == nil {
+			return fmt2.Errorf("that is type *B but is not nil && this == nil")
+		}
+		if !this.A.Equal(&that1.A) {
+			return fmt2.Errorf("A this(%v) Not Equal that(%v)", this.A, that1.A)
+		}
+		if len(this.G) != len(that1.G) {
+			return fmt2.Errorf("G this(%v) Not Equal that(%v)", len(this.G), len(that1.G))
+		}
+		for i := range this.G {
+			if !this.G[i].Equal(that1.G[i]) {
+				return fmt2.Errorf("G this[%v](%v) Not Equal that[%v](%v)", i, this.G[i], i, that1.G[i])
+			}
+		}
+		if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+			return fmt2.Errorf("XXX_unrecognized this(%v) Not Equal that(%v)", this.XXX_unrecognized, that1.XXX_unrecognized)
+		}
+		return nil
+	}
+
+	func (this *B) Equal(that interface{}) bool {
+		if that == nil {
+			return this == nil
+		}
+
+		that1, ok := that.(*B)
+		if !ok {
+			return false
+		}
+		if that1 == nil {
+			return this == nil
+		} else if this == nil {
+			return false
+		}
+		if !this.A.Equal(&that1.A) {
+			return false
+		}
+		if len(this.G) != len(that1.G) {
+			return false
+		}
+		for i := range this.G {
+			if !this.G[i].Equal(that1.G[i]) {
+				return false
+			}
+		}
+		if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+			return false
+		}
+		return true
+	}
+
+and the following test code:
+
+	func TestBVerboseEqual(t *testing8.T) {
+		popr := math_rand8.New(math_rand8.NewSource(time8.Now().UnixNano()))
+		p := 
NewPopulatedB(popr, false) + dAtA, err := github_com_gogo_protobuf_proto2.Marshal(p) + if err != nil { + panic(err) + } + msg := &B{} + if err := github_com_gogo_protobuf_proto2.Unmarshal(dAtA, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) + } + +*/ +package equal + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type plugin struct { + *generator.Generator + generator.PluginImports + fmtPkg generator.Single + bytesPkg generator.Single + protoPkg generator.Single +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "equal" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.fmtPkg = p.NewImport("fmt") + p.bytesPkg = p.NewImport("bytes") + p.protoPkg = p.NewImport("github.com/gogo/protobuf/proto") + + for _, msg := range file.Messages() { + if msg.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, msg.DescriptorProto) { + p.generateMessage(file, msg, true) + } + if gogoproto.HasEqual(file.FileDescriptorProto, msg.DescriptorProto) { + p.generateMessage(file, msg, false) + } + } +} + +func (p *plugin) generateNullableField(fieldname string, verbose bool) { + p.P(`if this.`, fieldname, ` != nil && that1.`, fieldname, ` != nil {`) + p.In() + p.P(`if *this.`, fieldname, ` != *that1.`, fieldname, `{`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", *this.`, fieldname, `, *that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` == nil && that.`, fieldname, ` != nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`} else if that1.`, fieldname, ` != nil {`) +} + +func (p *plugin) generateMsgNullAndTypeCheck(ccTypeName string, verbose bool) { + p.P(`if that == nil {`) + p.In() + if verbose { + p.P(`if this == nil {`) + p.In() + p.P(`return nil`) + p.Out() + p.P(`}`) + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that == nil && this != nil")`) + } else { + p.P(`return this == nil`) + } + p.Out() + p.P(`}`) + p.P(``) + p.P(`that1, ok := that.(*`, ccTypeName, `)`) + p.P(`if !ok {`) + p.In() + p.P(`that2, ok := that.(`, ccTypeName, `)`) + p.P(`if ok {`) + p.In() + p.P(`that1 = &that2`) + p.Out() + p.P(`} else {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is not of type *`, ccTypeName, `")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`if that1 == nil {`) + p.In() + if verbose { + p.P(`if this == nil {`) + p.In() + p.P(`return nil`) + p.Out() + p.P(`}`) + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is type *`, ccTypeName, ` but is nil && this != nil")`) + } else { + p.P(`return this == nil`) + } + p.Out() + p.P(`} else if this == nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is type *`, ccTypeName, ` but is not nil && this == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) +} + +func (p *plugin) 
generateField(file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, verbose bool) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + fieldname := p.GetOneOfFieldName(message, field) + repeated := field.IsRepeated() + ctype := gogoproto.IsCustomType(field) + nullable := gogoproto.IsNullable(field) + isNormal := (gogoproto.IsStdDuration(field) || + gogoproto.IsStdDouble(field) || + gogoproto.IsStdFloat(field) || + gogoproto.IsStdInt64(field) || + gogoproto.IsStdUInt64(field) || + gogoproto.IsStdInt32(field) || + gogoproto.IsStdUInt32(field) || + gogoproto.IsStdBool(field) || + gogoproto.IsStdString(field)) + isBytes := gogoproto.IsStdBytes(field) + isTimestamp := gogoproto.IsStdTime(field) + // oneof := field.OneofIndex != nil + if !repeated { + if ctype || isTimestamp { + if nullable { + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` != nil && that1.`, fieldname, ` == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if !this.`, fieldname, `.Equal(*that1.`, fieldname, `) {`) + } else { + p.P(`if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else if isNormal { + if nullable { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else if isBytes { + if nullable { + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` != nil && that1.`, fieldname, ` == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if !`, p.bytesPkg.Use(), `.Equal(*this.`, fieldname, `, *that1.`, fieldname, `) {`) + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else { + if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } else { + p.P(`if !this.`, fieldname, `.Equal(&that1.`, fieldname, `) {`) + } + } else if field.IsBytes() { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + } else if field.IsString() { + if nullable && !proto3 { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + } else { + if nullable && !proto3 { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + } else 
{ + p.P(`if len(this.`, fieldname, `) != len(that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", len(this.`, fieldname, `), len(that1.`, fieldname, `))`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.P(`for i := range this.`, fieldname, ` {`) + p.In() + if ctype && !p.IsMap(field) { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else if isTimestamp { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(*that1.`, fieldname, `[i]) {`) + } else { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } + } else if isNormal { + if nullable { + p.P(`if dthis, dthat := this.`, fieldname, `[i], that1.`, fieldname, `[i]; (dthis != nil && dthat != nil && *dthis != *dthat) || (dthis != nil && dthat == nil) || (dthis == nil && dthat != nil) {`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } + } else if isBytes { + if nullable { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(*this.`, fieldname, `[i], *that1.`, fieldname, `[i]) {`) + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + } + } else { + if p.IsMap(field) { + m := p.GoMapType(nil, field) + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + mapValue := m.ValueAliasField + mapValueNormal := (gogoproto.IsStdDuration(mapValue) || + gogoproto.IsStdDouble(mapValue) || + gogoproto.IsStdFloat(mapValue) || + gogoproto.IsStdInt64(mapValue) || + gogoproto.IsStdUInt64(mapValue) || + gogoproto.IsStdInt32(mapValue) || + gogoproto.IsStdUInt32(mapValue) || + gogoproto.IsStdBool(mapValue) || + gogoproto.IsStdString(mapValue)) + mapValueBytes := gogoproto.IsStdBytes(mapValue) + if mapValue.IsMessage() || p.IsGroup(mapValue) { + if nullable && valuegoTyp == valuegoAliasTyp { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else { + // Equal() has a pointer receiver, but map value is a value type + a := `this.` + fieldname + `[i]` + b := `that1.` + fieldname + `[i]` + if !mapValueNormal && !mapValueBytes && valuegoTyp != valuegoAliasTyp { + // cast back to the type that has the generated methods on it + a = `(` + valuegoTyp + `)(` + a + `)` + b = `(` + valuegoTyp + `)(` + b + `)` + } + p.P(`a := `, a) + p.P(`b := `, b) + if mapValueNormal { + if nullable { + p.P(`if *a != *b {`) + } else { + p.P(`if a != b {`) + } + } else if mapValueBytes { + if nullable { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(*a, *b) {`) + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(a, b) {`) + } + } else if nullable { + p.P(`if !a.Equal(b) {`) + } else { + p.P(`if !(&a).Equal(&b) {`) + } + } + } else if mapValue.IsBytes() { + if ctype { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(*that1.`, fieldname, `[i]) { //nullable`) + } else { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) { //not nullable`) + } + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + } + } else if mapValue.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } + } else if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } 
else { + p.P(`if !this.`, fieldname, `[i].Equal(&that1.`, fieldname, `[i]) {`) + } + } else if field.IsBytes() { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + } else if field.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this[%v](%v) Not Equal that[%v](%v)", i, this.`, fieldname, `[i], i, that1.`, fieldname, `[i])`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } +} + +func (p *plugin) generateMessage(file *generator.FileDescriptor, message *generator.Descriptor, verbose bool) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if verbose { + p.P(`func (this *`, ccTypeName, `) VerboseEqual(that interface{}) error {`) + } else { + p.P(`func (this *`, ccTypeName, `) Equal(that interface{}) bool {`) + } + p.In() + p.generateMsgNullAndTypeCheck(ccTypeName, verbose) + oneofs := make(map[string]struct{}) + + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if oneof { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` != nil && that1.`, fieldname, ` == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` == nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` == nil && that1.`, fieldname, ` != nil")`) + } else { + p.P(`return false`) + } + p.Out() + if verbose { + p.P(`} else if err := this.`, fieldname, `.VerboseEqual(that1.`, fieldname, `); err != nil {`) + } else { + p.P(`} else if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } + p.In() + if verbose { + p.P(`return err`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else { + p.generateField(file, message, field, verbose) + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + fieldname := "XXX_InternalExtensions" + p.P(`thismap := `, p.protoPkg.Use(), `.GetUnsafeExtensionsMap(this)`) + p.P(`thatmap := `, p.protoPkg.Use(), `.GetUnsafeExtensionsMap(that1)`) + p.P(`for k, v := range thismap {`) + p.In() + p.P(`if v2, ok := thatmap[k]; ok {`) + p.In() + p.P(`if !v.Equal(&v2) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this[%v](%v) Not Equal that[%v](%v)", k, thismap[k], k, thatmap[k])`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, `[%v] Not In that", k)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + + p.P(`for k, _ := range thatmap {`) + p.In() + p.P(`if _, ok := thismap[k]; !ok {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, `[%v] Not In this", k)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } else { + fieldname := "XXX_extensions" + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), 
`.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + fieldname := "XXX_unrecognized" + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + + //Generate Equal methods for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, field := range m.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + if verbose { + p.P(`func (this *`, ccTypeName, `) VerboseEqual(that interface{}) error {`) + } else { + p.P(`func (this *`, ccTypeName, `) Equal(that interface{}) bool {`) + } + p.In() + + p.generateMsgNullAndTypeCheck(ccTypeName, verbose) + vanity.TurnOffNullableForNativeTypes(field) + p.generateField(file, message, field, verbose) + + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go b/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go new file mode 100644 index 000000000000..1233647a56dd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go @@ -0,0 +1,109 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package equal
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/plugin/testgen"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+)
+
+type test struct {
+	*generator.Generator
+}
+
+func NewTest(g *generator.Generator) testgen.TestPlugin {
+	return &test{g}
+}
+
+func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool {
+	used := false
+	randPkg := imports.NewImport("math/rand")
+	timePkg := imports.NewImport("time")
+	testingPkg := imports.NewImport("testing")
+	protoPkg := imports.NewImport("github.com/gogo/protobuf/proto")
+	unsafePkg := imports.NewImport("unsafe")
+	if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) {
+		protoPkg = imports.NewImport("github.com/golang/protobuf/proto")
+	}
+	for _, message := range file.Messages() {
+		ccTypeName := generator.CamelCaseSlice(message.TypeName())
+		if !gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) {
+			continue
+		}
+		if message.DescriptorProto.GetOptions().GetMapEntry() {
+			continue
+		}
+
+		if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) {
+			used = true
+			hasUnsafe := gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) ||
+				gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto)
+			p.P(`func Test`, ccTypeName, `VerboseEqual(t *`, testingPkg.Use(), `.T) {`)
+			p.In()
+			if hasUnsafe {
+				p.P(`var bigendian uint32 = 0x01020304`)
+				p.P(`if *(*byte)(`, unsafePkg.Use(), `.Pointer(&bigendian)) == 1 {`)
+				p.In()
+				p.P(`t.Skip("unsafe does not work on big endian architectures")`)
+				p.Out()
+				p.P(`}`)
+			}
+			p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`)
+			p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`)
+			p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`)
+			p.P(`if err != nil {`)
+			p.In()
+			p.P(`panic(err)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`msg := &`, ccTypeName, `{}`)
+			p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`)
+			p.In()
+			p.P(`panic(err)`)
+			p.Out()
+			p.P(`}`)
+			p.P(`if err := p.VerboseEqual(msg); err != nil {`)
+			p.In()
+			p.P(`t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)`)
+			p.Out()
+			p.P(`}`)
+			p.Out()
+			p.P(`}`)
+		}
+
+	}
+	return used
+}
+
+func init() {
+	testgen.RegisterTestPlugin(NewTest)
+}
diff --git a/vendor/github.com/gogo/protobuf/plugin/face/face.go b/vendor/github.com/gogo/protobuf/plugin/face/face.go
new file mode 100644
index 000000000000..a02934526526
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/plugin/face/face.go
@@ -0,0 +1,233 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// 
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The face plugin generates a function which can convert a structure that satisfies an interface (face) into the specified structure.
+This interface contains getters for each of the fields in the struct.
+The specified struct is also generated with the getters.
+This means that getters should be turned off so as not to conflict with face getters.
+This allows it to satisfy its own face.
+
+It is enabled by the following extensions:
+
+  - face
+  - face_all
+
+Turn off getters by using the following extensions:
+
+  - getters
+  - getters_all
+
+The face plugin also generates a test, given that it is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+All the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  message A {
+	option (gogoproto.face) = true;
+	option (gogoproto.goproto_getters) = false;
+	optional string Description = 1 [(gogoproto.nullable) = false];
+	optional int64 Number = 2 [(gogoproto.nullable) = false];
+	optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+  }
+
+given to the face plugin, will generate the following code:
+
+	type AFace interface {
+		Proto() github_com_gogo_protobuf_proto.Message
+		GetDescription() string
+		GetNumber() int64
+		GetId() github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+	func (this *A) Proto() github_com_gogo_protobuf_proto.Message {
+		return this
+	}
+
+	func (this *A) TestProto() github_com_gogo_protobuf_proto.Message {
+		return NewAFromFace(this)
+	}
+
+	func (this *A) GetDescription() string {
+		return this.Description
+	}
+
+	func (this *A) GetNumber() int64 {
+		return this.Number
+	}
+
+	func (this *A) GetId() github_com_gogo_protobuf_test_custom.Uuid {
+		return this.Id
+	}
+
+	func NewAFromFace(that AFace) *A {
+		this := &A{}
+		this.Description = that.GetDescription()
+		this.Number = that.GetNumber()
+		this.Id = that.GetId()
+		return this
+	}
+
+and the following test code:
+
+	func TestAFace(t *testing7.T) {
+		popr := math_rand7.New(math_rand7.NewSource(time7.Now().UnixNano()))
+		p := NewPopulatedA(popr, true)
+		msg := p.TestProto()
+		if !p.Equal(msg) {
+			t.Fatalf("%#v !Face Equal %#v", msg, p)
+		}
+	}
+
+The struct A, representing the message, will also be generated just like always.
+As you can see, A satisfies its own Face, AFace.
+
+Creating another struct which satisfies AFace is very easy.
+Simply create all the methods specified in AFace, as sketched below.
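+
+For example, a minimal hand-written MyStruct could provide the getters like
+this (a sketch; the struct's own field names are illustrative, not generated
+output):
+
+	type MyStruct struct {
+		Desc string
+		Num  int64
+		Id   github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+	// Each getter exposes a field under the name that AFace expects.
+	func (this *MyStruct) GetDescription() string {
+		return this.Desc
+	}
+
+	func (this *MyStruct) GetNumber() int64 {
+		return this.Num
+	}
+
+	func (this *MyStruct) GetId() github_com_gogo_protobuf_test_custom.Uuid {
+		return this.Id
+	}
+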
+Implementing The Proto method is done with the helper function NewAFromFace: + + func (this *MyStruct) Proto() proto.Message { + return NewAFromFace(this) + } + +just the like TestProto method which is used to test the NewAFromFace function. + +*/ +package face + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type plugin struct { + *generator.Generator + generator.PluginImports +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "face" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + if !gogoproto.IsFace(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if message.DescriptorProto.HasExtension() { + panic("face does not support message with extensions") + } + if gogoproto.HasGoGetters(file.FileDescriptorProto, message.DescriptorProto) { + panic("face requires getters to be disabled please use gogoproto.getters or gogoproto.getters_all and set it to false") + } + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`type `, ccTypeName, `Face interface{`) + p.In() + p.P(`Proto() `, protoPkg.Use(), `.Message`) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + goTyp, _ := p.GoType(message, field) + if p.IsMap(field) { + m := p.GoMapType(nil, field) + goTyp = m.GoType + } + p.P(`Get`, fieldname, `() `, goTyp) + } + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (this *`, ccTypeName, `) Proto() `, protoPkg.Use(), `.Message {`) + p.In() + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (this *`, ccTypeName, `) TestProto() `, protoPkg.Use(), `.Message {`) + p.In() + p.P(`return New`, ccTypeName, `FromFace(this)`) + p.Out() + p.P(`}`) + p.P(``) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + goTyp, _ := p.GoType(message, field) + if p.IsMap(field) { + m := p.GoMapType(nil, field) + goTyp = m.GoType + } + p.P(`func (this *`, ccTypeName, `) Get`, fieldname, `() `, goTyp, `{`) + p.In() + p.P(` return this.`, fieldname) + p.Out() + p.P(`}`) + p.P(``) + } + p.P(``) + p.P(`func New`, ccTypeName, `FromFace(that `, ccTypeName, `Face) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + p.P(`this.`, fieldname, ` = that.Get`, fieldname, `()`) + } + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/face/facetest.go b/vendor/github.com/gogo/protobuf/plugin/face/facetest.go new file mode 100644 index 000000000000..467cc0a6640d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/face/facetest.go @@ -0,0 +1,82 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package face + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsFace(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `Face(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`msg := p.TestProto()`) + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("%#v !Face Equal %#v", msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go b/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go new file mode 100644 index 000000000000..bc89a7b871d0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go @@ -0,0 +1,386 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The gostring plugin generates a GoString method for each message.
+The GoString method is called whenever you use fmt.Printf like this:
+
+  fmt.Printf("%#v", mymessage)
+
+or whenever you call GoString() directly.
+The output produced by the GoString method can be copied into code and used to set a variable.
+It is valid Go code, populated exactly like the struct that was printed.
+
+It is enabled by the following extensions:
+
+  - gostring
+  - gostring_all
+
+The gostring plugin also generates a test, given that it is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+All the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.gostring_all) = true;
+
+  message A {
+	optional string Description = 1 [(gogoproto.nullable) = false];
+	optional int64 Number = 2 [(gogoproto.nullable) = false];
+	optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+  }
+
+given to the gostring plugin, will generate the following code:
+
+	func (this *A) GoString() string {
+		if this == nil {
+			return "nil"
+		}
+		s := strings1.Join([]string{`&test.A{` + `Description:` + fmt1.Sprintf("%#v", this.Description), `Number:` + fmt1.Sprintf("%#v", this.Number), `Id:` + fmt1.Sprintf("%#v", this.Id), `XXX_unrecognized:` + fmt1.Sprintf("%#v", this.XXX_unrecognized) + `}`}, ", ")
+		return s
+	}
+
+and the following test code:
+
+	func TestAGoString(t *testing6.T) {
+		popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano()))
+		p := NewPopulatedA(popr, false)
+		s1 := p.GoString()
+		s2 := fmt2.Sprintf("%#v", p)
+		if s1 != s2 {
+			t.Fatalf("GoString want %v got %v", s1, s2)
+		}
+		_, err := go_parser.ParseExpr(s1)
+		if err != nil {
+			panic(err)
+		}
+	}
+
+Typically fmt.Printf("%#v") stops printing when it reaches a pointer and does
+not print the values behind it, while the generated GoString method always prints all values, recursively.
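+
+For illustration, a plain struct without a GoString method prints pointer
+fields as addresses under %#v (Plain here is a hypothetical type, not part of
+the generated output):
+
+	type Plain struct {
+		N *int
+	}
+
+	n := 5
+	// Prints something like: main.Plain{N:(*int)(0xc0000140a0)}
+	fmt.Printf("%#v\n", Plain{N: &n})
+
+whereas a message with a generated GoString method prints the pointed-to
+values themselves.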
+ +*/ +package gostring + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type gostring struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string + overwrite bool +} + +func NewGoString() *gostring { + return &gostring{} +} + +func (p *gostring) Name() string { + return "gostring" +} + +func (p *gostring) Overwrite() { + p.overwrite = true +} + +func (p *gostring) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *gostring) Generate(file *generator.FileDescriptor) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + + p.localName = generator.FileName(file) + + fmtPkg := p.NewImport("fmt") + stringsPkg := p.NewImport("strings") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + sortPkg := p.NewImport("sort") + strconvPkg := p.NewImport("strconv") + reflectPkg := p.NewImport("reflect") + sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys") + + extensionToGoStringUsed := false + for _, message := range file.Messages() { + if !p.overwrite && !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + packageName := file.GoPackageName() + + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) GoString() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + + p.P(`s := make([]string, 0, `, strconv.Itoa(len(message.Field)+4), `)`) + p.P(`s = append(s, "&`, packageName, ".", ccTypeName, `{")`) + + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + fieldname := p.GetFieldName(message, field) + oneof := field.OneofIndex != nil + if oneof { + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + p.Out() + p.P(`}`) + } else if p.IsMap(field) { + m := p.GoMapType(nil, field) + mapgoTyp, keyField, keyAliasField := m.GoType, m.KeyField, m.KeyAliasField + keysName := `keysFor` + fieldname + keygoTyp, _ := p.GoType(nil, keyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, keyAliasField) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + keyCapTyp := generator.CamelCase(keygoTyp) + p.P(keysName, ` := make([]`, keygoTyp, `, 0, len(this.`, fieldname, `))`) + p.P(`for k, _ := range this.`, fieldname, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(keysName, ` = append(`, keysName, `, k)`) + } else { + p.P(keysName, ` = append(`, keysName, `, `, keygoTyp, `(k))`) + } + p.Out() + p.P(`}`) + p.P(sortKeysPkg.Use(), `.`, keyCapTyp, `s(`, keysName, `)`) + mapName := `mapStringFor` + fieldname + p.P(mapName, ` := "`, mapgoTyp, `{"`) + p.P(`for _, k := range `, keysName, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(mapName, ` += fmt.Sprintf("%#v: %#v,", k, this.`, fieldname, `[k])`) + } else { + p.P(mapName, ` += fmt.Sprintf("%#v: %#v,", k, this.`, 
fieldname, `[`, keygoAliasTyp, `(k)])`) + } + p.Out() + p.P(`}`) + p.P(mapName, ` += "}"`) + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`s = append(s, "`, fieldname, `: " + `, mapName, `+ ",\n")`) + p.Out() + p.P(`}`) + } else if (field.IsMessage() && !gogoproto.IsCustomType(field) && !gogoproto.IsStdType(field)) || p.IsGroup(field) { + if nullable || repeated { + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + } + if nullable { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } else if repeated { + if nullable { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } else { + goTyp, _ := p.GoType(message, field) + goTyp = strings.Replace(goTyp, "[]", "", 1) + p.P("vs := make([]", goTyp, ", len(this.", fieldname, "))") + p.P("for i := range vs {") + p.In() + p.P("vs[i] = this.", fieldname, "[i]") + p.Out() + p.P("}") + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", vs) + ",\n")`) + } + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, stringsPkg.Use(), `.Replace(this.`, fieldname, `.GoString()`, ",`&`,``,1)", ` + ",\n")`) + } + if nullable || repeated { + p.Out() + p.P(`}`) + } + } else { + if !proto3 && (nullable || repeated) { + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + } + if field.IsEnum() { + if nullable && !repeated && !proto3 { + goTyp, _ := p.GoType(message, field) + p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } + } else { + if nullable && !repeated && !proto3 { + goTyp, _ := p.GoType(message, field) + p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } + } + if !proto3 && (nullable || repeated) { + p.Out() + p.P(`}`) + } + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`s = append(s, "XXX_InternalExtensions: " + extensionToGoString`, p.localName, `(this) + ",\n")`) + extensionToGoStringUsed = true + } else { + p.P(`if this.XXX_extensions != nil {`) + p.In() + p.P(`s = append(s, "XXX_extensions: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.XXX_extensions) + ",\n")`) + p.Out() + p.P(`}`) + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if this.XXX_unrecognized != nil {`) + p.In() + p.P(`s = append(s, "XXX_unrecognized:" + `, fmtPkg.Use(), `.Sprintf("%#v", this.XXX_unrecognized) + ",\n")`) + p.Out() + p.P(`}`) + } + + p.P(`s = append(s, "}")`) + p.P(`return `, stringsPkg.Use(), `.Join(s, "")`) + p.Out() + p.P(`}`) + + //Generate GoString methods for oneof fields + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (this *`, ccTypeName, `) GoString() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + fieldname := p.GetOneOfFieldName(message, field) + outStr := strings.Join([]string{ + "s := ", + stringsPkg.Use(), ".Join([]string{`&", packageName, ".", ccTypeName, "{` + 
\n", + "`", fieldname, ":` + ", fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `)`, + " + `}`", + `}`, + `,", "`, + `)`}, "") + p.P(outStr) + p.P(`return s`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`func valueToGoString`, p.localName, `(v interface{}, typ string) string {`) + p.In() + p.P(`rv := `, reflectPkg.Use(), `.ValueOf(v)`) + p.P(`if rv.IsNil() {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P(`pv := `, reflectPkg.Use(), `.Indirect(rv).Interface()`) + p.P(`return `, fmtPkg.Use(), `.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)`) + p.Out() + p.P(`}`) + + if extensionToGoStringUsed { + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + fmt.Fprintf(os.Stderr, "The GoString plugin for messages with extensions requires importing gogoprotobuf. Please see file %s", file.GetName()) + os.Exit(1) + } + p.P(`func extensionToGoString`, p.localName, `(m `, protoPkg.Use(), `.Message) string {`) + p.In() + p.P(`e := `, protoPkg.Use(), `.GetUnsafeExtensionsMap(m)`) + p.P(`if e == nil { return "nil" }`) + p.P(`s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"`) + p.P(`keys := make([]int, 0, len(e))`) + p.P(`for k := range e {`) + p.In() + p.P(`keys = append(keys, int(k))`) + p.Out() + p.P(`}`) + p.P(sortPkg.Use(), `.Ints(keys)`) + p.P(`ss := []string{}`) + p.P(`for _, k := range keys {`) + p.In() + p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`) + p.Out() + p.P(`}`) + p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "})"`) + p.P(`return s`) + p.Out() + p.P(`}`) + } +} + +func init() { + generator.RegisterPlugin(NewGoString()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go b/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go new file mode 100644 index 000000000000..c790e5908800 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go @@ -0,0 +1,90 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package gostring + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + fmtPkg := imports.NewImport("fmt") + parserPkg := imports.NewImport("go/parser") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `GoString(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`s1 := p.GoString()`) + p.P(`s2 := `, fmtPkg.Use(), `.Sprintf("%#v", p)`) + p.P(`if s1 != s2 {`) + p.In() + p.P(`t.Fatalf("GoString want %v got %v", s1, s2)`) + p.Out() + p.P(`}`) + p.P(`_, err := `, parserPkg.Use(), `.ParseExpr(s1)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatal(err)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go b/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go new file mode 100644 index 000000000000..f82c28c281e6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go @@ -0,0 +1,1140 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The marshalto plugin generates a Marshal and MarshalTo method for each message. 
+The `Marshal() ([]byte, error)` method means that the message
+implements the Marshaler interface.
+This allows proto.Marshal to be faster, calling the generated Marshal method rather than using reflection to marshal the struct.
+
+It is enabled by the following extensions:
+
+  - marshaler
+  - marshaler_all
+
+Or, if you want to use the unsafe package in your generated code, by the following extensions:
+
+  - unsafe_marshaler
+  - unsafe_marshaler_all
+
+The speed-up from using the unsafe package is not very significant.
+
+The generation of marshalling tests is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Benchmarks are also generated, given that one of the following extensions is enabled:
+
+  - benchgen
+  - benchgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+All the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+option (gogoproto.marshaler_all) = true;
+
+message B {
+	option (gogoproto.description) = true;
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+}
+
+given to the marshalto plugin, will generate the following code:
+
+	func (m *B) Marshal() (dAtA []byte, err error) {
+		size := m.Size()
+		dAtA = make([]byte, size)
+		n, err := m.MarshalToSizedBuffer(dAtA[:size])
+		if err != nil {
+			return nil, err
+		}
+		return dAtA[:n], nil
+	}
+
+	func (m *B) MarshalTo(dAtA []byte) (int, error) {
+		size := m.Size()
+		return m.MarshalToSizedBuffer(dAtA[:size])
+	}
+
+	func (m *B) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+		i := len(dAtA)
+		_ = i
+		var l int
+		_ = l
+		if m.XXX_unrecognized != nil {
+			i -= len(m.XXX_unrecognized)
+			copy(dAtA[i:], m.XXX_unrecognized)
+		}
+		if len(m.G) > 0 {
+			for iNdEx := len(m.G) - 1; iNdEx >= 0; iNdEx-- {
+				{
+					size := m.G[iNdEx].Size()
+					i -= size
+					if _, err := m.G[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+						return 0, err
+					}
+					i = encodeVarintExample(dAtA, i, uint64(size))
+				}
+				i--
+				dAtA[i] = 0x12
+			}
+		}
+		{
+			size, err := m.A.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintExample(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+		return len(dAtA) - i, nil
+	}
+
+As shown above, Marshal calculates the size of the not-yet-marshalled message
+and allocates an appropriately sized buffer.
+It then calls the MarshalToSizedBuffer method, which requires a preallocated buffer and marshals backwards from the end of the buffer.
+The MarshalTo method instead allows a user to supply a preallocated, reusable buffer.
+
+The Size method is generated using the size plugin and the gogoproto.sizer and gogoproto.sizer_all extensions.
+The user can also use the generated Size method to check that the reusable buffer is still big enough.
+
+The generated tests and benchmarks verify correctness and show that this is a significant speed improvement.
+
+An additional message-level option `stable_marshaler` (and the file-level
+option `stable_marshaler_all`) exists, which causes the generated marshalling
+code to behave deterministically. Today, this only changes the serialization of
+maps: they are serialized in sorted key order.
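+
+For example, a caller can reuse a single buffer across many messages instead
+of letting Marshal allocate one each time (a sketch; msgs and send are
+placeholders, not part of the generated API):
+
+	var buf []byte
+	for _, m := range msgs {
+		size := m.Size()
+		if cap(buf) < size {
+			// Grow the buffer only when a message no longer fits.
+			buf = make([]byte, size)
+		}
+		n, err := m.MarshalTo(buf[:size])
+		if err != nil {
+			return err
+		}
+		send(buf[:n]) // with an exact Size, n == size
+	}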
+*/ +package marshalto + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type NumGen interface { + Next() string + Current() string +} + +type numGen struct { + index int +} + +func NewNumGen() NumGen { + return &numGen{0} +} + +func (this *numGen) Next() string { + this.index++ + return this.Current() +} + +func (this *numGen) Current() string { + return strconv.Itoa(this.index) +} + +type marshalto struct { + *generator.Generator + generator.PluginImports + atleastOne bool + errorsPkg generator.Single + protoPkg generator.Single + sortKeysPkg generator.Single + mathPkg generator.Single + typesPkg generator.Single + binaryPkg generator.Single + localName string +} + +func NewMarshal() *marshalto { + return &marshalto{} +} + +func (p *marshalto) Name() string { + return "marshalto" +} + +func (p *marshalto) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *marshalto) callFixed64(varName ...string) { + p.P(`i -= 8`) + p.P(p.binaryPkg.Use(), `.LittleEndian.PutUint64(dAtA[i:], uint64(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) callFixed32(varName ...string) { + p.P(`i -= 4`) + p.P(p.binaryPkg.Use(), `.LittleEndian.PutUint32(dAtA[i:], uint32(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) callVarint(varName ...string) { + p.P(`i = encodeVarint`, p.localName, `(dAtA, i, uint64(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) encodeKey(fieldNumber int32, wireType int) { + x := uint32(fieldNumber)<<3 | uint32(wireType) + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + for i = len(keybuf) - 1; i >= 0; i-- { + p.P(`i--`) + p.P(`dAtA[i] = `, fmt.Sprintf("%#v", keybuf[i])) + } +} + +func keySize(fieldNumber int32, wireType int) int { + x := uint32(fieldNumber)<<3 | uint32(wireType) + size := 0 + for size = 0; x > 127; size++ { + x >>= 7 + } + size++ + return size +} + +func wireToType(wire string) int { + switch wire { + case "fixed64": + return proto.WireFixed64 + case "fixed32": + return proto.WireFixed32 + case "varint": + return proto.WireVarint + case "bytes": + return proto.WireBytes + case "group": + return proto.WireBytes + case "zigzag32": + return proto.WireVarint + case "zigzag64": + return proto.WireVarint + } + panic("unreachable") +} + +func (p *marshalto) mapField(numGen NumGen, field *descriptor.FieldDescriptorProto, kvField *descriptor.FieldDescriptorProto, varName string, protoSizer bool) { + switch kvField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(`, varName, `))`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(`, varName, `))`) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + p.callVarint(varName) + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.callFixed64(varName) + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.callFixed32(varName) + case 
descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`i--`) + p.P(`if `, varName, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + if gogoproto.IsCustomType(field) && kvField.IsBytes() { + p.forward(varName, true, protoSizer) + } else { + p.P(`i -= len(`, varName, `)`) + p.P(`copy(dAtA[i:], `, varName, `)`) + p.callVarint(`len(`, varName, `)`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.callVarint(`(uint32(`, varName, `) << 1) ^ uint32((`, varName, ` >> 31))`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if !p.marshalAllSizeOf(kvField, `(*`+varName+`)`, numGen.Next()) { + if gogoproto.IsCustomType(field) { + p.forward(varName, true, protoSizer) + } else { + p.backward(varName, true) + } + } + + } +} + +type orderFields []*descriptor.FieldDescriptorProto + +func (this orderFields) Len() int { + return len(this) +} + +func (this orderFields) Less(i, j int) bool { + return this[i].GetNumber() < this[j].GetNumber() +} + +func (this orderFields) Swap(i, j int) { + this[i], this[j] = this[j], this[i] +} + +func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { + fieldname := p.GetOneOfFieldName(message, field) + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + required := field.IsRequired() + + protoSizer := gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) + doNilCheck := gogoproto.NeedsNilCheck(proto3, field) + if required && nullable { + p.P(`if m.`, fieldname, `== nil {`) + p.In() + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.P(`return 0, new(`, p.protoPkg.Use(), `.RequiredNotSetError)`) + } else { + p.P(`return 0, `, p.protoPkg.Use(), `.NewRequiredNotSetError("`, field.GetName(), `")`) + } + p.Out() + p.P(`} else {`) + } else if repeated { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + } else if doNilCheck { + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + } + packed := field.IsPacked() || (proto3 && field.IsPacked3()) + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = proto.WireBytes + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if packed { + val := p.reverseListRange(`m.`, fieldname) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(`, val, `))`) + p.callFixed64("f" + numGen.Current()) + p.Out() + p.P(`}`) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(`, val, `))`) + p.callFixed64("f" + numGen.Current()) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + } else { + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + } + 
case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if packed { + val := p.reverseListRange(`m.`, fieldname) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(`, val, `))`) + p.callFixed32("f" + numGen.Current()) + p.Out() + p.P(`}`) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(`, val, `))`) + p.callFixed32("f" + numGen.Current()) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + } else { + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + if packed { + jvar := "j" + numGen.Next() + p.P(`dAtA`, numGen.Next(), ` := make([]byte, len(m.`, fieldname, `)*10)`) + p.P(`var `, jvar, ` int`) + if *field.Type == descriptor.FieldDescriptorProto_TYPE_INT64 || + *field.Type == descriptor.FieldDescriptorProto_TYPE_INT32 { + p.P(`for _, num1 := range m.`, fieldname, ` {`) + p.In() + p.P(`num := uint64(num1)`) + } else { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + } + p.P(`for num >= 1<<7 {`) + p.In() + p.P(`dAtA`, numGen.Current(), `[`, jvar, `] = uint8(uint64(num)&0x7f|0x80)`) + p.P(`num >>= 7`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.P(`dAtA`, numGen.Current(), `[`, jvar, `] = uint8(num)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.P(`i -= `, jvar) + p.P(`copy(dAtA[i:], dAtA`, numGen.Current(), `[:`, jvar, `])`) + p.callVarint(jvar) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.callVarint(val) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callVarint(`m.`, fieldname) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callVarint(`m.`, fieldname) + p.encodeKey(fieldNumber, wireType) + } else { + p.callVarint(`*m.`, fieldname) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if packed { + val := p.reverseListRange(`m.`, fieldname) + p.callFixed64(val) + p.Out() + p.P(`}`) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.callFixed64(val) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callFixed64("m." + fieldname) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callFixed64("m." + fieldname) + p.encodeKey(fieldNumber, wireType) + } else { + p.callFixed64("*m." 
+ fieldname) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if packed { + val := p.reverseListRange(`m.`, fieldname) + p.callFixed32(val) + p.Out() + p.P(`}`) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.callFixed32(val) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callFixed32("m." + fieldname) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callFixed32("m." + fieldname) + p.encodeKey(fieldNumber, wireType) + } else { + p.callFixed32("*m." + fieldname) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if packed { + val := p.reverseListRange(`m.`, fieldname) + p.P(`i--`) + p.P(`if `, val, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.callVarint(`len(m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`i--`) + p.P(`if `, val, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`i--`) + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.P(`i--`) + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + } else { + p.P(`i--`) + p.P(`if *m.`, fieldname, ` {`) + p.In() + p.P(`dAtA[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`dAtA[i] = 0`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`i -= len(`, val, `)`) + p.P(`copy(dAtA[i:], `, val, `)`) + p.callVarint(`len(`, val, `)`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + p.P(`i -= len(m.`, fieldname, `)`) + p.P(`copy(dAtA[i:], m.`, fieldname, `)`) + p.callVarint(`len(m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.P(`i -= len(m.`, fieldname, `)`) + p.P(`copy(dAtA[i:], m.`, fieldname, `)`) + p.callVarint(`len(m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + } else { + p.P(`i -= len(*m.`, fieldname, `)`) + p.P(`copy(dAtA[i:], *m.`, fieldname, `)`) + p.callVarint(`len(*m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("marshaler does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if p.IsMap(field) { + m := p.GoMapType(nil, field) + keygoTyp, keywire := p.GoType(nil, m.KeyField) + keygoAliasTyp, _ := p.GoType(nil, m.KeyAliasField) + // keys may not be pointers + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + keyCapTyp := generator.CamelCase(keygoTyp) + valuegoTyp, 
valuewire := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + var val string + if gogoproto.IsStableMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + keysName := `keysFor` + fieldname + p.P(keysName, ` := make([]`, keygoTyp, `, 0, len(m.`, fieldname, `))`) + p.P(`for k := range m.`, fieldname, ` {`) + p.In() + p.P(keysName, ` = append(`, keysName, `, `, keygoTyp, `(k))`) + p.Out() + p.P(`}`) + p.P(p.sortKeysPkg.Use(), `.`, keyCapTyp, `s(`, keysName, `)`) + val = p.reverseListRange(keysName) + } else { + p.P(`for k := range m.`, fieldname, ` {`) + val = "k" + p.In() + } + if gogoproto.IsStableMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`v := m.`, fieldname, `[`, keygoAliasTyp, `(`, val, `)]`) + } else { + p.P(`v := m.`, fieldname, `[`, val, `]`) + } + p.P(`baseI := i`) + accessor := `v` + + if m.ValueField.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + if valuegoTyp != valuegoAliasTyp && !gogoproto.IsStdType(m.ValueAliasField) { + if nullable { + // cast back to the type that has the generated methods on it + accessor = `((` + valuegoTyp + `)(` + accessor + `))` + } else { + accessor = `((*` + valuegoTyp + `)(&` + accessor + `))` + } + } else if !nullable { + accessor = `(&v)` + } + } + + nullableMsg := nullable && (m.ValueField.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE || + gogoproto.IsCustomType(field) && m.ValueField.IsBytes()) + plainBytes := m.ValueField.IsBytes() && !gogoproto.IsCustomType(field) + if nullableMsg { + p.P(`if `, accessor, ` != nil { `) + p.In() + } else if plainBytes { + if proto3 { + p.P(`if len(`, accessor, `) > 0 {`) + } else { + p.P(`if `, accessor, ` != nil {`) + } + p.In() + } + p.mapField(numGen, field, m.ValueAliasField, accessor, protoSizer) + p.encodeKey(2, wireToType(valuewire)) + if nullableMsg || plainBytes { + p.Out() + p.P(`}`) + } + + p.mapField(numGen, field, m.KeyField, val, protoSizer) + p.encodeKey(1, wireToType(keywire)) + + p.callVarint(`baseI - i`) + + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + sizeOfVarName := val + if gogoproto.IsNullable(field) { + sizeOfVarName = `*` + val + } + if !p.marshalAllSizeOf(field, sizeOfVarName, ``) { + if gogoproto.IsCustomType(field) { + p.forward(val, true, protoSizer) + } else { + p.backward(val, true) + } + } + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else { + sizeOfVarName := `m.` + fieldname + if gogoproto.IsNullable(field) { + sizeOfVarName = `*` + sizeOfVarName + } + if !p.marshalAllSizeOf(field, sizeOfVarName, numGen.Next()) { + if gogoproto.IsCustomType(field) { + p.forward(`m.`+fieldname, true, protoSizer) + } else { + p.backward(`m.`+fieldname, true) + } + } + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if !gogoproto.IsCustomType(field) { + if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`i -= len(`, val, `)`) + p.P(`copy(dAtA[i:], `, val, `)`) + p.callVarint(`len(`, val, `)`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + p.P(`i -= len(m.`, fieldname, `)`) + p.P(`copy(dAtA[i:], m.`, fieldname, `)`) + p.callVarint(`len(m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else { + p.P(`i -= len(m.`, 
fieldname, `)`) + p.P(`copy(dAtA[i:], m.`, fieldname, `)`) + p.callVarint(`len(m.`, fieldname, `)`) + p.encodeKey(fieldNumber, wireType) + } + } else { + if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.forward(val, true, protoSizer) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else { + p.forward(`m.`+fieldname, true, protoSizer) + p.encodeKey(fieldNumber, wireType) + } + } + case descriptor.FieldDescriptorProto_TYPE_SINT32: + if packed { + datavar := "dAtA" + numGen.Next() + jvar := "j" + numGen.Next() + p.P(datavar, ` := make([]byte, len(m.`, fieldname, ")*5)") + p.P(`var `, jvar, ` int`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + xvar := "x" + numGen.Next() + p.P(xvar, ` := (uint32(num) << 1) ^ uint32((num >> 31))`) + p.P(`for `, xvar, ` >= 1<<7 {`) + p.In() + p.P(datavar, `[`, jvar, `] = uint8(uint64(`, xvar, `)&0x7f|0x80)`) + p.P(jvar, `++`) + p.P(xvar, ` >>= 7`) + p.Out() + p.P(`}`) + p.P(datavar, `[`, jvar, `] = uint8(`, xvar, `)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.P(`i -= `, jvar) + p.P(`copy(dAtA[i:], `, datavar, `[:`, jvar, `])`) + p.callVarint(jvar) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`x`, numGen.Next(), ` := (uint32(`, val, `) << 1) ^ uint32((`, val, ` >> 31))`) + p.callVarint(`x`, numGen.Current()) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callVarint(`(uint32(m.`, fieldname, `) << 1) ^ uint32((m.`, fieldname, ` >> 31))`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callVarint(`(uint32(m.`, fieldname, `) << 1) ^ uint32((m.`, fieldname, ` >> 31))`) + p.encodeKey(fieldNumber, wireType) + } else { + p.callVarint(`(uint32(*m.`, fieldname, `) << 1) ^ uint32((*m.`, fieldname, ` >> 31))`) + p.encodeKey(fieldNumber, wireType) + } + case descriptor.FieldDescriptorProto_TYPE_SINT64: + if packed { + jvar := "j" + numGen.Next() + xvar := "x" + numGen.Next() + datavar := "dAtA" + numGen.Next() + p.P(`var `, jvar, ` int`) + p.P(datavar, ` := make([]byte, len(m.`, fieldname, `)*10)`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(xvar, ` := (uint64(num) << 1) ^ uint64((num >> 63))`) + p.P(`for `, xvar, ` >= 1<<7 {`) + p.In() + p.P(datavar, `[`, jvar, `] = uint8(uint64(`, xvar, `)&0x7f|0x80)`) + p.P(jvar, `++`) + p.P(xvar, ` >>= 7`) + p.Out() + p.P(`}`) + p.P(datavar, `[`, jvar, `] = uint8(`, xvar, `)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.P(`i -= `, jvar) + p.P(`copy(dAtA[i:], `, datavar, `[:`, jvar, `])`) + p.callVarint(jvar) + p.encodeKey(fieldNumber, wireType) + } else if repeated { + val := p.reverseListRange(`m.`, fieldname) + p.P(`x`, numGen.Next(), ` := (uint64(`, val, `) << 1) ^ uint64((`, val, ` >> 63))`) + p.callVarint("x" + numGen.Current()) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.callVarint(`(uint64(m.`, fieldname, `) << 1) ^ uint64((m.`, fieldname, ` >> 63))`) + p.encodeKey(fieldNumber, wireType) + p.Out() + p.P(`}`) + } else if !nullable { + p.callVarint(`(uint64(m.`, fieldname, `) << 1) ^ uint64((m.`, fieldname, ` >> 63))`) + p.encodeKey(fieldNumber, wireType) + } else { + p.callVarint(`(uint64(*m.`, fieldname, `) << 1) ^ uint64((*m.`, fieldname, ` >> 63))`) + p.encodeKey(fieldNumber, wireType) + } + default: + panic("not implemented") + } + if (required && nullable) || repeated || doNilCheck { + p.Out() + 
p.P(`}`) + } +} + +func (p *marshalto) Generate(file *generator.FileDescriptor) { + numGen := NewNumGen() + p.PluginImports = generator.NewPluginImports(p.Generator) + + p.atleastOne = false + p.localName = generator.FileName(file) + + p.mathPkg = p.NewImport("math") + p.sortKeysPkg = p.NewImport("github.com/gogo/protobuf/sortkeys") + p.protoPkg = p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + p.errorsPkg = p.NewImport("errors") + p.binaryPkg = p.NewImport("encoding/binary") + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") + + for _, message := range file.Messages() { + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) && + !gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + p.atleastOne = true + + p.P(`func (m *`, ccTypeName, `) Marshal() (dAtA []byte, err error) {`) + p.In() + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := m.ProtoSize()`) + } else { + p.P(`size := m.Size()`) + } + p.P(`dAtA = make([]byte, size)`) + p.P(`n, err := m.MarshalToSizedBuffer(dAtA[:size])`) + p.P(`if err != nil {`) + p.In() + p.P(`return nil, err`) + p.Out() + p.P(`}`) + p.P(`return dAtA[:n], nil`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (m *`, ccTypeName, `) MarshalTo(dAtA []byte) (int, error) {`) + p.In() + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := m.ProtoSize()`) + } else { + p.P(`size := m.Size()`) + } + p.P(`return m.MarshalToSizedBuffer(dAtA[:size])`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (m *`, ccTypeName, `) MarshalToSizedBuffer(dAtA []byte) (int, error) {`) + p.In() + p.P(`i := len(dAtA)`) + p.P(`_ = i`) + p.P(`var l int`) + p.P(`_ = l`) + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if m.XXX_unrecognized != nil {`) + p.In() + p.P(`i -= len(m.XXX_unrecognized)`) + p.P(`copy(dAtA[i:], m.XXX_unrecognized)`) + p.Out() + p.P(`}`) + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if n, err := `, p.protoPkg.Use(), `.EncodeInternalExtensionBackwards(m, dAtA[:i]); err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`i -= n`) + p.Out() + p.P(`}`) + } else { + p.P(`if m.XXX_extensions != nil {`) + p.In() + p.P(`i -= len(m.XXX_extensions)`) + p.P(`copy(dAtA[i:], m.XXX_extensions)`) + p.Out() + p.P(`}`) + } + } + fields := orderFields(message.GetField()) + sort.Sort(fields) + oneofs := make(map[string]struct{}) + for i := len(message.Field) - 1; i >= 0; i-- { + field := message.Field[i] + oneof := field.OneofIndex != nil + if !oneof { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.generateField(proto3, numGen, file, message, field) + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; !ok { + oneofs[fieldname] = struct{}{} + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + p.forward(`m.`+fieldname, false, gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto)) + p.Out() + p.P(`}`) + } + } + } + p.P(`return len(dAtA) - i, nil`) + p.Out() + p.P(`}`) + p.P() + + //Generate MarshalTo methods for oneof fields + m := 
proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, field := range m.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (m *`, ccTypeName, `) MarshalTo(dAtA []byte) (int, error) {`) + p.In() + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := m.ProtoSize()`) + } else { + p.P(`size := m.Size()`) + } + p.P(`return m.MarshalToSizedBuffer(dAtA[:size])`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (m *`, ccTypeName, `) MarshalToSizedBuffer(dAtA []byte) (int, error) {`) + p.In() + p.P(`i := len(dAtA)`) + vanity.TurnOffNullableForNativeTypes(field) + p.generateField(false, numGen, file, message, field) + p.P(`return len(dAtA) - i, nil`) + p.Out() + p.P(`}`) + } + } + + if p.atleastOne { + p.P(`func encodeVarint`, p.localName, `(dAtA []byte, offset int, v uint64) int {`) + p.In() + p.P(`offset -= sov`, p.localName, `(v)`) + p.P(`base := offset`) + p.P(`for v >= 1<<7 {`) + p.In() + p.P(`dAtA[offset] = uint8(v&0x7f|0x80)`) + p.P(`v >>= 7`) + p.P(`offset++`) + p.Out() + p.P(`}`) + p.P(`dAtA[offset] = uint8(v)`) + p.P(`return base`) + p.Out() + p.P(`}`) + } + +} + +func (p *marshalto) reverseListRange(expression ...string) string { + exp := strings.Join(expression, "") + p.P(`for iNdEx := len(`, exp, `) - 1; iNdEx >= 0; iNdEx-- {`) + p.In() + return exp + `[iNdEx]` +} + +func (p *marshalto) marshalAllSizeOf(field *descriptor.FieldDescriptorProto, varName, num string) bool { + if gogoproto.IsStdTime(field) { + p.marshalSizeOf(`StdTimeMarshalTo`, `SizeOfStdTime`, varName, num) + } else if gogoproto.IsStdDuration(field) { + p.marshalSizeOf(`StdDurationMarshalTo`, `SizeOfStdDuration`, varName, num) + } else if gogoproto.IsStdDouble(field) { + p.marshalSizeOf(`StdDoubleMarshalTo`, `SizeOfStdDouble`, varName, num) + } else if gogoproto.IsStdFloat(field) { + p.marshalSizeOf(`StdFloatMarshalTo`, `SizeOfStdFloat`, varName, num) + } else if gogoproto.IsStdInt64(field) { + p.marshalSizeOf(`StdInt64MarshalTo`, `SizeOfStdInt64`, varName, num) + } else if gogoproto.IsStdUInt64(field) { + p.marshalSizeOf(`StdUInt64MarshalTo`, `SizeOfStdUInt64`, varName, num) + } else if gogoproto.IsStdInt32(field) { + p.marshalSizeOf(`StdInt32MarshalTo`, `SizeOfStdInt32`, varName, num) + } else if gogoproto.IsStdUInt32(field) { + p.marshalSizeOf(`StdUInt32MarshalTo`, `SizeOfStdUInt32`, varName, num) + } else if gogoproto.IsStdBool(field) { + p.marshalSizeOf(`StdBoolMarshalTo`, `SizeOfStdBool`, varName, num) + } else if gogoproto.IsStdString(field) { + p.marshalSizeOf(`StdStringMarshalTo`, `SizeOfStdString`, varName, num) + } else if gogoproto.IsStdBytes(field) { + p.marshalSizeOf(`StdBytesMarshalTo`, `SizeOfStdBytes`, varName, num) + } else { + return false + } + return true +} + +func (p *marshalto) marshalSizeOf(marshal, size, varName, num string) { + p.P(`n`, num, `, err`, num, ` := `, p.typesPkg.Use(), `.`, marshal, `(`, varName, `, dAtA[i-`, p.typesPkg.Use(), `.`, size, `(`, varName, `):])`) + p.P(`if err`, num, ` != nil {`) + p.In() + p.P(`return 0, err`, num) + p.Out() + p.P(`}`) + p.P(`i -= n`, num) + p.callVarint(`n`, num) +} + +func (p *marshalto) backward(varName string, varInt bool) { + p.P(`{`) + p.In() + p.P(`size, err := `, varName, `.MarshalToSizedBuffer(dAtA[:i])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i -= size`) + if varInt { + p.callVarint(`size`) + } + p.Out() + p.P(`}`) +} + +func (p *marshalto) forward(varName string, 
varInt, protoSizer bool) { + p.P(`{`) + p.In() + if protoSizer { + p.P(`size := `, varName, `.ProtoSize()`) + } else { + p.P(`size := `, varName, `.Size()`) + } + p.P(`i -= size`) + p.P(`if _, err := `, varName, `.MarshalTo(dAtA[i:]); err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.Out() + if varInt { + p.callVarint(`size`) + } + p.P(`}`) +} + +func init() { + generator.RegisterPlugin(NewMarshal()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go b/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go new file mode 100644 index 000000000000..0f822e8a8acd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go @@ -0,0 +1,93 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The oneofcheck plugin is used to check whether oneof is not used incorrectly. 
+For instance: +An error is caused if a oneof field: + - is used in a face + - is an embedded field + +*/ +package oneofcheck + +import ( + "fmt" + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "os" +) + +type plugin struct { + *generator.Generator +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "oneofcheck" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + for _, msg := range file.Messages() { + face := gogoproto.IsFace(file.FileDescriptorProto, msg.DescriptorProto) + for _, field := range msg.GetField() { + if field.OneofIndex == nil { + continue + } + if face { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in a face and oneof\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if gogoproto.IsEmbed(field) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and an embedded field\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if !gogoproto.IsNullable(field) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and a non-nullable field\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if gogoproto.IsUnion(file.FileDescriptorProto, msg.DescriptorProto) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and in an union (deprecated)\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + } + } +} + +func (p *plugin) GenerateImports(*generator.FileDescriptor) {} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/populate/populate.go b/vendor/github.com/gogo/protobuf/plugin/populate/populate.go new file mode 100644 index 000000000000..da705945c330 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/populate/populate.go @@ -0,0 +1,815 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The populate plugin generates a NewPopulated function. 
+This function returns a newly populated structure.
+
+It is enabled by the following extensions:
+
+  - populate
+  - populate_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+All the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.populate_all) = true;
+
+  message B {
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+given to the populate plugin, will generate the following code:
+
+  func NewPopulatedB(r randyExample, easy bool) *B {
+	this := &B{}
+	v2 := NewPopulatedA(r, easy)
+	this.A = *v2
+	if r.Intn(10) != 0 {
+		v3 := r.Intn(10)
+		this.G = make([]github_com_gogo_protobuf_test_custom.Uint128, v3)
+		for i := 0; i < v3; i++ {
+			v4 := github_com_gogo_protobuf_test_custom.NewPopulatedUint128(r)
+			this.G[i] = *v4
+		}
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedExample(r, 3)
+	}
+	return this
+  }
+
+This is useful for testing.
+Most of the other plugins' generated test code uses it.
+You will still be able to use the generated test code of other packages
+if you turn off the populate plugin and write your own custom NewPopulated function.
+
+If the easy flag is not set, the XXX_unrecognized and XXX_extensions fields are also populated.
+These have caused problems with JSON marshalling and unmarshalling tests.
+
+*/
+package populate
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"github.com/gogo/protobuf/vanity"
+)
+
+type VarGen interface {
+	Next() string
+	Current() string
+}
+
+type varGen struct {
+	index int64
+}
+
+func NewVarGen() VarGen {
+	return &varGen{0}
+}
+
+func (this *varGen) Next() string {
+	this.index++
+	return fmt.Sprintf("v%d", this.index)
+}
+
+func (this *varGen) Current() string {
+	return fmt.Sprintf("v%d", this.index)
+}
+
+type plugin struct {
+	*generator.Generator
+	generator.PluginImports
+	varGen     VarGen
+	atleastOne bool
+	localName  string
+	typesPkg   generator.Single
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "populate"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func value(typeName string, fieldType descriptor.FieldDescriptorProto_Type) string {
+	switch fieldType {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return typeName + "(r.Float64())"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return typeName + "(r.Float32())"
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return typeName + "(r.Int63())"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return typeName + "(uint64(r.Uint32()))"
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return typeName + "(r.Int31())"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		return typeName + "(r.Uint32())"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
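+		// bool values are drawn uniformly: r.Intn(2) == 0 is true half the time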
+ return typeName + `(bool(r.Intn(2) == 0))` + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE, + descriptor.FieldDescriptorProto_TYPE_BYTES: + } + panic(fmt.Errorf("unexpected type %v", typeName)) +} + +func negative(fieldType descriptor.FieldDescriptorProto_Type) bool { + switch fieldType { + case descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL: + return false + } + return true +} + +func (p *plugin) getFuncName(goTypName string, field *descriptor.FieldDescriptorProto) string { + funcName := "NewPopulated" + goTypName + goTypNames := strings.Split(goTypName, ".") + if len(goTypNames) == 2 { + funcName = goTypNames[0] + ".NewPopulated" + goTypNames[1] + } else if len(goTypNames) != 1 { + panic(fmt.Errorf("unreachable: too many dots in %v", goTypName)) + } + if field != nil { + switch { + case gogoproto.IsStdTime(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdTime" + case gogoproto.IsStdDuration(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdDuration" + case gogoproto.IsStdDouble(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdDouble" + case gogoproto.IsStdFloat(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdFloat" + case gogoproto.IsStdInt64(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdInt64" + case gogoproto.IsStdUInt64(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdUInt64" + case gogoproto.IsStdInt32(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdInt32" + case gogoproto.IsStdUInt32(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdUInt32" + case gogoproto.IsStdBool(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdBool" + case gogoproto.IsStdString(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdString" + case gogoproto.IsStdBytes(field): + funcName = p.typesPkg.Use() + ".NewPopulatedStdBytes" + } + } + return funcName +} + +func (p *plugin) getFuncCall(goTypName string, field *descriptor.FieldDescriptorProto) string { + funcName := p.getFuncName(goTypName, field) + funcCall := funcName + "(r, easy)" + return funcCall +} + +func (p *plugin) getCustomFuncCall(goTypName string) string { + funcName := p.getFuncName(goTypName, nil) + funcCall := funcName + "(r)" + return funcCall +} + +func (p *plugin) getEnumVal(field *descriptor.FieldDescriptorProto, goTyp string) string { + enum := p.ObjectNamed(field.GetTypeName()).(*generator.EnumDescriptor) + l := len(enum.Value) + values := make([]string, l) + for i := range enum.Value { + values[i] = strconv.Itoa(int(*enum.Value[i].Number)) + } + arr := "[]int32{" + strings.Join(values, ",") + "}" + val := strings.Join([]string{generator.GoTypeToName(goTyp), `(`, arr, `[r.Intn(`, fmt.Sprintf("%d", l), `)])`}, "") + return val +} + +func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + goTyp, _ := p.GoType(message, field) + fieldname := p.GetOneOfFieldName(message, field) + goTypName := generator.GoTypeToName(goTyp) + if p.IsMap(field) { + m := p.GoMapType(nil, field) + keygoTyp, _ := p.GoType(nil, m.KeyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, m.KeyAliasField) + keygoAliasTyp = 
strings.Replace(keygoAliasTyp, "*", "", 1) + + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + keytypName := generator.GoTypeToName(keygoTyp) + keygoAliasTyp = generator.GoTypeToName(keygoAliasTyp) + valuetypAliasName := generator.GoTypeToName(valuegoAliasTyp) + + nullable, valuegoTyp, valuegoAliasTyp := generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, m.GoType, `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + keyval := "" + if m.KeyField.IsString() { + keyval = fmt.Sprintf("randString%v(r)", p.localName) + } else { + keyval = value(keytypName, m.KeyField.GetType()) + } + if keygoAliasTyp != keygoTyp { + keyval = keygoAliasTyp + `(` + keyval + `)` + } + if m.ValueField.IsMessage() || p.IsGroup(field) || + (m.ValueField.IsBytes() && gogoproto.IsCustomType(field)) { + s := `this.` + fieldname + `[` + keyval + `] = ` + if gogoproto.IsStdType(field) { + valuegoTyp = valuegoAliasTyp + } + funcCall := p.getCustomFuncCall(goTypName) + if !gogoproto.IsCustomType(field) { + goTypName = generator.GoTypeToName(valuegoTyp) + funcCall = p.getFuncCall(goTypName, m.ValueAliasField) + } + if !nullable { + funcCall = `*` + funcCall + } + if valuegoTyp != valuegoAliasTyp { + funcCall = `(` + valuegoAliasTyp + `)(` + funcCall + `)` + } + s += funcCall + p.P(s) + } else if m.ValueField.IsEnum() { + s := `this.` + fieldname + `[` + keyval + `]` + ` = ` + p.getEnumVal(m.ValueField, valuegoTyp) + p.P(s) + } else if m.ValueField.IsBytes() { + count := p.varGen.Next() + p.P(count, ` := r.Intn(100)`) + p.P(p.varGen.Next(), ` := `, keyval) + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] = make(`, valuegoTyp, `, `, count, `)`) + p.P(`for i := 0; i < `, count, `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `][i] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + } else if m.ValueField.IsString() { + s := `this.` + fieldname + `[` + keyval + `]` + ` = ` + fmt.Sprintf("randString%v(r)", p.localName) + p.P(s) + } else { + p.P(p.varGen.Next(), ` := `, keyval) + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] = `, value(valuetypAliasName, m.ValueField.GetType())) + if negative(m.ValueField.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] *= -1`) + p.Out() + p.P(`}`) + } + } + p.Out() + p.P(`}`) + } else if gogoproto.IsCustomType(field) { + funcCall := p.getCustomFuncCall(goTypName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current()) + p.Out() + p.P(`}`) + } else if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, ` = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) + } + } else if field.IsMessage() || p.IsGroup(field) { + funcCall := p.getFuncCall(goTypName, field) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(5)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, `[i] = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, `[i] = 
*`, p.varGen.Current()) + } + p.Out() + p.P(`}`) + } else { + if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, ` = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) + } + } + } else { + if field.IsEnum() { + val := p.getEnumVal(field, goTyp) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, val) + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, val) + } else { + p.P(p.varGen.Next(), ` := `, val) + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } else if field.IsBytes() { + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`this.`, fieldname, `[i] = make([]byte,`, p.varGen.Current(), `)`) + p.P(`for j := 0; j < `, p.varGen.Current(), `; j++ {`) + p.In() + p.P(`this.`, fieldname, `[i][j] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } else { + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + } + } else if field.IsString() { + typName := generator.GoTypeToName(goTyp) + val := fmt.Sprintf("%s(randString%v(r))", typName, p.localName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, val) + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, val) + } else { + p.P(p.varGen.Next(), `:= `, val) + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } else { + typName := generator.GoTypeToName(goTyp) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, `[i] *= -1`) + p.Out() + p.P(`}`) + } + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, ` *= -1`) + p.Out() + p.P(`}`) + } + } else { + p.P(p.varGen.Next(), ` := `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(p.varGen.Current(), ` *= -1`) + p.Out() + p.P(`}`) + } + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } + } +} + +func (p *plugin) hasLoop(pkg string, field *descriptor.FieldDescriptorProto, visited []*generator.Descriptor, excludes []*generator.Descriptor) *generator.Descriptor { + if field.IsMessage() || p.IsGroup(field) || p.IsMap(field) { + var fieldMessage *generator.Descriptor + if p.IsMap(field) { + m := p.GoMapType(nil, field) + if 
!m.ValueField.IsMessage() { + return nil + } + fieldMessage = p.ObjectNamed(m.ValueField.GetTypeName()).(*generator.Descriptor) + } else { + fieldMessage = p.ObjectNamed(field.GetTypeName()).(*generator.Descriptor) + } + fieldTypeName := generator.CamelCaseSlice(fieldMessage.TypeName()) + for _, message := range visited { + messageTypeName := generator.CamelCaseSlice(message.TypeName()) + if fieldTypeName == messageTypeName { + for _, e := range excludes { + if fieldTypeName == generator.CamelCaseSlice(e.TypeName()) { + return nil + } + } + return fieldMessage + } + } + + for _, f := range fieldMessage.Field { + if strings.HasPrefix(f.GetTypeName(), "."+pkg) { + visited = append(visited, fieldMessage) + loopTo := p.hasLoop(pkg, f, visited, excludes) + if loopTo != nil { + return loopTo + } + } + } + } + return nil +} + +func (p *plugin) loops(pkg string, field *descriptor.FieldDescriptorProto, message *generator.Descriptor) int { + //fmt.Fprintf(os.Stderr, "loops %v %v\n", field.GetTypeName(), generator.CamelCaseSlice(message.TypeName())) + excludes := []*generator.Descriptor{} + loops := 0 + for { + visited := []*generator.Descriptor{} + loopTo := p.hasLoop(pkg, field, visited, excludes) + if loopTo == nil { + break + } + //fmt.Fprintf(os.Stderr, "loopTo %v\n", generator.CamelCaseSlice(loopTo.TypeName())) + excludes = append(excludes, loopTo) + loops++ + } + return loops +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.atleastOne = false + p.PluginImports = generator.NewPluginImports(p.Generator) + p.varGen = NewVarGen() + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") + p.localName = generator.FileName(file) + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + + for _, message := range file.Messages() { + if !gogoproto.HasPopulate(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + loopLevels := make([]int, len(message.Field)) + maxLoopLevel := 0 + for i, field := range message.Field { + loopLevels[i] = p.loops(file.GetPackage(), field, message) + if loopLevels[i] > maxLoopLevel { + maxLoopLevel = loopLevels[i] + } + } + ranTotal := 0 + for i := range loopLevels { + ranTotal += int(math.Pow10(maxLoopLevel - loopLevels[i])) + } + p.P(`func NewPopulated`, ccTypeName, `(r randy`, p.localName, `, easy bool) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + if gogoproto.IsUnion(message.File().FileDescriptorProto, message.DescriptorProto) && len(message.Field) > 0 { + p.P(`fieldNum := r.Intn(`, fmt.Sprintf("%d", ranTotal), `)`) + p.P(`switch fieldNum {`) + k := 0 + for i, field := range message.Field { + is := []string{} + ran := int(math.Pow10(maxLoopLevel - loopLevels[i])) + for j := 0; j < ran; j++ { + is = append(is, fmt.Sprintf("%d", j+k)) + } + k += ran + p.P(`case `, strings.Join(is, ","), `:`) + p.In() + p.GenerateField(file, message, field) + p.Out() + } + p.P(`}`) + } else { + var maxFieldNumber int32 + oneofs := make(map[string]struct{}) + for fieldIndex, field := range message.Field { + if field.GetNumber() > maxFieldNumber { + maxFieldNumber = field.GetNumber() + } + oneof := field.OneofIndex != nil + if !oneof { + if field.IsRequired() || (!gogoproto.IsNullable(field) 
&& !field.IsRepeated()) || (proto3 && !field.IsMessage()) { + p.GenerateField(file, message, field) + } else { + if loopLevels[fieldIndex] > 0 { + p.P(`if r.Intn(5) == 0 {`) + } else { + p.P(`if r.Intn(5) != 0 {`) + } + p.In() + p.GenerateField(file, message, field) + p.Out() + p.P(`}`) + } + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + fieldNumbers := []int32{} + for _, f := range message.Field { + fname := p.GetFieldName(message, f) + if fname == fieldname { + fieldNumbers = append(fieldNumbers, f.GetNumber()) + } + } + + p.P(`oneofNumber_`, fieldname, ` := `, fmt.Sprintf("%#v", fieldNumbers), `[r.Intn(`, strconv.Itoa(len(fieldNumbers)), `)]`) + p.P(`switch oneofNumber_`, fieldname, ` {`) + for _, f := range message.Field { + fname := p.GetFieldName(message, f) + if fname != fieldname { + continue + } + p.P(`case `, strconv.Itoa(int(f.GetNumber())), `:`) + p.In() + ccTypeName := p.OneOfTypeName(message, f) + p.P(`this.`, fname, ` = NewPopulated`, ccTypeName, `(r, easy)`) + p.Out() + } + p.P(`}`) + } + } + if message.DescriptorProto.HasExtension() { + p.P(`if !easy && r.Intn(10) != 0 {`) + p.In() + p.P(`l := r.Intn(5)`) + p.P(`for i := 0; i < l; i++ {`) + p.In() + if len(message.DescriptorProto.GetExtensionRange()) > 1 { + p.P(`eIndex := r.Intn(`, strconv.Itoa(len(message.DescriptorProto.GetExtensionRange())), `)`) + p.P(`fieldNumber := 0`) + p.P(`switch eIndex {`) + for i, e := range message.DescriptorProto.GetExtensionRange() { + p.P(`case `, strconv.Itoa(i), `:`) + p.In() + p.P(`fieldNumber = r.Intn(`, strconv.Itoa(int(e.GetEnd()-e.GetStart())), `) + `, strconv.Itoa(int(e.GetStart()))) + p.Out() + if e.GetEnd() > maxFieldNumber { + maxFieldNumber = e.GetEnd() + } + } + p.P(`}`) + } else { + e := message.DescriptorProto.GetExtensionRange()[0] + p.P(`fieldNumber := r.Intn(`, strconv.Itoa(int(e.GetEnd()-e.GetStart())), `) + `, strconv.Itoa(int(e.GetStart()))) + if e.GetEnd() > maxFieldNumber { + maxFieldNumber = e.GetEnd() + } + } + p.P(`wire := r.Intn(4)`) + p.P(`if wire == 3 { wire = 5 }`) + p.P(`dAtA := randField`, p.localName, `(nil, r, fieldNumber, wire)`) + p.P(protoPkg.Use(), `.SetRawExtension(this, int32(fieldNumber), dAtA)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + if maxFieldNumber < (1 << 10) { + p.P(`if !easy && r.Intn(10) != 0 {`) + p.In() + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`this.XXX_unrecognized = randUnrecognized`, p.localName, `(r, `, strconv.Itoa(int(maxFieldNumber+1)), `)`) + } + p.Out() + p.P(`}`) + } + } + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + + //Generate NewPopulated functions for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, f := range m.Field { + oneof := f.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, f) + p.P(`func NewPopulated`, ccTypeName, `(r randy`, p.localName, `, easy bool) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + vanity.TurnOffNullableForNativeTypes(f) + p.GenerateField(file, message, f) + p.P(`return this`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`type randy`, p.localName, ` interface {`) + p.In() + p.P(`Float32() float32`) + p.P(`Float64() float64`) + p.P(`Int63() int64`) + p.P(`Int31() int32`) + p.P(`Uint32() uint32`) + p.P(`Intn(n int) int`) + p.Out() + p.P(`}`) + + p.P(`func randUTF8Rune`, p.localName, `(r randy`, 
p.localName, `) rune {`) + p.In() + p.P(`ru := r.Intn(62)`) + p.P(`if ru < 10 {`) + p.In() + p.P(`return rune(ru+48)`) + p.Out() + p.P(`} else if ru < 36 {`) + p.In() + p.P(`return rune(ru+55)`) + p.Out() + p.P(`}`) + p.P(`return rune(ru+61)`) + p.Out() + p.P(`}`) + + p.P(`func randString`, p.localName, `(r randy`, p.localName, `) string {`) + p.In() + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`tmps := make([]rune, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`tmps[i] = randUTF8Rune`, p.localName, `(r)`) + p.Out() + p.P(`}`) + p.P(`return string(tmps)`) + p.Out() + p.P(`}`) + + p.P(`func randUnrecognized`, p.localName, `(r randy`, p.localName, `, maxFieldNumber int) (dAtA []byte) {`) + p.In() + p.P(`l := r.Intn(5)`) + p.P(`for i := 0; i < l; i++ {`) + p.In() + p.P(`wire := r.Intn(4)`) + p.P(`if wire == 3 { wire = 5 }`) + p.P(`fieldNumber := maxFieldNumber + r.Intn(100)`) + p.P(`dAtA = randField`, p.localName, `(dAtA, r, fieldNumber, wire)`) + p.Out() + p.P(`}`) + p.P(`return dAtA`) + p.Out() + p.P(`}`) + + p.P(`func randField`, p.localName, `(dAtA []byte, r randy`, p.localName, `, fieldNumber int, wire int) []byte {`) + p.In() + p.P(`key := uint32(fieldNumber)<<3 | uint32(wire)`) + p.P(`switch wire {`) + p.P(`case 0:`) + p.In() + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(p.varGen.Next(), ` := r.Int63()`) + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(p.varGen.Current(), ` *= -1`) + p.Out() + p.P(`}`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(`, p.varGen.Current(), `))`) + p.Out() + p.P(`case 1:`) + p.In() + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(`dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) + p.Out() + p.P(`case 2:`) + p.In() + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(`ll := r.Intn(100)`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(ll))`) + p.P(`for j := 0; j < ll; j++ {`) + p.In() + p.P(`dAtA = append(dAtA, byte(r.Intn(256)))`) + p.Out() + p.P(`}`) + p.Out() + p.P(`default:`) + p.In() + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(`dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) + p.Out() + p.P(`}`) + p.P(`return dAtA`) + p.Out() + p.P(`}`) + + p.P(`func encodeVarintPopulate`, p.localName, `(dAtA []byte, v uint64) []byte {`) + p.In() + p.P(`for v >= 1<<7 {`) + p.In() + p.P(`dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))`) + p.P(`v >>= 7`) + p.Out() + p.P(`}`) + p.P(`dAtA = append(dAtA, uint8(v))`) + p.P(`return dAtA`) + p.Out() + p.P(`}`) + +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/size/size.go b/vendor/github.com/gogo/protobuf/plugin/size/size.go new file mode 100644 index 000000000000..1650b438751d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/size/size.go @@ -0,0 +1,696 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The size plugin generates a Size or ProtoSize method for each message. +This is useful with the MarshalTo method generated by the marshalto plugin and the +gogoproto.marshaler and gogoproto.marshaler_all extensions. + +It is enabled by the following extensions: + + - sizer + - sizer_all + - protosizer + - protosizer_all + +The size plugin also generates a test given it is enabled using one of the following extensions: + + - testgen + - testgen_all + +And a benchmark given it is enabled using one of the following extensions: + + - benchgen + - benchgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.sizer_all) = true; + + message B { + option (gogoproto.description) = true; + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +given to the size plugin, will generate the following code: + + func (m *B) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.A.Size() + n += 1 + l + sovExample(uint64(l)) + if len(m.G) > 0 { + for _, e := range m.G { + l = e.Size() + n += 1 + l + sovExample(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n + } + +and the following test code: + + func TestBSize(t *testing5.T) { + popr := math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano())) + p := NewPopulatedB(popr, true) + dAtA, err := github_com_gogo_protobuf_proto2.Marshal(p) + if err != nil { + panic(err) + } + size := p.Size() + if len(dAtA) != size { + t.Fatalf("size %v != marshalled size %v", size, len(dAtA)) + } + } + + func BenchmarkBSize(b *testing5.B) { + popr := math_rand5.New(math_rand5.NewSource(616)) + total := 0 + pops := make([]*B, 1000) + for i := 0; i < 1000; i++ { + pops[i] = NewPopulatedB(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + total += pops[i%1000].Size() + } + b.SetBytes(int64(total / b.N)) + } + +The sovExample function is a size of 
varint function for the example.pb.go file. + +*/ +package size + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type size struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string + typesPkg generator.Single + bitsPkg generator.Single +} + +func NewSize() *size { + return &size{} +} + +func (p *size) Name() string { + return "size" +} + +func (p *size) Init(g *generator.Generator) { + p.Generator = g +} + +func wireToType(wire string) int { + switch wire { + case "fixed64": + return proto.WireFixed64 + case "fixed32": + return proto.WireFixed32 + case "varint": + return proto.WireVarint + case "bytes": + return proto.WireBytes + case "group": + return proto.WireBytes + case "zigzag32": + return proto.WireVarint + case "zigzag64": + return proto.WireVarint + } + panic("unreachable") +} + +func keySize(fieldNumber int32, wireType int) int { + x := uint32(fieldNumber)<<3 | uint32(wireType) + size := 0 + for size = 0; x > 127; size++ { + x >>= 7 + } + size++ + return size +} + +func (p *size) sizeVarint() { + p.P(` + func sov`, p.localName, `(x uint64) (n int) { + return (`, p.bitsPkg.Use(), `.Len64(x | 1) + 6)/ 7 + }`) +} + +func (p *size) sizeZigZag() { + p.P(`func soz`, p.localName, `(x uint64) (n int) { + return sov`, p.localName, `(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + }`) +} + +func (p *size) std(field *descriptor.FieldDescriptorProto, name string) (string, bool) { + ptr := "" + if gogoproto.IsNullable(field) { + ptr = "*" + } + if gogoproto.IsStdTime(field) { + return p.typesPkg.Use() + `.SizeOfStdTime(` + ptr + name + `)`, true + } else if gogoproto.IsStdDuration(field) { + return p.typesPkg.Use() + `.SizeOfStdDuration(` + ptr + name + `)`, true + } else if gogoproto.IsStdDouble(field) { + return p.typesPkg.Use() + `.SizeOfStdDouble(` + ptr + name + `)`, true + } else if gogoproto.IsStdFloat(field) { + return p.typesPkg.Use() + `.SizeOfStdFloat(` + ptr + name + `)`, true + } else if gogoproto.IsStdInt64(field) { + return p.typesPkg.Use() + `.SizeOfStdInt64(` + ptr + name + `)`, true + } else if gogoproto.IsStdUInt64(field) { + return p.typesPkg.Use() + `.SizeOfStdUInt64(` + ptr + name + `)`, true + } else if gogoproto.IsStdInt32(field) { + return p.typesPkg.Use() + `.SizeOfStdInt32(` + ptr + name + `)`, true + } else if gogoproto.IsStdUInt32(field) { + return p.typesPkg.Use() + `.SizeOfStdUInt32(` + ptr + name + `)`, true + } else if gogoproto.IsStdBool(field) { + return p.typesPkg.Use() + `.SizeOfStdBool(` + ptr + name + `)`, true + } else if gogoproto.IsStdString(field) { + return p.typesPkg.Use() + `.SizeOfStdString(` + ptr + name + `)`, true + } else if gogoproto.IsStdBytes(field) { + return p.typesPkg.Use() + `.SizeOfStdBytes(` + ptr + name + `)`, true + } + return "", false +} + +func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, sizeName string) { + fieldname := p.GetOneOfFieldName(message, field) + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + doNilCheck := gogoproto.NeedsNilCheck(proto3, field) + if repeated { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + } else if doNilCheck { + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + } + packed := field.IsPacked() || 
(proto3 && field.IsPacked3()) + _, wire := p.GoType(message, field) + wireType := wireToType(wire) + fieldNumber := field.GetNumber() + if packed { + wireType = proto.WireBytes + } + key := keySize(fieldNumber, wireType) + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)*8))`, `+len(m.`, fieldname, `)*8`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+8), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+8)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+8)) + } else { + p.P(`n+=`, strconv.Itoa(key+8)) + } + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)*4))`, `+len(m.`, fieldname, `)*4`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+4), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+4)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+4)) + } else { + p.P(`n+=`, strconv.Itoa(key+4)) + } + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + if packed { + p.P(`l = 0`) + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`l+=sov`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(l))+l`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(m.`, fieldname, `))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(*m.`, fieldname, `))`) + } else { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(m.`, fieldname, `))`) + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)))`, `+len(m.`, fieldname, `)*1`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+1), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+1)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+1)) + } else { + p.P(`n+=`, strconv.Itoa(key+1)) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + if repeated { + p.P(`for _, s := range m.`, fieldname, ` { `) + p.In() + p.P(`l = len(s)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`if l > 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`l=len(*m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } else { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, 
p.localName, `(uint64(l))`) + } + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("size does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if p.IsMap(field) { + m := p.GoMapType(nil, field) + _, keywire := p.GoType(nil, m.KeyAliasField) + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, valuewire := p.GoType(nil, m.ValueAliasField) + _, fieldwire := p.GoType(nil, field) + + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + fieldKeySize := keySize(field.GetNumber(), wireToType(fieldwire)) + keyKeySize := keySize(1, wireToType(keywire)) + valueKeySize := keySize(2, wireToType(valuewire)) + p.P(`for k, v := range m.`, fieldname, ` { `) + p.In() + p.P(`_ = k`) + p.P(`_ = v`) + sum := []string{strconv.Itoa(keyKeySize)} + switch m.KeyField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, `8`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, `4`) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, `sov`+p.localName+`(uint64(k))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + sum = append(sum, `len(k)+sov`+p.localName+`(uint64(len(k)))`) + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, `soz`+p.localName+`(uint64(k))`) + } + switch m.ValueField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, strconv.Itoa(8)) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, strconv.Itoa(4)) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `sov`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `len(v)+sov`+p.localName+`(uint64(len(v)))`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if gogoproto.IsCustomType(field) { + p.P(`l = 0`) + if nullable { + p.P(`if v != nil {`) + p.In() + } + p.P(`l = v.`, sizeName, `()`) + p.P(`l += `, strconv.Itoa(valueKeySize), ` + sov`+p.localName+`(uint64(l))`) + if nullable { + p.Out() + p.P(`}`) + } + sum = append(sum, `l`) + } else { + p.P(`l = 0`) + if proto3 { + p.P(`if len(v) > 0 {`) + } else { + p.P(`if v != nil {`) + } + p.In() + p.P(`l = `, strconv.Itoa(valueKeySize), ` + 
len(v)+sov`+p.localName+`(uint64(len(v)))`) + p.Out() + p.P(`}`) + sum = append(sum, `l`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `soz`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + stdSizeCall, stdOk := p.std(m.ValueAliasField, "v") + if nullable { + p.P(`l = 0`) + p.P(`if v != nil {`) + p.In() + if stdOk { + p.P(`l = `, stdSizeCall) + } else if valuegoTyp != valuegoAliasTyp { + p.P(`l = ((`, valuegoTyp, `)(v)).`, sizeName, `()`) + } else { + p.P(`l = v.`, sizeName, `()`) + } + p.P(`l += `, strconv.Itoa(valueKeySize), ` + sov`+p.localName+`(uint64(l))`) + p.Out() + p.P(`}`) + sum = append(sum, `l`) + } else { + if stdOk { + p.P(`l = `, stdSizeCall) + } else if valuegoTyp != valuegoAliasTyp { + p.P(`l = ((*`, valuegoTyp, `)(&v)).`, sizeName, `()`) + } else { + p.P(`l = v.`, sizeName, `()`) + } + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `l+sov`+p.localName+`(uint64(l))`) + } + } + p.P(`mapEntrySize := `, strings.Join(sum, "+")) + p.P(`n+=mapEntrySize+`, fieldKeySize, `+sov`, p.localName, `(uint64(mapEntrySize))`) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` { `) + p.In() + stdSizeCall, stdOk := p.std(field, "e") + if stdOk { + p.P(`l=`, stdSizeCall) + } else { + p.P(`l=e.`, sizeName, `()`) + } + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + stdSizeCall, stdOk := p.std(field, "m."+fieldname) + if stdOk { + p.P(`l=`, stdSizeCall) + } else { + p.P(`l=m.`, fieldname, `.`, sizeName, `()`) + } + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if !gogoproto.IsCustomType(field) { + if repeated { + p.P(`for _, b := range m.`, fieldname, ` { `) + p.In() + p.P(`l = len(b)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`if l > 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + } else { + if repeated { + p.P(`for _, e := range m.`, fieldname, ` { `) + p.In() + p.P(`l=e.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + p.P(`l=m.`, fieldname, `.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + } + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + if packed { + p.P(`l = 0`) + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`l+=soz`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(l))+l`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(m.`, fieldname, `))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(*m.`, fieldname, `))`) + } else { + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(m.`, fieldname, `))`) + } + 
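+	// all known descriptor field types are handled above; anything else falls through to the panic below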
default: + panic("not implemented") + } + if repeated || doNilCheck { + p.Out() + p.P(`}`) + } +} + +func (p *size) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + p.localName = generator.FileName(file) + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + p.bitsPkg = p.NewImport("math/bits") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + sizeName := "" + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) && gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + fmt.Fprintf(os.Stderr, "ERROR: message %v cannot support both sizer and protosizer plugins\n", generator.CamelCase(*message.Name)) + os.Exit(1) + } + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "Size" + } else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "ProtoSize" + } else { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`) + p.In() + p.P(`if m == nil {`) + p.In() + p.P(`return 0`) + p.Out() + p.P(`}`) + p.P(`var l int`) + p.P(`_ = l`) + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.generateField(proto3, file, message, field, sizeName) + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + p.P(`n+=m.`, fieldname, `.`, sizeName, `()`) + p.Out() + p.P(`}`) + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`n += `, protoPkg.Use(), `.SizeOfInternalExtension(m)`) + } else { + p.P(`if m.XXX_extensions != nil {`) + p.In() + p.P(`n+=len(m.XXX_extensions)`) + p.Out() + p.P(`}`) + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if m.XXX_unrecognized != nil {`) + p.In() + p.P(`n+=len(m.XXX_unrecognized)`) + p.Out() + p.P(`}`) + } + p.P(`return n`) + p.Out() + p.P(`}`) + p.P() + + //Generate Size methods for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, f := range m.Field { + oneof := f.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, f) + p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`) + p.In() + p.P(`if m == nil {`) + p.In() + p.P(`return 0`) + p.Out() + p.P(`}`) + p.P(`var l int`) + p.P(`_ = l`) + vanity.TurnOffNullableForNativeTypes(f) + p.generateField(false, file, message, f, sizeName) + p.P(`return n`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.sizeVarint() + p.sizeZigZag() + +} + +func init() { + generator.RegisterPlugin(NewSize()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go b/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go new file mode 100644 index 000000000000..1df987300075 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go @@ -0,0 +1,134 
@@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package size + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + sizeName := "" + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "Size" + } else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "ProtoSize" + } else { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, sizeName, `(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`size2 := `, protoPkg.Use(), `.Size(p)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`size := p.`, sizeName, `()`) + p.P(`if len(dAtA) != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))`) + p.Out() + p.P(`}`) + p.P(`if size2 != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)`) + p.Out() + p.P(`}`) + 
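+			// re-check proto.Size after marshalling to catch any cached-size staleness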
p.P(`size3 := `, protoPkg.Use(), `.Size(p)`) + p.P(`if size3 != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + + if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Benchmark`, ccTypeName, sizeName, `(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`pops := make([]*`, ccTypeName, `, 1000)`) + p.P(`for i := 0; i < 1000; i++ {`) + p.In() + p.P(`pops[i] = NewPopulated`, ccTypeName, `(popr, false)`) + p.Out() + p.P(`}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`total += pops[i%1000].`, sizeName, `()`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go b/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go new file mode 100644 index 000000000000..df9792c7c4f4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go @@ -0,0 +1,347 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The stringer plugin generates a String method for each message. 
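+The default String method generated by protoc-gen-go must first be turned off
+(gogoproto.goproto_stringer = false), otherwise code generation panics;
+the example below does this with goproto_stringer_all.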
+
+It is enabled by the following extensions:
+
+  - stringer
+  - stringer_all
+
+The stringer plugin also generates a test if it is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.goproto_stringer_all) = false;
+  option (gogoproto.stringer_all) = true;
+
+  message A {
+	optional string Description = 1 [(gogoproto.nullable) = false];
+	optional int64 Number = 2 [(gogoproto.nullable) = false];
+	optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+  }
+
+given to the stringer plugin, will generate the following code:
+
+	func (this *A) String() string {
+		if this == nil {
+			return "nil"
+		}
+		s := strings.Join([]string{`&A{`,
+			`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+			`Number:` + fmt.Sprintf("%v", this.Number) + `,`,
+			`Id:` + fmt.Sprintf("%v", this.Id) + `,`,
+			`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+			`}`,
+		}, "")
+		return s
+	}
+
+and the following test code:
+
+	func TestAStringer(t *testing4.T) {
+		popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano()))
+		p := NewPopulatedA(popr, false)
+		s1 := p.String()
+		s2 := fmt1.Sprintf("%v", p)
+		if s1 != s2 {
+			t.Fatalf("String want %v got %v", s1, s2)
+		}
+	}
+
+Typically fmt.Printf("%v") stops printing when it reaches a pointer and
+does not print the values it points to, while the generated String method always prints all values, recursively.
+
+*/
+package stringer
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"strings"
+)
+
+type stringer struct {
+	*generator.Generator
+	generator.PluginImports
+	atleastOne bool
+	localName  string
+}
+
+func NewStringer() *stringer {
+	return &stringer{}
+}
+
+func (p *stringer) Name() string {
+	return "stringer"
+}
+
+func (p *stringer) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func (p *stringer) Generate(file *generator.FileDescriptor) {
+	proto3 := gogoproto.IsProto3(file.FileDescriptorProto)
+	p.PluginImports = generator.NewPluginImports(p.Generator)
+	p.atleastOne = false
+
+	p.localName = generator.FileName(file)
+
+	fmtPkg := p.NewImport("fmt")
+	stringsPkg := p.NewImport("strings")
+	reflectPkg := p.NewImport("reflect")
+	sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys")
+	protoPkg := p.NewImport("github.com/gogo/protobuf/proto")
+	for _, message := range file.Messages() {
+		if !gogoproto.IsStringer(file.FileDescriptorProto, message.DescriptorProto) {
+			continue
+		}
+		if gogoproto.EnabledGoStringer(file.FileDescriptorProto, message.DescriptorProto) {
+			panic("old string method needs to be disabled, please use gogoproto.goproto_stringer or gogoproto.goproto_stringer_all and set it to false")
+		}
+		if message.DescriptorProto.GetOptions().GetMapEntry() {
+			continue
+		}
+		p.atleastOne = true
+		ccTypeName := generator.CamelCaseSlice(message.TypeName())
+		p.P(`func (this *`, ccTypeName, `) String() string {`)
+		p.In()
+		p.P(`if this == nil {`)
+		p.In()
+		p.P(`return "nil"`)
+		p.Out()
+		p.P(`}`)
+		for _, field := range message.Field {
+			if p.IsMap(field) || !field.IsRepeated() {
+				continue
+			}
+			if (field.IsMessage() && !gogoproto.IsCustomType(field)) || p.IsGroup(field) {
+				nullable := gogoproto.IsNullable(field)
+				desc := 
p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + msgnames := strings.Split(msgname, ".") + typeName := msgnames[len(msgnames)-1] + fieldMessageDesc := file.GetMessage(msgname) + gogoStringer := false + if fieldMessageDesc != nil { + gogoStringer = gogoproto.IsStringer(file.FileDescriptorProto, fieldMessageDesc) + } + fieldname := p.GetFieldName(message, field) + stringfunc := fmtPkg.Use() + `.Sprintf("%v", f)` + if gogoStringer { + stringfunc = `f.String()` + } + repeatedName := `repeatedStringFor` + fieldname + if nullable { + p.P(repeatedName, ` := "[]*`, typeName, `{"`) + } else { + p.P(repeatedName, ` := "[]`, typeName, `{"`) + } + + p.P(`for _, f := range `, `this.`, fieldname, ` {`) + p.In() + if nullable { + p.P(repeatedName, " += ", stringsPkg.Use(), `.Replace(`, stringfunc, `, "`, typeName, `","`, msgname, `"`, ", 1)", ` + ","`) + } else if gogoStringer { + p.P(repeatedName, " += ", stringsPkg.Use(), `.Replace(`, stringsPkg.Use(), `.Replace(`, stringfunc, `, "`, typeName, `","`, msgname, `"`, ", 1),`&`,``,1)", ` + ","`) + } else { + p.P(repeatedName, " += ", stringfunc, ` + ","`) + } + p.Out() + p.P(`}`) + p.P(repeatedName, ` += "}"`) + } + } + for _, field := range message.Field { + if !p.IsMap(field) { + continue + } + fieldname := p.GetFieldName(message, field) + + m := p.GoMapType(nil, field) + mapgoTyp, keyField, keyAliasField := m.GoType, m.KeyField, m.KeyAliasField + keysName := `keysFor` + fieldname + keygoTyp, _ := p.GoType(nil, keyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, keyAliasField) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + keyCapTyp := generator.CamelCase(keygoTyp) + p.P(keysName, ` := make([]`, keygoTyp, `, 0, len(this.`, fieldname, `))`) + p.P(`for k, _ := range this.`, fieldname, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(keysName, ` = append(`, keysName, `, k)`) + } else { + p.P(keysName, ` = append(`, keysName, `, `, keygoTyp, `(k))`) + } + p.Out() + p.P(`}`) + p.P(sortKeysPkg.Use(), `.`, keyCapTyp, `s(`, keysName, `)`) + mapName := `mapStringFor` + fieldname + p.P(mapName, ` := "`, mapgoTyp, `{"`) + p.P(`for _, k := range `, keysName, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(mapName, ` += fmt.Sprintf("%v: %v,", k, this.`, fieldname, `[k])`) + } else { + p.P(mapName, ` += fmt.Sprintf("%v: %v,", k, this.`, fieldname, `[`, keygoAliasTyp, `(k)])`) + } + p.Out() + p.P(`}`) + p.P(mapName, ` += "}"`) + } + p.P("s := ", stringsPkg.Use(), ".Join([]string{`&", ccTypeName, "{`,") + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + fieldname := p.GetFieldName(message, field) + oneof := field.OneofIndex != nil + if oneof { + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } else if p.IsMap(field) { + mapName := `mapStringFor` + fieldname + p.P("`", fieldname, ":`", ` + `, mapName, " + `,", "`,") + } else if (field.IsMessage() && !gogoproto.IsCustomType(field)) || p.IsGroup(field) { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + msgnames := strings.Split(msgname, ".") + typeName := msgnames[len(msgnames)-1] + fieldMessageDesc := file.GetMessage(msgname) + gogoStringer := false + if fieldMessageDesc != nil { + gogoStringer = gogoproto.IsStringer(file.FileDescriptorProto, 
fieldMessageDesc) + } + stringfunc := fmtPkg.Use() + `.Sprintf("%v", this.` + fieldname + `)` + if gogoStringer { + stringfunc = `this.` + fieldname + `.String()` + } + if nullable && !repeated { + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, stringfunc, `, "`, typeName, `","`, msgname, `"`, ", 1) + `,", "`,") + } else if repeated { + repeatedName := `repeatedStringFor` + fieldname + p.P("`", fieldname, ":`", ` + `, repeatedName, " + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, stringsPkg.Use(), `.Replace(`, stringfunc, `, "`, typeName, `","`, msgname, `"`, ", 1),`&`,``,1) + `,", "`,") + } + } else { + if nullable && !repeated && !proto3 { + p.P("`", fieldname, ":`", ` + valueToString`, p.localName, `(this.`, fieldname, ") + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P("`XXX_InternalExtensions:` + ", protoPkg.Use(), ".StringFromInternalExtension(this) + `,`,") + } else { + p.P("`XXX_extensions:` + ", protoPkg.Use(), ".StringFromExtensionsBytes(this.XXX_extensions) + `,`,") + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P("`XXX_unrecognized:` + ", fmtPkg.Use(), `.Sprintf("%v", this.XXX_unrecognized) + `, "`,`,") + } + p.P("`}`,") + p.P(`}`, `,""`, ")") + p.P(`return s`) + p.Out() + p.P(`}`) + + //Generate String methods for oneof fields + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (this *`, ccTypeName, `) String() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P("s := ", stringsPkg.Use(), ".Join([]string{`&", ccTypeName, "{`,") + fieldname := p.GetOneOfFieldName(message, field) + if field.IsMessage() || p.IsGroup(field) { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + msgnames := strings.Split(msgname, ".") + typeName := msgnames[len(msgnames)-1] + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, `), "`, typeName, `","`, msgname, `"`, ", 1) + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } + p.P("`}`,") + p.P(`}`, `,""`, ")") + p.P(`return s`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`func valueToString`, p.localName, `(v interface{}) string {`) + p.In() + p.P(`rv := `, reflectPkg.Use(), `.ValueOf(v)`) + p.P(`if rv.IsNil() {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P(`pv := `, reflectPkg.Use(), `.Indirect(rv).Interface()`) + p.P(`return `, fmtPkg.Use(), `.Sprintf("*%v", pv)`) + p.Out() + p.P(`}`) + +} + +func init() { + generator.RegisterPlugin(NewStringer()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go b/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go new file mode 100644 index 000000000000..0912a22df638 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go @@ -0,0 +1,83 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package stringer + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + fmtPkg := imports.NewImport("fmt") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsStringer(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `Stringer(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`s1 := p.String()`) + p.P(`s2 := `, fmtPkg.Use(), `.Sprintf("%v", p)`) + p.P(`if s1 != s2 {`) + p.In() + p.P(`t.Fatalf("String want %v got %v", s1, s2)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go b/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go new file mode 100644 index 000000000000..e0a9287e5600 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go @@ -0,0 +1,608 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The testgen plugin generates Test and Benchmark functions for each message. + +Tests are enabled using the following extensions: + + - testgen + - testgen_all + +Benchmarks are enabled using the following extensions: + + - benchgen + - benchgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.testgen_all) = true; + option (gogoproto.benchgen_all) = true; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +given to the testgen plugin, will generate the following test code: + + func TestAProto(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedA(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + panic(err) + } + msg := &A{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + panic(err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + + func BenchmarkAProtoMarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*A, 10000) + for i := 0; i < 10000; i++ { + pops[i] = NewPopulatedA(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) + if err != nil { + panic(err) + } + total += len(dAtA) + } + b.SetBytes(int64(total / b.N)) + } + + func BenchmarkAProtoUnmarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + datas := make([][]byte, 10000) + for i := 0; i < 10000; i++ { + dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedA(popr, false)) + if err != nil { + panic(err) + } + datas[i] = dAtA + } + msg := &A{} 
+ b.ResetTimer() + for i := 0; i < b.N; i++ { + total += len(datas[i%10000]) + if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil { + panic(err) + } + } + b.SetBytes(int64(total / b.N)) + } + + + func TestAJSON(t *testing1.T) { + popr := math_rand1.New(math_rand1.NewSource(time1.Now().UnixNano())) + p := NewPopulatedA(popr, true) + jsondata, err := encoding_json.Marshal(p) + if err != nil { + panic(err) + } + msg := &A{} + err = encoding_json.Unmarshal(jsondata, msg) + if err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Json Equal %#v", msg, p) + } + } + + func TestAProtoText(t *testing2.T) { + popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) + p := NewPopulatedA(popr, true) + dAtA := github_com_gogo_protobuf_proto1.MarshalTextString(p) + msg := &A{} + if err := github_com_gogo_protobuf_proto1.UnmarshalText(dAtA, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + + func TestAProtoCompactText(t *testing2.T) { + popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) + p := NewPopulatedA(popr, true) + dAtA := github_com_gogo_protobuf_proto1.CompactTextString(p) + msg := &A{} + if err := github_com_gogo_protobuf_proto1.UnmarshalText(dAtA, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + +Other registered tests are also generated. +Tests are registered to this test plugin by calling the following function. + + func RegisterTestPlugin(newFunc NewTestPlugin) + +where NewTestPlugin is: + + type NewTestPlugin func(g *generator.Generator) TestPlugin + +and TestPlugin is an interface: + + type TestPlugin interface { + Generate(imports generator.PluginImports, file *generator.FileDescriptor) (used bool) + } + +Plugins that use this interface include: + + - populate + - gostring + - equal + - union + - and more + +Please look at these plugins as examples of how to create your own. +A good idea is to let each plugin generate its own tests. 
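+
+As a sketch of what such a plugin looks like (the resetTest type, the
+NewResetTest constructor, and the generated TestXxxReset name are
+hypothetical; the imports mirror the sibling plugins in this package), a
+test plugin that only checks that Reset does not panic could be written as:
+
+	type resetTest struct {
+		*generator.Generator
+	}
+
+	func NewResetTest(g *generator.Generator) testgen.TestPlugin {
+		return &resetTest{g}
+	}
+
+	func (p *resetTest) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool {
+		used := false
+		testingPkg := imports.NewImport("testing")
+		for _, message := range file.Messages() {
+			if !gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) {
+				continue
+			}
+			if message.DescriptorProto.GetOptions().GetMapEntry() {
+				continue
+			}
+			used = true
+			ccTypeName := generator.CamelCaseSlice(message.TypeName())
+			// Emit: func TestXxxReset(t *testing.T) { (&Xxx{}).Reset() }
+			p.P(`func Test`, ccTypeName, `Reset(t *`, testingPkg.Use(), `.T) {`)
+			p.In()
+			p.P(`(&`, ccTypeName, `{}).Reset()`)
+			p.Out()
+			p.P(`}`)
+		}
+		return used
+	}
+
+	func init() {
+		testgen.RegisterTestPlugin(NewResetTest)
+	}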
+ +*/ +package testgen + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type TestPlugin interface { + Generate(imports generator.PluginImports, file *generator.FileDescriptor) (used bool) +} + +type NewTestPlugin func(g *generator.Generator) TestPlugin + +var testplugins = make([]NewTestPlugin, 0) + +func RegisterTestPlugin(newFunc NewTestPlugin) { + testplugins = append(testplugins, newFunc) +} + +type plugin struct { + *generator.Generator + generator.PluginImports + tests []TestPlugin +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "testgen" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g + p.tests = make([]TestPlugin, 0, len(testplugins)) + for i := range testplugins { + p.tests = append(p.tests, testplugins[i](g)) + } +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + atLeastOne := false + for i := range p.tests { + used := p.tests[i].Generate(p.PluginImports, file) + if used { + atLeastOne = true + } + } + if atLeastOne { + p.P(`//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) + } +} + +type testProto struct { + *generator.Generator +} + +func newProto(g *generator.Generator) TestPlugin { + return &testProto{g} +} + +func (p *testProto) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `Proto(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`littlefuzz := make([]byte, len(dAtA))`) + p.P(`copy(littlefuzz, dAtA)`) + p.P(`for i := range dAtA {`) + p.In() + p.P(`dAtA[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.P(`if len(littlefuzz) > 0 {`) + p.In() + p.P(`fuzzamount := 100`) + p.P(`for i := 0; i < fuzzamount; i++ {`) + p.In() + p.P(`littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))`) + p.P(`littlefuzz = append(littlefuzz, byte(popr.Intn(256)))`) + p.Out() + p.P(`}`) + 
p.P(`// shouldn't panic`) + p.P(`_ = `, protoPkg.Use(), `.Unmarshal(littlefuzz, msg)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + if gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) || gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`func Test`, ccTypeName, `MarshalTo(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := p.ProtoSize()`) + } else { + p.P(`size := p.Size()`) + } + p.P(`dAtA := make([]byte, size)`) + p.P(`for i := range dAtA {`) + p.In() + p.P(`dAtA[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + p.P(`_, err := p.MarshalTo(dAtA)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`for i := range dAtA {`) + p.In() + p.P(`dAtA[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + } + + if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Benchmark`, ccTypeName, `ProtoMarshal(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`pops := make([]*`, ccTypeName, `, 10000)`) + p.P(`for i := 0; i < 10000; i++ {`) + p.In() + p.P(`pops[i] = NewPopulated`, ccTypeName, `(popr, false)`) + p.Out() + p.P(`}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(pops[i%10000])`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`total += len(dAtA)`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + + p.P(`func Benchmark`, ccTypeName, `ProtoUnmarshal(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`datas := make([][]byte, 10000)`) + p.P(`for i := 0; i < 10000; i++ {`) + p.In() + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(NewPopulated`, ccTypeName, `(popr, false))`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`datas[i] = dAtA`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`total += len(datas[i%10000])`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(datas[i%10000], msg); err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + } + } + return used +} + +type testJson struct { + *generator.Generator +} + +func newJson(g 
*generator.Generator) TestPlugin { + return &testJson{g} +} + +func (p *testJson) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + jsonPkg := imports.NewImport("github.com/gogo/protobuf/jsonpb") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `JSON(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`marshaler := `, jsonPkg.Use(), `.Marshaler{}`) + p.P(`jsondata, err := marshaler.MarshalToString(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`err = `, jsonPkg.Use(), `.UnmarshalString(jsondata, msg)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + } + return used +} + +type testText struct { + *generator.Generator +} + +func newText(g *generator.Generator) TestPlugin { + return &testText{g} +} + +func (p *testText) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + //fmtPkg := imports.NewImport("fmt") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `ProtoText(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`dAtA := `, protoPkg.Use(), `.MarshalTextString(p)`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(dAtA, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + 
p.Out() + p.P(`}`) + p.P() + + p.P(`func Test`, ccTypeName, `ProtoCompactText(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`dAtA := `, protoPkg.Use(), `.CompactTextString(p)`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(dAtA, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + + } + } + return used +} + +func init() { + RegisterTestPlugin(newProto) + RegisterTestPlugin(newJson) + RegisterTestPlugin(newText) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/union/union.go b/vendor/github.com/gogo/protobuf/plugin/union/union.go new file mode 100644 index 000000000000..90def721c9dc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/union/union.go @@ -0,0 +1,209 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The onlyone plugin generates code for the onlyone extension. +All fields must be nullable and only one of the fields may be set, like a union. +Two methods are generated + + GetValue() interface{} + +and + + SetValue(v interface{}) (set bool) + +These provide easier interaction with a onlyone. + +The onlyone extension is not called union as this causes compile errors in the C++ generated code. 
+There can only be one ;)
+
+It is enabled by the following extensions:
+
+  - onlyone
+  - onlyone_all
+
+The onlyone plugin also generates a test if it is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  message U {
+	option (gogoproto.onlyone) = true;
+	optional A A = 1;
+	optional B B = 2;
+  }
+
+given to the onlyone plugin, will generate code which looks a lot like this:
+
+	func (this *U) GetValue() interface{} {
+		if this.A != nil {
+			return this.A
+		}
+		if this.B != nil {
+			return this.B
+		}
+		return nil
+	}
+
+	func (this *U) SetValue(value interface{}) bool {
+		switch vt := value.(type) {
+		case *A:
+			this.A = vt
+		case *B:
+			this.B = vt
+		default:
+			return false
+		}
+		return true
+	}
+
+and the following test code:
+
+	func TestUUnion(t *testing.T) {
+		popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
+		p := NewPopulatedU(popr)
+		v := p.GetValue()
+		msg := &U{}
+		if !msg.SetValue(v) {
+			t.Fatalf("Union: Could not set Value")
+		}
+		if !p.Equal(msg) {
+			t.Fatalf("%#v !Union Equal %#v", msg, p)
+		}
+	}
+
+*/
+package union
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+)
+
+type union struct {
+	*generator.Generator
+	generator.PluginImports
+}
+
+func NewUnion() *union {
+	return &union{}
+}
+
+func (p *union) Name() string {
+	return "union"
+}
+
+func (p *union) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func (p *union) Generate(file *generator.FileDescriptor) {
+	p.PluginImports = generator.NewPluginImports(p.Generator)
+
+	for _, message := range file.Messages() {
+		if !gogoproto.IsUnion(file.FileDescriptorProto, message.DescriptorProto) {
+			continue
+		}
+		if message.DescriptorProto.HasExtension() {
+			panic("onlyone does not currently support extensions")
+		}
+		if message.DescriptorProto.GetOptions().GetMapEntry() {
+			continue
+		}
+
+		ccTypeName := generator.CamelCaseSlice(message.TypeName())
+		p.P(`func (this *`, ccTypeName, `) GetValue() interface{} {`)
+		p.In()
+		for _, field := range message.Field {
+			fieldname := p.GetFieldName(message, field)
+			if fieldname == "Value" {
+				panic("cannot have a onlyone message " + ccTypeName + " with a field named Value")
+			}
+			p.P(`if this.`, fieldname, ` != nil {`)
+			p.In()
+			p.P(`return this.`, fieldname)
+			p.Out()
+			p.P(`}`)
+		}
+		p.P(`return nil`)
+		p.Out()
+		p.P(`}`)
+		p.P(``)
+		p.P(`func (this *`, ccTypeName, `) SetValue(value interface{}) bool {`)
+		p.In()
+		p.P(`switch vt := value.(type) {`)
+		p.In()
+		for _, field := range message.Field {
+			fieldname := p.GetFieldName(message, field)
+			goTyp, _ := p.GoType(message, field)
+			p.P(`case `, goTyp, `:`)
+			p.In()
+			p.P(`this.`, fieldname, ` = vt`)
+			p.Out()
+		}
+		p.P(`default:`)
+		p.In()
+		for _, field := range message.Field {
+			fieldname := p.GetFieldName(message, field)
+			if field.IsMessage() {
+				goTyp, _ := p.GoType(message, field)
+				obj := p.ObjectNamed(field.GetTypeName()).(*generator.Descriptor)
+
+				if gogoproto.IsUnion(obj.File().FileDescriptorProto, obj.DescriptorProto) {
+					p.P(`this.`, fieldname, ` = new(`, generator.GoTypeToName(goTyp), `)`)
+					p.P(`if set := this.`, fieldname, `.SetValue(value); set {`)
+					p.In()
+					p.P(`return true`)
+					p.Out()
+					p.P(`}`)
+					p.P(`this.`, fieldname, ` = nil`)
+				}
+			}
+		}
+		p.P(`return false`)
+		p.Out()
+		p.P(`}`)
+		p.P(`return true`)
+		p.Out()
+		
p.P(`}`) + } +} + +func init() { + generator.RegisterPlugin(NewUnion()) +} diff --git a/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go b/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go new file mode 100644 index 000000000000..949cf8338501 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go @@ -0,0 +1,86 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package union
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/plugin/testgen"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+)
+
+type test struct {
+	*generator.Generator
+}
+
+func NewTest(g *generator.Generator) testgen.TestPlugin {
+	return &test{g}
+}
+
+func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool {
+	used := false
+	randPkg := imports.NewImport("math/rand")
+	timePkg := imports.NewImport("time")
+	testingPkg := imports.NewImport("testing")
+	for _, message := range file.Messages() {
+		if !gogoproto.IsUnion(file.FileDescriptorProto, message.DescriptorProto) ||
+			!gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) {
+			continue
+		}
+		if message.DescriptorProto.GetOptions().GetMapEntry() {
+			continue
+		}
+		used = true
+		ccTypeName := generator.CamelCaseSlice(message.TypeName())
+
+		p.P(`func Test`, ccTypeName, `OnlyOne(t *`, testingPkg.Use(), `.T) {`)
+		p.In()
+		p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`)
+		p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`)
+		p.P(`v := p.GetValue()`)
+		p.P(`msg := &`, ccTypeName, `{}`)
+		p.P(`if !msg.SetValue(v) {`)
+		p.In()
+		p.P(`t.Fatalf("OnlyOne: Could not set Value")`)
+		p.Out()
+		p.P(`}`)
+		p.P(`if !p.Equal(msg) {`)
+		p.In()
+		p.P(`t.Fatalf("%#v !OnlyOne Equal %#v", msg, p)`)
+		p.Out()
+		p.P(`}`)
+		p.Out()
+		p.P(`}`)
+
+	}
+	return used
+}
+
+func init() {
+	testgen.RegisterTestPlugin(NewTest)
+}
diff --git a/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go b/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
new file mode 100644
index 000000000000..fae67de4fd9b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go
@@ -0,0 +1,1657 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The unmarshal plugin generates an Unmarshal method for each message.
+The `Unmarshal([]byte) error` method means that the message
+implements the Unmarshaler interface.
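+For reference, the interface implied above is simply:
+
+	type Unmarshaler interface {
+		Unmarshal([]byte) error
+	}
+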
+This allows proto.Unmarshal to be faster by calling the generated Unmarshal method rather than using reflect.
+
+It is enabled by the following extensions:
+
+  - unmarshaler
+  - unmarshaler_all
+
+Or the following extensions:
+
+  - unsafe_unmarshaler
+  - unsafe_unmarshaler_all
+
+That is, if you want to use the unsafe package in your generated code.
+The speedup from using the unsafe package is not very significant.
+
+The generation of unmarshalling tests is enabled using one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+And benchmarks are enabled using one of the following extensions:
+
+  - benchgen
+  - benchgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.unmarshaler_all) = true;
+
+  message B {
+	option (gogoproto.description) = true;
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+given to the unmarshal plugin, will generate the following code:
+
+	func (m *B) Unmarshal(dAtA []byte) error {
+		l := len(dAtA)
+		iNdEx := 0
+		for iNdEx < l {
+			var wire uint64
+			for shift := uint(0); ; shift += 7 {
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				wire |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			fieldNum := int32(wire >> 3)
+			wireType := int(wire & 0x7)
+			switch fieldNum {
+			case 1:
+				if wireType != 2 {
+					return proto.ErrWrongType
+				}
+				var msglen int
+				for shift := uint(0); ; shift += 7 {
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					msglen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				postIndex := iNdEx + msglen
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				if err := m.A.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				iNdEx = postIndex
+			case 2:
+				if wireType != 2 {
+					return proto.ErrWrongType
+				}
+				var byteLen int
+				for shift := uint(0); ; shift += 7 {
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					byteLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				postIndex := iNdEx + byteLen
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				m.G = append(m.G, github_com_gogo_protobuf_test_custom.Uint128{})
+				if err := m.G[len(m.G)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				iNdEx = postIndex
+			default:
+				var sizeOfWire int
+				for {
+					sizeOfWire++
+					wire >>= 7
+					if wire == 0 {
+						break
+					}
+				}
+				iNdEx -= sizeOfWire
+				skippy, err := skip(dAtA[iNdEx:])
+				if err != nil {
+					return err
+				}
+				if (iNdEx + skippy) > l {
+					return io.ErrUnexpectedEOF
+				}
+				m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+				iNdEx += skippy
+			}
+		}
+		return nil
+	}
+
+Remember to call proto.Unmarshal when using this code.
+This will call m.Reset and invoke the generated Unmarshal method for you.
+If you call m.Unmarshal without m.Reset you could be merging protocol buffers.
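+
+In other words, reusing a message safely looks like this (a sketch, reusing
+the generated message B from the example above):
+
+	msg := &B{}
+	// proto.Unmarshal resets msg and then calls the generated msg.Unmarshal.
+	if err := proto.Unmarshal(dAtA, msg); err != nil {
+		return err
+	}
+	// Calling the generated method directly needs an explicit Reset first,
+	// otherwise the newly decoded fields are merged into the old contents.
+	msg.Reset()
+	if err := msg.Unmarshal(dAtA); err != nil {
+		return err
+	}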
+ +*/ +package unmarshal + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type unmarshal struct { + *generator.Generator + generator.PluginImports + atleastOne bool + ioPkg generator.Single + mathPkg generator.Single + typesPkg generator.Single + binaryPkg generator.Single + localName string +} + +func NewUnmarshal() *unmarshal { + return &unmarshal{} +} + +func (p *unmarshal) Name() string { + return "unmarshal" +} + +func (p *unmarshal) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *unmarshal) decodeVarint(varName string, typName string) { + p.P(`for shift := uint(0); ; shift += 7 {`) + p.In() + p.P(`if shift >= 64 {`) + p.In() + p.P(`return ErrIntOverflow` + p.localName) + p.Out() + p.P(`}`) + p.P(`if iNdEx >= l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`b := dAtA[iNdEx]`) + p.P(`iNdEx++`) + p.P(varName, ` |= `, typName, `(b&0x7F) << shift`) + p.P(`if b < 0x80 {`) + p.In() + p.P(`break`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) +} + +func (p *unmarshal) decodeFixed32(varName string, typeName string) { + p.P(`if (iNdEx+4) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(varName, ` = `, typeName, `(`, p.binaryPkg.Use(), `.LittleEndian.Uint32(dAtA[iNdEx:]))`) + p.P(`iNdEx += 4`) +} + +func (p *unmarshal) decodeFixed64(varName string, typeName string) { + p.P(`if (iNdEx+8) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(varName, ` = `, typeName, `(`, p.binaryPkg.Use(), `.LittleEndian.Uint64(dAtA[iNdEx:]))`) + p.P(`iNdEx += 8`) +} + +func (p *unmarshal) declareMapField(varName string, nullable bool, customType bool, field *descriptor.FieldDescriptorProto) { + switch field.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.P(`var `, varName, ` float64`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.P(`var `, varName, ` float32`) + case descriptor.FieldDescriptorProto_TYPE_INT64: + p.P(`var `, varName, ` int64`) + case descriptor.FieldDescriptorProto_TYPE_UINT64: + p.P(`var `, varName, ` uint64`) + case descriptor.FieldDescriptorProto_TYPE_INT32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + p.P(`var `, varName, ` uint64`) + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + p.P(`var `, varName, ` uint32`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`var `, varName, ` bool`) + case descriptor.FieldDescriptorProto_TYPE_STRING: + cast, _ := p.GoType(nil, field) + cast = strings.Replace(cast, "*", "", 1) + p.P(`var `, varName, ` `, cast) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if gogoproto.IsStdTime(field) { + p.P(varName, ` := new(time.Time)`) + } else if gogoproto.IsStdDuration(field) { + p.P(varName, ` := new(time.Duration)`) + } else if gogoproto.IsStdDouble(field) { + p.P(varName, ` := new(float64)`) + } else if gogoproto.IsStdFloat(field) { + p.P(varName, ` := new(float32)`) + } else if gogoproto.IsStdInt64(field) { + p.P(varName, ` := new(int64)`) + } else if gogoproto.IsStdUInt64(field) { + p.P(varName, ` := new(uint64)`) + } else if gogoproto.IsStdInt32(field) { + p.P(varName, ` := new(int32)`) + } else if gogoproto.IsStdUInt32(field) { + p.P(varName, ` := new(uint32)`) + } else if gogoproto.IsStdBool(field) { + p.P(varName, ` := 
new(bool)`) + } else if gogoproto.IsStdString(field) { + p.P(varName, ` := new(string)`) + } else if gogoproto.IsStdBytes(field) { + p.P(varName, ` := new([]byte)`) + } else { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + if nullable { + p.P(`var `, varName, ` *`, msgname) + } else { + p.P(varName, ` := &`, msgname, `{}`) + } + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if customType { + _, ctyp, err := generator.GetCustomType(field) + if err != nil { + panic(err) + } + p.P(`var `, varName, `1 `, ctyp) + p.P(`var `, varName, ` = &`, varName, `1`) + } else { + p.P(varName, ` := []byte{}`) + } + case descriptor.FieldDescriptorProto_TYPE_UINT32: + p.P(`var `, varName, ` uint32`) + case descriptor.FieldDescriptorProto_TYPE_ENUM: + typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) + p.P(`var `, varName, ` `, typName) + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.P(`var `, varName, ` int64`) + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var `, varName, ` int64`) + } +} + +func (p *unmarshal) mapField(varName string, customType bool, field *descriptor.FieldDescriptorProto) { + switch field.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.P(`var `, varName, `temp uint64`) + p.decodeFixed64(varName+"temp", "uint64") + p.P(varName, ` = `, p.mathPkg.Use(), `.Float64frombits(`, varName, `temp)`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.P(`var `, varName, `temp uint32`) + p.decodeFixed32(varName+"temp", "uint32") + p.P(varName, ` = `, p.mathPkg.Use(), `.Float32frombits(`, varName, `temp)`) + case descriptor.FieldDescriptorProto_TYPE_INT64: + p.decodeVarint(varName, "int64") + case descriptor.FieldDescriptorProto_TYPE_UINT64: + p.decodeVarint(varName, "uint64") + case descriptor.FieldDescriptorProto_TYPE_INT32: + p.decodeVarint(varName, "int32") + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + p.decodeFixed64(varName, "uint64") + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + p.decodeFixed32(varName, "uint32") + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`var `, varName, `temp int`) + p.decodeVarint(varName+"temp", "int") + p.P(varName, ` = bool(`, varName, `temp != 0)`) + case descriptor.FieldDescriptorProto_TYPE_STRING: + p.P(`var stringLen`, varName, ` uint64`) + p.decodeVarint("stringLen"+varName, "uint64") + p.P(`intStringLen`, varName, ` := int(stringLen`, varName, `)`) + p.P(`if intStringLen`, varName, ` < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postStringIndex`, varName, ` := iNdEx + intStringLen`, varName) + p.P(`if postStringIndex`, varName, ` < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postStringIndex`, varName, ` > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + cast, _ := p.GoType(nil, field) + cast = strings.Replace(cast, "*", "", 1) + p.P(varName, ` = `, cast, `(dAtA[iNdEx:postStringIndex`, varName, `])`) + p.P(`iNdEx = postStringIndex`, varName) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + p.P(`var mapmsglen int`) + p.decodeVarint("mapmsglen", "int") + p.P(`if mapmsglen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postmsgIndex := iNdEx + mapmsglen`) + p.P(`if postmsgIndex < 0 {`) + p.In() + 
p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postmsgIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + buf := `dAtA[iNdEx:postmsgIndex]` + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdDouble(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdFloat(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdUInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdUInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdBool(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdString(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdBytes(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + p.P(varName, ` = &`, msgname, `{}`) + p.P(`if err := `, varName, `.Unmarshal(`, buf, `); err != nil {`) + } + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`iNdEx = postmsgIndex`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + p.P(`var mapbyteLen uint64`) + p.decodeVarint("mapbyteLen", "uint64") + p.P(`intMapbyteLen := int(mapbyteLen)`) + p.P(`if intMapbyteLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postbytesIndex := iNdEx + intMapbyteLen`) + p.P(`if postbytesIndex < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postbytesIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if customType { + p.P(`if err := `, varName, `.Unmarshal(dAtA[iNdEx:postbytesIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + p.P(varName, ` = make([]byte, mapbyteLen)`) + p.P(`copy(`, varName, `, dAtA[iNdEx:postbytesIndex])`) + } + p.P(`iNdEx = postbytesIndex`) + case descriptor.FieldDescriptorProto_TYPE_UINT32: + p.decodeVarint(varName, "uint32") + case descriptor.FieldDescriptorProto_TYPE_ENUM: + typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) + p.decodeVarint(varName, typName) + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.decodeFixed32(varName, "int32") + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.decodeFixed64(varName, "int64") + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.P(`var `, varName, `temp int32`) + p.decodeVarint(varName+"temp", "int32") + p.P(varName, `temp = int32((uint32(`, varName, `temp) >> 1) ^ uint32(((`, varName, 
`temp&1)<<31)>>31))`) + p.P(varName, ` = int32(`, varName, `temp)`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var `, varName, `temp uint64`) + p.decodeVarint(varName+"temp", "uint64") + p.P(varName, `temp = (`, varName, `temp >> 1) ^ uint64((int64(`, varName, `temp&1)<<63)>>63)`) + p.P(varName, ` = int64(`, varName, `temp)`) + } +} + +func (p *unmarshal) noStarOrSliceType(msg *generator.Descriptor, field *descriptor.FieldDescriptorProto) string { + typ, _ := p.GoType(msg, field) + if typ[0] == '*' { + return typ[1:] + } + if typ[0] == '[' && typ[1] == ']' { + return typ[2:] + } + return typ +} + +func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) { + repeated := field.IsRepeated() + nullable := gogoproto.IsNullable(field) + typ := p.noStarOrSliceType(msg, field) + oneof := field.OneofIndex != nil + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.P(`var v uint64`) + p.decodeFixed64("v", "uint64") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + } else { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) + } + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.P(`var v uint32`) + p.decodeFixed32("v", "uint32") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + } else { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) + } + case descriptor.FieldDescriptorProto_TYPE_INT64: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_UINT64: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_INT32: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, 
fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed32("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`var v int`) + p.decodeVarint("v", "int") + if oneof { + p.P(`b := `, typ, `(v != 0)`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{b}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(v != 0))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(v != 0)`) + } else { + p.P(`b := `, typ, `(v != 0)`) + p.P(`m.`, fieldname, ` = &b`) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + p.P(`var stringLen uint64`) + p.decodeVarint("stringLen", "uint64") + p.P(`intStringLen := int(stringLen)`) + p.P(`if intStringLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + intStringLen`) + p.P(`if postIndex < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(dAtA[iNdEx:postIndex])}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(dAtA[iNdEx:postIndex]))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(dAtA[iNdEx:postIndex])`) + } else { + p.P(`s := `, typ, `(dAtA[iNdEx:postIndex])`) + p.P(`m.`, fieldname, ` = &s`) + } + p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("unmarshaler does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + p.P(`var msglen int`) + p.decodeVarint("msglen", "int") + p.P(`if msglen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + msglen`) + p.P(`if postIndex < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if oneof { + buf := `dAtA[iNdEx:postIndex]` + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`v := new(time.Time)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := time.Time{}`) + p.P(`if err := `, 
p.typesPkg.Use(), `.StdTimeUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`v := new(time.Duration)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := time.Duration(0)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDouble(field) { + if nullable { + p.P(`v := new(float64)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdFloat(field) { + if nullable { + p.P(`v := new(float32)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdInt64(field) { + if nullable { + p.P(`v := new(int64)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdUInt64(field) { + if nullable { + p.P(`v := new(uint64)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdInt32(field) { + if nullable { + p.P(`v := new(int32)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdUInt32(field) { + if nullable { + p.P(`v := new(uint32)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := 0`) + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdBool(field) { + if nullable { + p.P(`v := new(bool)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := false`) + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdString(field) { + if nullable { + p.P(`v := new(string)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := ""`) + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdBytes(field) { + if nullable { + p.P(`v := new([]byte)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`var v []byte`) + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(&v, `, buf, `); err != nil {`) + } + } else { + p.P(`v := &`, msgname, `{}`) + p.P(`if err := v.Unmarshal(`, buf, `); err != nil {`) + } + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if p.IsMap(field) { + m := p.GoMapType(nil, field) + + keygoTyp, _ := p.GoType(nil, m.KeyField) + keygoAliasTyp, _ := p.GoType(nil, m.KeyAliasField) + // keys may not be pointers + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", 
"", 1) + + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + + // if the map type is an alias and key or values are aliases (type Foo map[Bar]Baz), + // we need to explicitly record their use here. + if gogoproto.IsCastKey(field) { + p.RecordTypeUse(m.KeyAliasField.GetTypeName()) + } + if gogoproto.IsCastValue(field) { + p.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } + + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + if gogoproto.IsStdType(field) { + valuegoTyp = valuegoAliasTyp + } + + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + p.P(`m.`, fieldname, ` = make(`, m.GoType, `)`) + p.Out() + p.P(`}`) + + p.declareMapField("mapkey", false, false, m.KeyAliasField) + p.declareMapField("mapvalue", nullable, gogoproto.IsCustomType(field), m.ValueAliasField) + p.P(`for iNdEx < postIndex {`) + p.In() + + p.P(`entryPreIndex := iNdEx`) + p.P(`var wire uint64`) + p.decodeVarint("wire", "uint64") + p.P(`fieldNum := int32(wire >> 3)`) + + p.P(`if fieldNum == 1 {`) + p.In() + p.mapField("mapkey", false, m.KeyAliasField) + p.Out() + p.P(`} else if fieldNum == 2 {`) + p.In() + p.mapField("mapvalue", gogoproto.IsCustomType(field), m.ValueAliasField) + p.Out() + p.P(`} else {`) + p.In() + p.P(`iNdEx = entryPreIndex`) + p.P(`skippy, err := skip`, p.localName, `(dAtA[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if (skippy < 0) || (iNdEx + skippy) < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > postIndex {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`iNdEx += skippy`) + p.Out() + p.P(`}`) + + p.Out() + p.P(`}`) + + s := `m.` + fieldname + if keygoTyp == keygoAliasTyp { + s += `[mapkey]` + } else { + s += `[` + keygoAliasTyp + `(mapkey)]` + } + + v := `mapvalue` + if (m.ValueField.IsMessage() || gogoproto.IsCustomType(field)) && !nullable { + v = `*` + v + } + if valuegoTyp != valuegoAliasTyp { + v = `((` + valuegoAliasTyp + `)(` + v + `))` + } + + p.P(s, ` = `, v) + } else if repeated { + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(time.Time))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, time.Time{})`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(time.Duration))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, time.Duration(0))`) + } + } else if gogoproto.IsStdDouble(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(float64))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdFloat(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(float32))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdInt64(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(int64))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdUInt64(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(uint64))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdInt32(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(int32))`) + } else { 
+ p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdUInt32(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(uint32))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, 0)`) + } + } else if gogoproto.IsStdBool(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(bool))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, false)`) + } + } else if gogoproto.IsStdString(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(string))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, "")`) + } + } else if gogoproto.IsStdBytes(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new([]byte))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, []byte{})`) + } + } else if nullable && !gogoproto.IsCustomType(field) { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, &`, msgname, `{})`) + } else { + goType, _ := p.GoType(nil, field) + // remove the slice from the type, i.e. []*T -> *T + goType = goType[2:] + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, goType, `{})`) + } + varName := `m.` + fieldname + `[len(m.` + fieldname + `)-1]` + buf := `dAtA[iNdEx:postIndex]` + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDouble(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdFloat(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdInt64(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdUInt64(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdInt32(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdUInt32(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdBool(field) { + if nullable { + 
p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdString(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdBytes(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else { + p.P(`if err := `, varName, `.Unmarshal(`, buf, `); err != nil {`) + } + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + if gogoproto.IsStdTime(field) { + p.P(`m.`, fieldname, ` = new(time.Time)`) + } else if gogoproto.IsStdDuration(field) { + p.P(`m.`, fieldname, ` = new(time.Duration)`) + } else if gogoproto.IsStdDouble(field) { + p.P(`m.`, fieldname, ` = new(float64)`) + } else if gogoproto.IsStdFloat(field) { + p.P(`m.`, fieldname, ` = new(float32)`) + } else if gogoproto.IsStdInt64(field) { + p.P(`m.`, fieldname, ` = new(int64)`) + } else if gogoproto.IsStdUInt64(field) { + p.P(`m.`, fieldname, ` = new(uint64)`) + } else if gogoproto.IsStdInt32(field) { + p.P(`m.`, fieldname, ` = new(int32)`) + } else if gogoproto.IsStdUInt32(field) { + p.P(`m.`, fieldname, ` = new(uint32)`) + } else if gogoproto.IsStdBool(field) { + p.P(`m.`, fieldname, ` = new(bool)`) + } else if gogoproto.IsStdString(field) { + p.P(`m.`, fieldname, ` = new(string)`) + } else if gogoproto.IsStdBytes(field) { + p.P(`m.`, fieldname, ` = new([]byte)`) + } else { + goType, _ := p.GoType(nil, field) + // remove the star from the type + p.P(`m.`, fieldname, ` = &`, goType[1:], `{}`) + } + p.Out() + p.P(`}`) + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDouble(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdFloat(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdUInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdUInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdBool(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdString(field) { + p.P(`if err := `, p.typesPkg.Use(), 
`.StdStringUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdBytes(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + } + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDouble(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDoubleUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdFloat(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdFloatUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt64Unmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdUInt64(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt64Unmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdInt32Unmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdUInt32(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdUInt32Unmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdBool(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBoolUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdString(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdStringUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdBytes(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdBytesUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + } + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } + p.P(`iNdEx = postIndex`) + + case descriptor.FieldDescriptorProto_TYPE_BYTES: + p.P(`var byteLen int`) + p.decodeVarint("byteLen", "int") + p.P(`if byteLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + byteLen`) + p.P(`if postIndex < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if !gogoproto.IsCustomType(field) { + if oneof { + p.P(`v := make([]byte, postIndex-iNdEx)`) + p.P(`copy(v, dAtA[iNdEx:postIndex])`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`) + p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], dAtA[iNdEx:postIndex])`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , dAtA[iNdEx:postIndex]...)`) + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + p.P(`m.`, fieldname, ` = []byte{}`) + p.Out() + p.P(`}`) + } + } else { + _, ctyp, err := generator.GetCustomType(field) + if err != nil { + panic(err) + } + if oneof { + p.P(`var vv `, ctyp) + p.P(`v := &vv`) + 
p.P(`if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{*v}`) + } else if repeated { + p.P(`var v `, ctyp) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`var v `, ctyp) + p.P(`m.`, fieldname, ` = &v`) + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } + } + p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_UINT32: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_ENUM: + typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) + if oneof { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typName) + } else { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed32("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`v = `, typ, `((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = v`) + } else { + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var v uint64`) + p.decodeVarint("v", "uint64") + p.P(`v = (v >> 1) ^ 
uint64((int64(v&1)<<63)>>63)`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(v)}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(v))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(v)`) + } else { + p.P(`v2 := `, typ, `(v)`) + p.P(`m.`, fieldname, ` = &v2`) + } + default: + panic("not implemented") + } +} + +func (p *unmarshal) Generate(file *generator.FileDescriptor) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + p.localName = generator.FileName(file) + + p.ioPkg = p.NewImport("io") + p.mathPkg = p.NewImport("math") + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") + p.binaryPkg = p.NewImport("encoding/binary") + fmtPkg := p.NewImport("fmt") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) && + !gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + + // build a map required field_id -> bitmask offset + rfMap := make(map[int32]uint) + rfNextId := uint(0) + for _, field := range message.Field { + if field.IsRequired() { + rfMap[field.GetNumber()] = rfNextId + rfNextId++ + } + } + rfCount := len(rfMap) + + p.P(`func (m *`, ccTypeName, `) Unmarshal(dAtA []byte) error {`) + p.In() + if rfCount > 0 { + p.P(`var hasFields [`, strconv.Itoa(1+(rfCount-1)/64), `]uint64`) + } + p.P(`l := len(dAtA)`) + p.P(`iNdEx := 0`) + p.P(`for iNdEx < l {`) + p.In() + p.P(`preIndex := iNdEx`) + p.P(`var wire uint64`) + p.decodeVarint("wire", "uint64") + p.P(`fieldNum := int32(wire >> 3)`) + if len(message.Field) > 0 || !message.IsGroup() { + p.P(`wireType := int(wire & 0x7)`) + } + if !message.IsGroup() { + p.P(`if wireType == `, strconv.Itoa(proto.WireEndGroup), ` {`) + p.In() + p.P(`return `, fmtPkg.Use(), `.Errorf("proto: `+message.GetName()+`: wiretype end group for non-group")`) + p.Out() + p.P(`}`) + } + p.P(`if fieldNum <= 0 {`) + p.In() + p.P(`return `, fmtPkg.Use(), `.Errorf("proto: `+message.GetName()+`: illegal tag %d (wire type %d)", fieldNum, wire)`) + p.Out() + p.P(`}`) + p.P(`switch fieldNum {`) + p.In() + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + errFieldname := fieldname + if field.OneofIndex != nil { + errFieldname = p.GetOneOfFieldName(message, field) + } + possiblyPacked := field.IsScalar() && field.IsRepeated() + p.P(`case `, strconv.Itoa(int(field.GetNumber())), `:`) + p.In() + wireType := field.WireType() + if possiblyPacked { + p.P(`if wireType == `, strconv.Itoa(wireType), `{`) + p.In() + p.field(file, message, field, fieldname, false) + p.Out() + p.P(`} else if wireType == `, strconv.Itoa(proto.WireBytes), `{`) + p.In() + p.P(`var packedLen int`) + p.decodeVarint("packedLen", "int") + p.P(`if packedLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + packedLen`) + p.P(`if postIndex < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return 
`, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + + p.P(`var elementCount int`) + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.P(`elementCount = packedLen/`, 8) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.P(`elementCount = packedLen/`, 4) + case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_SINT32, descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var count int`) + p.P(`for _, integer := range dAtA[iNdEx:postIndex] {`) + p.In() + p.P(`if integer < 128 {`) + p.In() + p.P(`count++`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`elementCount = count`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`elementCount = packedLen`) + } + p.P(`if elementCount != 0 && len(m.`, fieldname, `) == 0 {`) + p.In() + p.P(`m.`, fieldname, ` = make([]`, p.noStarOrSliceType(message, field), `, 0, elementCount)`) + p.Out() + p.P(`}`) + + p.P(`for iNdEx < postIndex {`) + p.In() + p.field(file, message, field, fieldname, false) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) + p.Out() + p.P(`}`) + } else { + p.P(`if wireType != `, strconv.Itoa(wireType), `{`) + p.In() + p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) + p.Out() + p.P(`}`) + p.field(file, message, field, fieldname, proto3) + } + + if field.IsRequired() { + fieldBit, ok := rfMap[field.GetNumber()] + if !ok { + panic("field is required, but no bit registered") + } + p.P(`hasFields[`, strconv.Itoa(int(fieldBit/64)), `] |= uint64(`, fmt.Sprintf("0x%08x", uint64(1)<<(fieldBit%64)), `)`) + } + } + p.Out() + p.P(`default:`) + p.In() + if message.DescriptorProto.HasExtension() { + c := []string{} + for _, erange := range message.GetExtensionRange() { + c = append(c, `((fieldNum >= `+strconv.Itoa(int(erange.GetStart()))+") && (fieldNum<"+strconv.Itoa(int(erange.GetEnd()))+`))`) + } + p.P(`if `, strings.Join(c, "||"), `{`) + p.In() + p.P(`var sizeOfWire int`) + p.P(`for {`) + p.In() + p.P(`sizeOfWire++`) + p.P(`wire >>= 7`) + p.P(`if wire == 0 {`) + p.In() + p.P(`break`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`iNdEx-=sizeOfWire`) + p.P(`skippy, err := skip`, p.localName+`(dAtA[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if (skippy < 0) || (iNdEx + skippy) < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(protoPkg.Use(), `.AppendExtension(m, int32(fieldNum), dAtA[iNdEx:iNdEx+skippy])`) + p.P(`iNdEx += skippy`) + p.Out() + p.P(`} else {`) + p.In() + } + p.P(`iNdEx=preIndex`) + p.P(`skippy, err := skip`, p.localName, `(dAtA[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if (skippy < 0) || (iNdEx + skippy) < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) 
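All the field cases above funnel through the same wire-format primitives: the emitted Unmarshal code reads a varint, splits it into field number and wire type (`fieldNum := int32(wire >> 3)`, `wireType := int(wire & 0x7)`), and for sint32/sint64 applies the zigzag transform before the Generate method resumes below with unknown-field and required-field handling. The following standalone sketch is hypothetical and not part of the vendored plugin — `decodeVarint` here is a hand-written stand-in for the inline varint loops the plugin emits — but it shows those three steps on a two-byte message:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint is a stand-in for the inline varint loops the plugin emits.
func decodeVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// Tag byte 0x08 = field 1, wire type 0; payload 0x05 zigzag-encodes -3.
	dAtA := []byte{0x08, 0x05}
	wire, iNdEx, _ := decodeVarint(dAtA, 0)
	fieldNum := int32(wire >> 3) // same split the generated code performs
	wireType := int(wire & 0x7)
	raw, _, _ := decodeVarint(dAtA, iNdEx)
	// The TYPE_SINT64 zigzag transform emitted above.
	v := int64((raw >> 1) ^ uint64((int64(raw&1)<<63)>>63))
	fmt.Println(fieldNum, wireType, v) // prints: 1 0 -3
}
```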
+ if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)`) + } + p.P(`iNdEx += skippy`) + p.Out() + if message.DescriptorProto.HasExtension() { + p.Out() + p.P(`}`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + + for _, field := range message.Field { + if !field.IsRequired() { + continue + } + + fieldBit, ok := rfMap[field.GetNumber()] + if !ok { + panic("field is required, but no bit registered") + } + + p.P(`if hasFields[`, strconv.Itoa(int(fieldBit/64)), `] & uint64(`, fmt.Sprintf("0x%08x", uint64(1)<<(fieldBit%64)), `) == 0 {`) + p.In() + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.P(`return new(`, protoPkg.Use(), `.RequiredNotSetError)`) + } else { + p.P(`return `, protoPkg.Use(), `.NewRequiredNotSetError("`, field.GetName(), `")`) + } + p.Out() + p.P(`}`) + } + p.P() + p.P(`if iNdEx > l {`) + p.In() + p.P(`return ` + p.ioPkg.Use() + `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`return nil`) + p.Out() + p.P(`}`) + } + if !p.atleastOne { + return + } + + p.P(`func skip` + p.localName + `(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength` + p.localName + ` + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup` + p.localName + ` + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, ` + fmtPkg.Use() + `.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength` + p.localName + ` + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + + var ( + ErrInvalidLength` + p.localName + ` = ` + fmtPkg.Use() + `.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow` + p.localName + ` = ` + fmtPkg.Use() + `.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup` + p.localName + ` = ` + fmtPkg.Use() + `.Errorf("proto: unexpected end of group") + ) + `) +} + +func init() { + generator.RegisterPlugin(NewUnmarshal()) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile new file mode 100644 index 000000000000..52e2d4e70471 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/Makefile @@ -0,0 +1,41 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: test + +test: + go test + make -C testdata test + +regenerate: + go test --regenerate + make -C descriptor regenerate + make -C plugin regenerate diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go new file mode 100644 index 000000000000..15c7cf43c282 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/doc.go @@ -0,0 +1,51 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + A plugin for the Google protocol buffer compiler to generate Go code. + Run it by building this program and putting it in your path with the name + protoc-gen-gogo + That word 'gogo' at the end becomes part of the option string set for the + protocol compiler, so once the protocol compiler (protoc) is installed + you can run + protoc --gogo_out=output_directory input_directory/file.proto + to generate Go bindings for the protocol defined by file.proto. + With that input, the output will be written to + output_directory/go_package/file.pb.go + + The generated code is documented in the package comment for + the library. + + See the README and documentation for protocol buffers to learn more: + https://developers.google.com/protocol-buffers/ + +*/ +package documentation diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go new file mode 100644 index 000000000000..ab07ed61ef03 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go @@ -0,0 +1,3444 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
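The unmarshal plugin earlier in this diff hooks into code generation through `generator.RegisterPlugin` in its `init` function; the generator package added below defines the `Plugin` interface that such second-order plugins implement. As a loose sketch only — the `echo` plugin, its package name, and its output are invented for illustration, not part of this diff — a minimal plugin would look like:

```go
// Package echoplugin is a hypothetical example of a second-order plugin.
package echoplugin

import (
	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
)

// echo emits one comment line per message, using the same P output
// primitive the unmarshal plugin uses to build its generated code.
type echo struct {
	gen *generator.Generator
}

func (e *echo) Name() string { return "echo" }

// Init runs once, after the generator's data structures are built.
func (e *echo) Init(g *generator.Generator) { e.gen = g }

// Generate is called per file; here it just lists the messages.
func (e *echo) Generate(file *generator.FileDescriptor) {
	for _, msg := range file.Messages() {
		e.gen.P("// message: ", generator.CamelCaseSlice(msg.TypeName()))
	}
}

// GenerateImports would add any imports the generated code needs.
func (e *echo) GenerateImports(file *generator.FileDescriptor) {}

func init() {
	generator.RegisterPlugin(&echo{})
}
```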
+ +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 3 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. + Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +type pluginSlice []Plugin + +func (ps pluginSlice) Len() int { + return len(ps) +} + +func (ps pluginSlice) Less(i, j int) bool { + return ps[i].Name() < ps[j].Name() +} + +func (ps pluginSlice) Swap(i, j int) { + ps[i], ps[j] = ps[j], ps[i] +} + +var plugins pluginSlice + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name method are common to messages and enums. +type common struct { + file *FileDescriptor // File this object comes from. +} + +// GoImportPath is the import path of the Go package containing the type. +func (c *common) GoImportPath() GoImportPath { + return c.file.importPath +} + +func (c *common) File() *FileDescriptor { return c.file } + +func fileIsProto3(file *descriptor.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } + +// Descriptor represents a protocol buffer message. 
+type Descriptor struct { + common + *descriptor.DescriptorProto + parent *Descriptor // The containing message, if any. + nested []*Descriptor // Inner messages, if any. + enums []*EnumDescriptor // Inner enums, if any. + ext []*ExtensionDescriptor // Extensions, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or another message. + path string // The SourceCodeInfo path as comma-separated integers. + group bool +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (d *Descriptor) TypeName() []string { + if d.typename != nil { + return d.typename + } + n := 0 + for parent := d; parent != nil; parent = parent.parent { + n++ + } + s := make([]string, n) + for parent := d; parent != nil; parent = parent.parent { + n-- + s[n] = parent.GetName() + } + d.typename = s + return s +} + +func (d *Descriptor) allowOneof() bool { + return true +} + +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type EnumDescriptor struct { + common + *descriptor.EnumDescriptorProto + parent *Descriptor // The containing message, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or a message. + path string // The SourceCodeInfo path as comma-separated integers. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *EnumDescriptor) TypeName() (s []string) { + if e.typename != nil { + return e.typename + } + name := e.GetName() + if e.parent == nil { + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + e.typename = s + return s +} + +// alias provides the TypeName corrected for the application of any naming +// extensions on the enum type. It should be used for generating references to +// the Go types and for calculating prefixes. +func (e *EnumDescriptor) alias() (s []string) { + s = e.TypeName() + if gogoproto.IsEnumCustomName(e.EnumDescriptorProto) { + s[len(s)-1] = gogoproto.GetEnumCustomName(e.EnumDescriptorProto) + } + + return +} + +// Everything but the last element of the full type name, CamelCased. +// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... . +func (e *EnumDescriptor) prefix() string { + typeName := e.alias() + if e.parent == nil { + // If the enum is not part of a message, the prefix is just the type name. + return CamelCase(typeName[len(typeName)-1]) + "_" + } + return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" +} + +// The integer value of the named constant in this enumerated type. +func (e *EnumDescriptor) integerValueAsString(name string) string { + for _, c := range e.Value { + if c.GetName() == name { + return fmt.Sprint(c.GetNumber()) + } + } + log.Fatal("cannot find value for enum constant") + return "" +} + +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type ExtensionDescriptor struct { + common + *descriptor.FieldDescriptorProto + parent *Descriptor // The containing message, if any. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) { + name := e.GetName() + if e.parent == nil { + // top-level extension + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + return s +} + +// DescName returns the variable name used for the generated descriptor. +func (e *ExtensionDescriptor) DescName() string { + // The full type name. + typeName := e.TypeName() + // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. + for i, s := range typeName { + typeName[i] = CamelCase(s) + } + return "E_" + strings.Join(typeName, "_") +} + +// ImportedDescriptor describes a type that has been publicly imported from another file. +type ImportedDescriptor struct { + common + o Object +} + +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } + +// FileDescriptor describes a protocol buffer descriptor file (.proto). +// It includes slices of all the messages and enums defined within it. +// Those slices are constructed by WrapTypes. +type FileDescriptor struct { + *descriptor.FileDescriptorProto + desc []*Descriptor // All the messages defined in this file. + enum []*EnumDescriptor // All the enums defined in this file. + ext []*ExtensionDescriptor // All the top-level extensions defined in this file. + imp []*ImportedDescriptor // All types defined in files publicly imported by this file. + + // Comments, stored as a map of path (comma-separated integers) to the comment. + comments map[string]*descriptor.SourceCodeInfo_Location + + // The full list of symbols that are exported, + // as a map from the exported object to its symbols. + // This is used for supporting public imports. + exported map[Object][]symbol + + importPath GoImportPath // Import path of this file's package. + packageName GoPackageName // Name of this file's Go package. + + proto3 bool // whether to generate proto3 code for this file +} + +// VarName is the variable name we'll use in the generated code to refer +// to the compressed bytes of this descriptor. It is not exported, so +// it is only valid inside the generated package. +func (d *FileDescriptor) VarName() string { + h := sha256.Sum256([]byte(d.GetName())) + return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) +} + +// goPackageOption interprets the file's go_package option. +// If there is no go_package, it returns ("", "", false). +// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "", false + } + // A semicolon-delimited suffix delimits the import path and package name. + sc := strings.Index(opt, ";") + if sc >= 0 { + return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true + } + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(opt, "/") + if slash >= 0 { + return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true + } + return "", cleanPackageName(opt), true +} + +// goFileName returns the output name for the generated Go file.
+func (d *FileDescriptor) goFileName(pathType pathType) string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + if pathType == pathTypeSourceRelative { + return name + } + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(string(impPath), name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, filename string, pkg GoPackageName) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + oneofTypes []string +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + g.P("// ", ms.sym, " from public import ", filename) + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) + } +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + s := es.name + g.P("// ", s, " from public import ", filename) + g.P("type ", s, " = ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + GoImportPath() GoImportPath + TypeName() []string + File() *FileDescriptor +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. 
+ file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + addedImports map[GoImportPath]bool // Additional imports to emit. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + pathType pathType // How to generate output filenames. + writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store + + customImports []string + writtenImports map[string]bool // For de-duplicating written imports +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + g.writtenImports = make(map[string]bool) + g.addedImports = make(map[GoImportPath]bool) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-gogo: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-gogo: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } + case "plugins": + pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList == "" { + return + } + if pluginList == "none" { + pluginList = "" + } + gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "protosizer", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck", "compare"} + pluginList = strings.Join(append(gogoPluginNames, pluginList), "+") + if pluginList != "" { + // Amend the set of plugins.
+ enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins pluginSlice + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + sort.Sort(nplugins) + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { + return "" + } + return string(g.GoPackageName(importPath)) + "." +} + +// GoPackageName returns the name used for a package. +func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + if g.packageNames == nil { + g.packageNames = make(map[GoImportPath]GoPackageName) + } + g.packageNames[importPath] = name + if g.usedPackageNames == nil { + g.usedPackageNames = make(map[GoPackageName]bool) + } + g.usedPackageNames[name] = true + return name +} + +// AddImport adds a package to the generated file's import section. +// It returns the name used for the package. +func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { + g.addedImports[importPath] = true + return g.GoPackageName(importPath) +} + +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} + +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + globalPackageNames[name] = true + return string(name) +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +var isGoPredeclaredIdentifier = map[string]bool{ + "append": true, + "bool": true, + "byte": true, + "cap": true, + "close": true, + "complex": true, + "complex128": true, + "complex64": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "float32": true, + "float64": true, + "imag": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "rune": true, + "string": true, + "true": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, +} + +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword: insert _. 
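+	// (Illustrative: badToUnderscore above turns "my-pkg.v2" into "my_pkg_v2";
+	// the checks below then yield "_type" for "type" and "_2fast" for "2fast".)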
+ if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. +func (g *Generator) defaultGoPackage() GoPackageName { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + return cleanPackageName(p) +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + g.outputImportPath = g.genFiles[0].importPath + + defaultPackageNames := make(map[GoImportPath]GoPackageName) + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p + } + } + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) + } + } + + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) + } + } + + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. + g.Pkg = map[string]string{ + "fmt": "fmt", + "math": "math", + "proto": "proto", + "golang_proto": "golang_proto", + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } + for _, f := range g.Request.ProtoFile { + fd := &FileDescriptor{ + FileDescriptorProto: f, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. 
+ fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. + fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *FileDescriptor) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. 
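+// For example (illustrative), a message Bar declared in package foo is keyed
+// as ".foo.Bar"; in a file with no package clause it would be keyed as ".Bar".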
+// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). +func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } + g.WriteString(g.indent) + for _, v := range str { + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } + default: + g.printAtom(v) + } + } + g.WriteByte('\n') +} + +// addInitf stores the given statement to be printed inside the file's init function. +// The statement is given as a format specifier and arguments. 
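+// For example (illustrative), enum registration queues a statement like
+//
+//	g.addInitf("%s.RegisterEnum(%q, %s_name, %s_value)", g.Pkg["proto"], "foo.Color", "Color", "Color")
+//
+// which is later printed inside the generated init() as
+// proto.RegisterEnum("foo.Color", Color_name, Color_value).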
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+func (g *Generator) PrintImport(alias GoPackageName, pkg GoImportPath) {
+	statement := string(alias) + " " + strconv.Quote(string(pkg))
+	if g.writtenImports[statement] {
+		return
+	}
+	g.P(statement)
+	g.writtenImports[statement] = true
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.annotations = nil
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		fname := file.goFileName(g.pathType)
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(fname),
+			Content: proto.String(g.String()),
+		})
+		if g.annotateCode {
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
+			// strings contain valid UTF-8.
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+			})
+		}
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.customImports = make([]string, 0)
+	g.file = file
+	g.usedPackages = make(map[GoImportPath]bool)
+	g.packageNames = make(map[GoImportPath]GoPackageName)
+	g.usedPackageNames = make(map[GoPackageName]bool)
+	g.addedImports = make(map[GoImportPath]bool)
+	for name := range globalPackageNames {
+		g.usedPackageNames[name] = true
+	}
+
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the proto package it is being compiled against.")
+	g.P("// A compilation error at this line likely means your copy of the")
+	g.P("// proto package needs to be updated.")
+	if gogoproto.ImportsGoGoProto(file.FileDescriptorProto) {
+		g.P("const _ = ", g.Pkg["proto"], ".GoGoProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	} else {
+		g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	}
+	g.P()
+	// Reset on each file
+	g.writtenImports = make(map[string]bool)
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
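+		// (A map<string, int32> field, for example, is represented by a synthetic
+		// nested message with the map_entry option set; it is rendered as a Go
+		// map rather than as a struct of its own.)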
+ if desc.GetOptions().GetMapEntry() { + continue + } + g.generateMessage(desc) + } + for _, ext := range g.file.ext { + g.generateExtension(ext) + } + g.generateInitFunction() + g.generateFileDescriptor(file) + + // Run the plugins before the imports so we know which imports are necessary. + g.runPlugins(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + remAnno := g.annotations + g.Buffer = new(bytes.Buffer) + g.annotations = nil + g.generateHeader() + g.generateImports() + if !g.writeOutput { + return + } + // Adjust the offsets for annotations displaced by the header and imports. + for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } + g.Write(rem.Bytes()) + + // Reformat generated code and patch annotation locations. + fset := token.NewFileSet() + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. + // This should never happen in practice, but it can while changing generated code, + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + if serr := s.Err(); serr != nil { + g.Fail("bad Go source code was generated:", err.Error(), "\n"+string(original)) + } else { + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + } + ast.SortImports(fset, fileAST) + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-gogo. DO NOT EDIT.") + if g.file.GetOptions().GetDeprecated() { + g.P("// ", *g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", *g.file.Name) + } + g.P() + g.PrintComments(strconv.Itoa(packagePath)) + g.P() + g.P("package ", g.file.packageName) + g.P() +} + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. 
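+// For example (illustrative), the path "4,0,2,1" addresses the second field of
+// the first message in the file (4 = message_type, 2 = field).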
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if c, ok := g.makeComments(path); ok {
+		g.P(c)
+		return true
+	}
+	return false
+}
+
+// makeComments generates the comment string for the field; there is no trailing newline.
+func (g *Generator) makeComments(path string) (string, bool) {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return "", false
+	}
+	w := new(bytes.Buffer)
+	nl := ""
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+		fmt.Fprintf(w, "%s//%s", nl, line)
+		nl = "\n"
+	}
+	return w.String(), true
+}
+
+// Comments returns any comments from the source .proto file, or the empty string if none were found.
+// The path is a comma-separated list of integers.
+// See descriptor.proto for its format.
+func (g *Generator) Comments(path string) string {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return ""
+	}
+	text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+	return text
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+	return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+	for _, j := range g.file.WeakDependency {
+		if j == i {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+	imports := make(map[GoImportPath]GoPackageName)
+	for i, s := range g.file.Dependency {
+		fd := g.fileByName(s)
+		importPath := fd.importPath
+		// Do not import our own package.
+		if importPath == g.file.importPath {
+			continue
+		}
+		// Do not import weak imports.
+		if g.weak(int32(i)) {
+			continue
+		}
+		// Do not import a package twice.
+		if _, ok := imports[importPath]; ok {
+			continue
+		}
+		// We need to import all the dependencies, even if we don't reference them,
+		// because other code and tools depend on having the full transitive closure
+		// of protocol buffer types in the binary.
+		packageName := g.GoPackageName(importPath)
+		if _, ok := g.usedPackages[importPath]; !ok {
+			packageName = "_"
+		}
+		imports[importPath] = packageName
+	}
+	for importPath := range g.addedImports {
+		imports[importPath] = g.GoPackageName(importPath)
+	}
+	// We almost always need a proto import. Rather than computing when we
+	// do, which is tricky when there's a plugin, just import it and
+	// reference it later. The same argument applies to the fmt and math packages.
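+	// The block emitted below looks like this (illustrative):
+	//
+	//	import (
+	//		fmt "fmt"
+	//		math "math"
+	//		proto "github.com/gogo/protobuf/proto"
+	//		_ "unreferenced/dependency"
+	//	)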
+ g.P("import (") + g.PrintImport(GoPackageName(g.Pkg["fmt"]), "fmt") + g.PrintImport(GoPackageName(g.Pkg["math"]), "math") + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) { + g.PrintImport(GoPackageName(g.Pkg["proto"]), GoImportPath(g.ImportPrefix)+GoImportPath("github.com/gogo/protobuf/proto")) + if gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.PrintImport(GoPackageName(g.Pkg["golang_proto"]), GoImportPath(g.ImportPrefix)+GoImportPath("github.com/golang/protobuf/proto")) + } + } else { + g.PrintImport(GoPackageName(g.Pkg["proto"]), GoImportPath(g.ImportPrefix)+GoImportPath("github.com/golang/protobuf/proto")) + } + for importPath, packageName := range imports { + g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) + } + // Custom gogo imports + for _, s := range g.customImports { + s1 := strings.Map(badToUnderscore, s) + g.PrintImport(GoPackageName(s1), GoImportPath(s)) + } + // gogo plugin imports + // TODO: may need to worry about uniqueness across plugins and could change this + // to use the `addedImports` technique + for _, p := range plugins { + p.GenerateImports(g.file) + } + g.P(")") + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.P("var _ = ", g.Pkg["golang_proto"], ".Marshal") + } + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + for _, cimport := range g.customImports { + if cimport == "time" { + g.P("var _ = time.Kitchen") + break + } + } + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + df := id.o.File() + filename := *df.Name + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) + } + g.usedPackages[df.importPath] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) + } + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.alias() + // The full type name, CamelCased. 
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } + + g.PrintComments(enum.path) + if !gogoproto.EnabledGoEnumPrefix(enum.file.FileDescriptorProto, enum.EnumDescriptorProto) { + ccPrefix = "" + } + + if gogoproto.HasEnumDecl(enum.file.FileDescriptorProto, enum.EnumDescriptorProto) { + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } + name := *e.Name + if gogoproto.IsEnumValueCustomName(e) { + name = gogoproto.GetEnumValueCustomName(e) + } + name = ccPrefix + name + + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") + } + g.P() + g.P("var ", ccTypeName, "_name = map[int32]string{") + g.In() + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.Out() + g.P("}") + g.P() + g.P("var ", ccTypeName, "_value = map[string]int32{") + g.In() + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.Out() + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.In() + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.Out() + g.P("}") + g.P() + } + + if gogoproto.IsGoEnumStringer(g.file.FileDescriptorProto, enum.EnumDescriptorProto) { + g.P("func (x ", ccTypeName, ") String() string {") + g.In() + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + g.P() + } + + if !enum.proto3() && !gogoproto.IsGoEnumStringer(g.file.FileDescriptorProto, enum.EnumDescriptorProto) { + g.P("func (x ", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalJSONEnum(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + g.P() + } + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.In() + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.Out() + g.P("}") + g.P() + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) 
+	}
+	indexes = append(indexes, strconv.Itoa(enum.index))
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+	g.In()
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.Out()
+	g.P("}")
+	g.P()
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+		g.P()
+	}
+
+	g.generateEnumRegistration(enum)
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code. The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field.
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
+					defaultValue = fmt.Sprint(float32(f))
+				}
+			}
+		case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
+					defaultValue = fmt.Sprint(f)
+				}
+			}
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.goPackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+ } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && IsScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. + name += ",json=" + json + } + name = ",name=" + name + + embed := "" + if gogoproto.IsEmbed(field) { + embed = ",embedded=" + fieldName + } + + ctype := "" + if gogoproto.IsCustomType(field) { + ctype = ",customtype=" + gogoproto.GetCustomType(field) + } + + casttype := "" + if gogoproto.IsCastType(field) { + casttype = ",casttype=" + gogoproto.GetCastType(field) + } + + castkey := "" + if gogoproto.IsCastKey(field) { + castkey = ",castkey=" + gogoproto.GetCastKey(field) + } + + castvalue := "" + if gogoproto.IsCastValue(field) { + castvalue = ",castvalue=" + gogoproto.GetCastValue(field) + // record the original message type for jsonpb reconstruction + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + valueField := d.Field[1] + if valueField.IsMessage() { + castvalue += ",castvaluetype=" + strings.TrimPrefix(valueField.GetTypeName(), ".") + } + } + } + + if message.proto3() { + name += ",proto3" + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + stdtime := "" + if gogoproto.IsStdTime(field) { + stdtime = ",stdtime" + } + stdduration := "" + if gogoproto.IsStdDuration(field) { + stdduration = ",stdduration" + } + wktptr := "" + if gogoproto.IsWktPtr(field) { + wktptr = ",wktptr" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue, + embed, + ctype, + casttype, + castkey, + castvalue, + stdtime, + stdduration, + wktptr)) +} + +func needsStar(field *descriptor.FieldDescriptorProto, proto3 bool, allowOneOf bool) bool { + if isRepeated(field) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE || gogoproto.IsCustomType(field)) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) { + return false + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && !gogoproto.IsCustomType(field) { + return false + } + if !gogoproto.IsNullable(field) { + return false + } + if field.OneofIndex != nil && allowOneOf && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) { + return false + } + if proto3 && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) && + !gogoproto.IsCustomType(field) { + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. 
If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package; and the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		typ, wire = "float64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		typ, wire = "float32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		typ, wire = "int64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		typ, wire = "uint64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		typ, wire = "int32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		typ, wire = "uint32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		typ, wire = "uint64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		typ, wire = "uint32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		typ, wire = "bool", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		typ, wire = "string", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "group"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		typ, wire = "[]byte", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "varint"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		typ, wire = "int32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		typ, wire = "int64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		typ, wire = "int32", "zigzag32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		typ, wire = "int64", "zigzag64"
+	default:
+		g.Fail("unknown type for", field.GetName())
+	}
+	switch {
+	case gogoproto.IsCustomType(field) && gogoproto.IsCastType(field):
+		g.Fail(field.GetName() + " cannot be custom type and cast type")
+	case gogoproto.IsCustomType(field):
+		var packageName string
+		var err error
+		packageName, typ, err = getCustomType(field)
+		if err != nil {
+			g.Fail(err.Error())
+		}
+		if len(packageName) > 0 {
+			g.customImports = append(g.customImports, packageName)
+		}
+	case gogoproto.IsCastType(field):
+		var packageName string
+		var err error
+		packageName, typ, err = getCastType(field)
+		if err != nil {
+			g.Fail(err.Error())
+		}
+		if len(packageName) > 0 {
+			g.customImports = append(g.customImports, packageName)
+		}
+	case gogoproto.IsStdTime(field):
+		g.customImports = append(g.customImports, "time")
+		typ = "time.Time"
+	case gogoproto.IsStdDuration(field):
+		g.customImports = append(g.customImports, "time")
+		typ = "time.Duration"
+	case gogoproto.IsStdDouble(field):
+		typ = "float64"
+	case gogoproto.IsStdFloat(field):
+		typ = "float32"
+	case gogoproto.IsStdInt64(field):
+		typ = "int64"
+	case gogoproto.IsStdUInt64(field):
+		typ = "uint64"
+	case gogoproto.IsStdInt32(field):
+		typ = "int32"
+	case 
gogoproto.IsStdUInt32(field): + typ = "uint32" + case gogoproto.IsStdBool(field): + typ = "bool" + case gogoproto.IsStdString(field): + typ = "string" + case gogoproto.IsStdBytes(field): + typ = "[]byte" + } + if needsStar(field, g.file.proto3 && field.Extendee == nil, message != nil && message.allowOneof()) { + typ = "*" + typ + } + if isRepeated(field) { + typ = "[]" + typ + } + return +} + +// GoMapDescriptor is a full description of the map output struct. +type GoMapDescriptor struct { + GoType string + + KeyField *descriptor.FieldDescriptorProto + KeyAliasField *descriptor.FieldDescriptorProto + KeyTag string + + ValueField *descriptor.FieldDescriptorProto + ValueAliasField *descriptor.FieldDescriptorProto + ValueTag string +} + +func (g *Generator) GoMapType(d *Descriptor, field *descriptor.FieldDescriptorProto) *GoMapDescriptor { + if d == nil { + byName := g.ObjectNamed(field.GetTypeName()) + desc, ok := byName.(*Descriptor) + if byName == nil || !ok || !desc.GetOptions().GetMapEntry() { + g.Fail(fmt.Sprintf("field %s is not a map", field.GetTypeName())) + return nil + } + d = desc + } + + m := &GoMapDescriptor{ + KeyField: d.Field[0], + ValueField: d.Field[1], + } + + // Figure out the Go types and tags for the key and value types. + m.KeyAliasField, m.ValueAliasField = g.GetMapKeyField(field, m.KeyField), g.GetMapValueField(field, m.ValueField) + keyType, keyWire := g.GoType(d, m.KeyAliasField) + valType, valWire := g.GoType(d, m.ValueAliasField) + + m.KeyTag, m.ValueTag = g.goTag(d, m.KeyField, keyWire), g.goTag(d, m.ValueField, valWire) + + if gogoproto.IsCastType(field) { + var packageName string + var err error + packageName, typ, err := getCastType(field) + if err != nil { + g.Fail(err.Error()) + } + if len(packageName) > 0 { + g.customImports = append(g.customImports, packageName) + } + m.GoType = typ + return m + } + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. + keyType = strings.TrimPrefix(keyType, "*") + switch *m.ValueAliasField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if !gogoproto.IsNullable(m.ValueAliasField) { + valType = strings.TrimPrefix(valType, "*") + } + if !gogoproto.IsStdType(m.ValueAliasField) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } + default: + if gogoproto.IsCustomType(m.ValueAliasField) { + if !gogoproto.IsNullable(m.ValueAliasField) { + valType = strings.TrimPrefix(valType, "*") + } + if !gogoproto.IsStdType(field) { + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } + } else { + valType = strings.TrimPrefix(valType, "*") + } + } + + m.GoType = fmt.Sprintf("map[%s]%s", keyType, valType) + return m +} + +func (g *Generator) RecordTypeUse(t string) { + if _, ok := g.typeNameToObject[t]; !ok { + return + } + importPath := g.ObjectNamed(t).GoImportPath() + if importPath == g.outputImportPath { + // Don't record use of objects in our package. + return + } + g.AddImport(importPath) + g.usedPackages[importPath] = true +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. Any change to this set is a potential incompatible +// API change because it changes generated field names. 
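+// (Illustrative: a proto field named "string" camel-cases to "String", which
+// collides with the generated String() method, so the Go field becomes "String_".)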
+var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", + "MarshalTo", + "Equal", + "VerboseEqual", + "GoString", + "ProtoSize", +} + +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. +var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + +// getterDefault finds the default value for the field to return from a getter, +// regardless of if it's a built in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" +func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType, goTypeName string) string { + if isRepeated(field) { + return "nil" + } + if def := field.GetDefaultValue(); def != "" { + defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + return defaultConstant + } + return "append([]byte(nil), " + defaultConstant + "...)" + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if field.OneofIndex != nil { + return "nil" + } else { + if !gogoproto.IsNullable(field) && (gogoproto.IsStdDuration(field) || + gogoproto.IsStdDouble(field) || gogoproto.IsStdFloat(field) || + gogoproto.IsStdInt64(field) || gogoproto.IsStdUInt64(field) || + gogoproto.IsStdInt32(field) || gogoproto.IsStdUInt32(field)) { + return "0" + } else if !gogoproto.IsNullable(field) && gogoproto.IsStdBool(field) { + return "false" + } else if !gogoproto.IsNullable(field) && gogoproto.IsStdString(field) { + return "\"\"" + } else if !gogoproto.IsNullable(field) && gogoproto.IsStdBytes(field) { + return "[]byte{}" + } else { + return goTypeName + "{}" + } + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + return "false" + case descriptor.FieldDescriptorProto_TYPE_STRING: + return "\"\"" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + // This is only possible for oneof fields. + return "nil" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // The default default for an enum is the first value in the enum, + // not zero. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + return "nil" + } + if len(enum.Value) == 0 { + return "0 // empty enum" + } else { + first := enum.Value[0].GetName() + if gogoproto.IsEnumValueCustomName(enum.Value[0]) { + first = gogoproto.GetEnumValueCustomName(enum.Value[0]) + } + if gogoproto.EnabledGoEnumPrefix(enum.file.FileDescriptorProto, enum.EnumDescriptorProto) { + return g.DefaultPackageName(obj) + enum.prefix() + first + } else { + return g.DefaultPackageName(obj) + first + } + } + default: + return "0" + } +} + +// defaultConstantName builds the name of the default constant from the message +// type name and the untouched field name, e.g. 
"Default_MessageType_FieldName" +func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { + return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) +} + +// The different types of fields in a message and how to actually print them +// Most of the logic for generateMessage is in the methods of these types. +// +// Note that the content of the field is irrelevant, a simpleField can contain +// anything from a scalar to a group (which is just a message). +// +// Extension fields (and message sets) are however handled separately. +// +// simpleField - a field that is neiter weak nor oneof, possibly repeated +// oneofField - field containing list of subfields: +// - oneofSubField - a field within the oneof + +// msgCtx contains the context for the generator functions. +type msgCtx struct { + goName string // Go struct name of the message, e.g. MessageName + message *Descriptor // The descriptor for the message +} + +// fieldCommon contains data common to all types of fields. +type fieldCommon struct { + goName string // Go name of field, e.g. "FieldName" or "Descriptor_" + protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" + getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" + goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" + tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` + fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" + protoField *descriptor.FieldDescriptorProto // gogo. Passing in the fieldDescriptor in for gogo options. TODO rethink this, we might need a better way of getting options. +} + +// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". +func (f *fieldCommon) getProtoName() string { + return f.protoName +} + +// getGoType returns the go type of the field as a string, e.g. "*int32". +func (f *fieldCommon) getGoType() string { + return f.goType +} + +// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. +type simpleField struct { + fieldCommon + protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" + protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 + deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." + getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" + protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" + comment string // The full comment for the field, e.g. "// Useful information" +} + +// decl prints the declaration of the field in the struct (if any). +func (f *simpleField) decl(g *Generator, mc *msgCtx) { + g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) +} + +// getter prints the getter for the field. 
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	oneof := false
+	if !oneof && !gogoproto.HasGoGetters(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+		return
+	}
+	if gogoproto.IsEmbed(f.protoField) || gogoproto.IsCustomType(f.protoField) {
+		return
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.generateGet(mc, f.protoField, f.protoType, false, f.goName, f.goType, "", "", f.fullPath, f.getterName, f.getterDef)
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+func (f *simpleField) getProto() *descriptor.FieldDescriptorProto {
+	return f.protoField
+}
+
+// oneofSubField values are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	deprecated    string                               // Deprecation comment, if any.
+}
+
+// typedNil prints a nil casted to the pointer to this field.
+// - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+func (f *oneofSubField) getProto() *descriptor.FieldDescriptorProto {
+	return f.protoField
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
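+// For a oneof named "value" in message Msg, the emitted declaration looks
+// roughly like (illustrative):
+//
+//	// Types that are valid to be assigned to Value:
+//	//	*Msg_Num
+//	//	*Msg_Str
+//	Value isMsg_Value `protobuf_oneof:"value"`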
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field will print additional discriminators and interfaces for the oneof,
+// also it prints all the getters for the sub fields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	oneof := true
+	if !oneof && !gogoproto.HasGoGetters(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+		return
+	}
+
+	for _, sf := range f.subFields {
+		if gogoproto.IsEmbed(sf.protoField) || gogoproto.IsCustomType(sf.protoField) {
+			continue
+		}
+		if sf.deprecated != "" {
+			g.P(sf.deprecated)
+		}
+		g.generateGet(mc, sf.protoField, sf.protoType, true, sf.goName, sf.goType, f.goName, sf.oneofTypeName, sf.fullPath, sf.getterName, sf.getterDef)
+	}
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	getProto() *descriptor.FieldDescriptorProto
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is only
+// if the default value is explicitly stated in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		if !gogoproto.IsNullable(df.getProto()) {
+			g.Fail("illegal default value: ", df.getProtoName(), " in ", mc.message.GetName(), " is not nullable and is thus not allowed to have a default value")
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
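+			// (Illustrative: `optional double d = 1 [default = inf];` on message
+			// Msg yields `var Default_Msg_D float64 = math.Inf(1)`.)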
+ switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT: + if f, err := strconv.ParseFloat(def, 32); err == nil { + def = fmt.Sprint(float32(f)) + } + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if f, err := strconv.ParseFloat(def, 64); err == nil { + def = fmt.Sprint(f) + } + case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. + obj := g.ObjectNamed(df.getProtoTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + + // hunt down the actual enum corresponding to the default + var enumValue *descriptor.EnumValueDescriptorProto + for _, ev := range enum.Value { + if def == ev.GetName() { + enumValue = ev + } + } + + if enumValue != nil { + if gogoproto.IsEnumValueCustomName(enumValue) { + def = gogoproto.GetEnumValueCustomName(enumValue) + } + } else { + g.Fail(fmt.Sprintf("could not resolve default enum value for %v.%v", g.DefaultPackageName(obj), def)) + } + + if gogoproto.EnabledGoEnumPrefix(enum.file.FileDescriptorProto, enum.EnumDescriptorProto) { + def = g.DefaultPackageName(obj) + enum.prefix() + def + } else { + def = g.DefaultPackageName(obj) + def + } + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() +} + +// generateGet generates the getter for both the simpleField and oneofSubField. +// We did not want to duplicate the code since it is quite intricate so we came +// up with this ugly method. At least the logic is in one place. This can be reworked. +func (g *Generator) generateGet(mc *msgCtx, protoField *descriptor.FieldDescriptorProto, protoType descriptor.FieldDescriptorProto_Type, + oneof bool, fname, tname, uname, oneoftname, fullpath, gname, def string) { + star := "" + if (protoType != descriptor.FieldDescriptorProto_TYPE_MESSAGE) && + (protoType != descriptor.FieldDescriptorProto_TYPE_GROUP) && + needsStar(protoField, g.file.proto3, mc.message != nil && mc.message.allowOneof()) && tname[0] == '*' { + tname = tname[1:] + star = "*" + } + typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified + switch protoType { + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typeDefaultIsNil = def == "nil" + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: + typeDefaultIsNil = gogoproto.IsNullable(protoField) + } + if isRepeated(protoField) { + typeDefaultIsNil = true + } + g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, fullpath, gname), "() "+tname+" {") + if !oneof && typeDefaultIsNil { + // A bytes field with no explicit default needs less generated code, + // as does a message or group field, or a repeated field. + g.P("if m != nil {") + g.In() + g.P("return m." + fname) + g.Out() + g.P("}") + g.P("return nil") + g.Out() + g.P("}") + g.P() + return + } + if !gogoproto.IsNullable(protoField) { + g.P("if m != nil {") + g.In() + g.P("return m." 
+ fname) + g.Out() + g.P("}") + } else if !oneof { + if mc.message.proto3() { + g.P("if m != nil {") + } else { + g.P("if m != nil && m." + fname + " != nil {") + } + g.In() + g.P("return " + star + "m." + fname) + g.Out() + g.P("}") + } else { + uname := uname + tname := oneoftname + g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {") + g.P("return x.", fname) + g.P("}") + } + g.P("return ", def) + g.Out() + g.P("}") + g.P() +} + +// generateInternalStructFields just adds the XXX_ fields to the message struct. +func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { + if gogoproto.HasUnkeyed(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals + } + if len(mc.message.ExtensionRange) > 0 { + if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + messageset := "" + if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { + messageset = "protobuf_messageset:\"1\" " + } + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") + } else { + g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`") + } + } + if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + if gogoproto.HasSizecache(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("XXX_sizecache\tint32 `json:\"-\"`") + } +} + +// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. +func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { + ofields := []*oneofField{} + for _, f := range topLevelFields { + if o, ok := f.(*oneofField); ok { + ofields = append(ofields, o) + } + } + if len(ofields) == 0 { + return + } + + // OneofFuncs + g.P("// XXX_OneofWrappers is for the internal use of the proto package.") + g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {") + g.P("return []interface{}{") + for _, of := range ofields { + for _, sf := range of.subFields { + sf.typedNil(g) + } + } + g.P("}") + g.P("}") + g.P() +} + +func (g *Generator) generateOneofDecls(mc *msgCtx, topLevelFields []topLevelField) { + ofields := []*oneofField{} + for _, f := range topLevelFields { + if o, ok := f.(*oneofField); ok { + ofields = append(ofields, o) + } + } + if len(ofields) == 0 { + return + } + // Oneof per-field types, discriminants and getters. + // Generate unexported named types for the discriminant interfaces. + // We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug + // that was triggered by using anonymous interfaces here. + // TODO: Revisit this and consider reverting back to anonymous interfaces. 
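+	// For a oneof named MyOneof on message Foo (illustrative names), the
+	// discriminant generated below looks roughly like
+	//
+	//	type isFoo_MyOneof interface {
+	//		isFoo_MyOneof()
+	//	}
+	//
+	// with extra methods added per enabled gogo extension (Equal, Size, etc.).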
+	// for oi := range message.OneofDecl {
+	for _, of := range ofields {
+		dname := of.goType
+		g.P("type ", dname, " interface {")
+		g.In()
+		g.P(dname, "()")
+		if gogoproto.HasEqual(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`Equal(interface{}) bool`)
+		}
+		if gogoproto.HasVerboseEqual(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`VerboseEqual(interface{}) error`)
+		}
+		if gogoproto.IsMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) ||
+			gogoproto.IsUnsafeMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) ||
+			gogoproto.IsStableMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`MarshalTo([]byte) (int, error)`)
+		}
+		if gogoproto.IsSizer(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`Size() int`)
+		}
+		if gogoproto.IsProtoSizer(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`ProtoSize() int`)
+		}
+		if gogoproto.HasCompare(g.file.FileDescriptorProto, mc.message.DescriptorProto) {
+			g.P(`Compare(interface{}) int`)
+		}
+		g.Out()
+		g.P("}")
+	}
+	g.P()
+	for _, of := range ofields {
+		for i, sf := range of.subFields {
+			fieldFullPath := fmt.Sprintf("%s,%d,%d", mc.message.path, messageFieldPath, i)
+			g.P("type ", Annotate(mc.message.file, fieldFullPath, sf.oneofTypeName), " struct{ ", Annotate(mc.message.file, fieldFullPath, sf.goName), " ", sf.goType, " `", sf.tags, "` }")
+			if !gogoproto.IsStdType(sf.protoField) && !gogoproto.IsCustomType(sf.protoField) && !gogoproto.IsCastType(sf.protoField) {
+				g.RecordTypeUse(sf.protoField.GetTypeName())
+			}
+		}
+	}
+	g.P()
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			g.P("func (*", sf.oneofTypeName, ") ", of.goType, "() {}")
+		}
+	}
+	g.P()
+	for _, of := range ofields {
+		fname := of.goName
+		g.P("func (m *", mc.goName, ") Get", fname, "() ", of.goType, " {")
+		g.P("if m != nil { return m.", fname, " }")
+		g.P("return nil")
+		g.P("}")
+	}
+	g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per-field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
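+	// For a message named Foo (illustrative), these emit roughly:
+	//
+	//	func (m *Foo) Reset() { *m = Foo{} }
+	//	func (m *Foo) String() string { return proto.CompactTextString(m) }
+	//	func (*Foo) ProtoMessage() {}
+	//
+	// String is only emitted while the standard stringer is enabled.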
+ g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") + if gogoproto.EnabledGoStringer(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + } + g.P("func (*", mc.goName, ") ProtoMessage() {}") + var indexes []string + for m := mc.message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { + g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) + } + + // Extension support methods + if len(mc.message.ExtensionRange) > 0 { + g.P() + g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") + g.In() + for _, r := range mc.message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{Start: ", r.Start, ", End: ", end, "},") + } + g.Out() + g.P("}") + g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.In() + g.P("return extRange_", mc.goName) + g.Out() + g.P("}") + g.P() + if !gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("func (m *", mc.goName, ") GetExtensions() *[]byte {") + g.In() + g.P("if m.XXX_extensions == nil {") + g.In() + g.P("m.XXX_extensions = make([]byte, 0)") + g.Out() + g.P("}") + g.P("return &m.XXX_extensions") + g.Out() + g.P("}") + } + } + + // TODO: It does not scale to keep adding another method for every + // operation on protos that we want to switch over to using the + // table-driven approach. Instead, we should only add a single method + // that allows getting access to the *InternalMessageInfo struct and then + // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. + + // Wrapper for table-driven marshaling and unmarshaling. 
+ g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") + g.In() + if gogoproto.IsUnmarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("return m.Unmarshal(b)") + } else { + g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") + } + g.Out() + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") + g.In() + if gogoproto.IsMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) || + gogoproto.IsUnsafeMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + if gogoproto.IsStableMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("b = b[:cap(b)]") + g.P("n, err := m.MarshalToSizedBuffer(b)") + g.P("if err != nil {") + g.In() + g.P("return nil, err") + g.Out() + g.P("}") + g.P("return b[:n], nil") + } else { + g.P("if deterministic {") + g.In() + g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") + g.P("} else {") + g.In() + g.P("b = b[:cap(b)]") + g.P("n, err := m.MarshalToSizedBuffer(b)") + g.P("if err != nil {") + g.In() + g.P("return nil, err") + g.Out() + g.P("}") + g.Out() + g.P("return b[:n], nil") + g.Out() + g.P("}") + } + } else { + g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") + } + g.Out() + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") + g.In() + g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") + g.Out() + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message + g.In() + if (gogoproto.IsMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) || + gogoproto.IsUnsafeMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto)) && + gogoproto.IsSizer(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("return m.Size()") + } else if (gogoproto.IsMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto) || + gogoproto.IsUnsafeMarshaler(g.file.FileDescriptorProto, mc.message.DescriptorProto)) && + gogoproto.IsProtoSizer(g.file.FileDescriptorProto, mc.message.DescriptorProto) { + g.P("return m.ProtoSize()") + } else { + g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") + } + g.Out() + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") + g.In() + g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") + g.Out() + g.P("}") + + g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") +} + +// Generate the type and default constant definitions for this Descriptor. +func (g *Generator) generateMessage(message *Descriptor) { + topLevelFields := []topLevelField{} + oFields := make(map[int32]*oneofField) + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + goTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + if !gogoproto.IsProtoSizer(message.file.FileDescriptorProto, message.DescriptorProto) { + usedNames["Size"] = true + } + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. 
+	allocNames := func(ns ...string) []string {
+	Loop:
+		for {
+			for _, n := range ns {
+				if usedNames[n] {
+					for i := range ns {
+						ns[i] += "_"
+					}
+					continue Loop
+				}
+			}
+			for _, n := range ns {
+				usedNames[n] = true
+			}
+			return ns
+		}
+	}
+
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+	for i, field := range message.Field {
+		// Allocate the getter and the field at the same time so that name
+		// collisions produce consistent field/method names.
+		// TODO: This allocation occurs based on the order of the fields
+		// in the proto file, meaning that a change in the field
+		// ordering can change generated Method/Field names.
+		base := CamelCase(*field.Name)
+		if gogoproto.IsCustomName(field) {
+			base = gogoproto.GetCustomName(field)
+		}
+		ns := allocNames(base, "Get"+base)
+		fieldName, fieldGetterName := ns[0], ns[1]
+
+		typename, wiretype := g.GoType(message, field)
+		jsonName := *field.Name
+		jsonTag := jsonName + ",omitempty"
+		repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated())
+		if !gogoproto.IsNullable(field) && !repeatedNativeType {
+			jsonTag = jsonName
+		}
+		gogoJsonTag := gogoproto.GetJsonTag(field)
+		if gogoJsonTag != nil {
+			jsonTag = *gogoJsonTag
+		}
+		gogoMoreTags := gogoproto.GetMoreTags(field)
+		moreTags := ""
+		if gogoMoreTags != nil {
+			moreTags = " " + *gogoMoreTags
+		}
+		tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags)
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) {
+			fieldName = ""
+		}
+
+		oneof := field.OneofIndex != nil && message.allowOneof()
+		if oneof && oFields[*field.OneofIndex] == nil {
+			odp := message.OneofDecl[int(*field.OneofIndex)]
+			base := CamelCase(odp.GetName())
+			names := allocNames(base, "Get"+base)
+			fname, gname := names[0], names[1]
+
+			// This is the first field of a oneof we haven't seen before.
+			// Generate the union field.
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+			c, ok := g.makeComments(oneofFullPath)
+			if ok {
+				c += "\n//\n"
+			}
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
+			// Generate the rest of this comment later,
+			// when we've computed any disambiguation.
+
+			dname := "is" + goTypeName + "_" + fname
+			oneOftag := `protobuf_oneof:"` + odp.GetName() + `"`
+			of := oneofField{
+				fieldCommon: fieldCommon{
+					goName: fname,
+					getterName: gname,
+					goType: dname,
+					tags: oneOftag,
+					protoName: odp.GetName(),
+					fullPath: oneofFullPath,
+					protoField: field,
+				},
+				comment: c,
+			}
+			topLevelFields = append(topLevelFields, &of)
+			oFields[*field.OneofIndex] = &of
+		}
+
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+			desc := g.ObjectNamed(field.GetTypeName())
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+				m := g.GoMapType(d, field)
+				typename = m.GoType
+				mapFieldTypes[field] = typename // record for the getter generation
+
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag)
+			}
+		}
+		goTyp, _ := g.GoType(message, field)
+		fieldDeprecated := ""
+		if field.GetOptions().GetDeprecated() {
+			fieldDeprecated = deprecationComment
+		}
+		dvalue := g.getterDefault(field, goTypeName, GoTypeToName(goTyp))
+		if oneof {
+			tname := goTypeName + "_" + fieldName
+			// It is possible for this to collide with a message or enum
+			// nested in this message. Check for collisions.
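+			// For example, the case type for a oneof member "bar" of message Foo
+			// (illustrative names) is Foo_Bar; if Foo also declares a nested type
+			// whose Go name is Foo_Bar, the loop below appends '_' until the name
+			// is unique.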
+ for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofField := oFields[*field.OneofIndex] + sf := oneofSubField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), + protoField: field, + }, + protoTypeName: field.GetTypeName(), + fieldNumber: int(*field.Number), + protoType: *field.Type, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + oneofTypeName: tname, + deprecated: fieldDeprecated, + } + + oneofField.subFields = append(oneofField.subFields, &sf) + if !gogoproto.IsStdType(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(field.GetTypeName()) + } + continue + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + c, ok := g.makeComments(fieldFullPath) + if ok { + c += "\n" + } + rf := simpleField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fieldFullPath, + protoField: field, + }, + protoTypeName: field.GetTypeName(), + protoType: *field.Type, + deprecated: fieldDeprecated, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + comment: c, + } + var pf topLevelField = &rf + + topLevelFields = append(topLevelFields, pf) + + if gogoproto.HasTypeDecl(message.file.FileDescriptorProto, message.DescriptorProto) { + if !gogoproto.IsStdType(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(field.GetTypeName()) + } + } else { + // Even if the type does not need to be generated, we need to iterate + // over all its fields to be able to mark as used any imported types + // used by those fields. + for _, mfield := range message.Field { + if !gogoproto.IsStdType(mfield) && !gogoproto.IsCustomType(mfield) && !gogoproto.IsCastType(mfield) { + g.RecordTypeUse(mfield.GetTypeName()) + } + } + } + } + + mc := &msgCtx{ + goName: goTypeName, + message: message, + } + + if gogoproto.HasTypeDecl(message.file.FileDescriptorProto, message.DescriptorProto) { + g.generateMessageStruct(mc, topLevelFields) + g.P() + } + g.generateCommonMethods(mc) + g.P() + g.generateDefaultConstants(mc, topLevelFields) + g.P() + g.generateOneofDecls(mc, topLevelFields) + g.P() + g.generateGetters(mc, topLevelFields) + g.P() + g.generateSetters(mc, topLevelFields) + g.P() + g.generateOneofFuncs(mc, topLevelFields) + g.P() + + var oneofTypes []string + for _, f := range topLevelFields { + if of, ok := f.(*oneofField); ok { + for _, osf := range of.subFields { + oneofTypes = append(oneofTypes, osf.oneofTypeName) + } + } + } + + opts := message.Options + ms := &messageSymbol{ + sym: goTypeName, + hasExtensions: len(message.ExtensionRange) > 0, + isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), + oneofTypes: oneofTypes, + } + g.file.addExport(message, ms) + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." 
+ fullName
+	}
+
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+	if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) {
+		g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["golang_proto"], goTypeName, fullName)
+	}
+	if gogoproto.HasMessageName(g.file.FileDescriptorProto, message.DescriptorProto) {
+		g.P("func (*", goTypeName, ") XXX_MessageName() string {")
+		g.In()
+		g.P("return ", strconv.Quote(fullName))
+		g.Out()
+		g.P("}")
+	}
+	// Register types for native map types.
+	for _, k := range mapFieldKeys(mapFieldTypes) {
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+		if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) {
+			g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["golang_proto"], mapFieldTypes[k], fullName)
+		}
+	}
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int { return len(a) }
+func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Sort(byTypeName(keys))
+	return keys
+}
+
+var escapeChars = [256]byte{
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+	// single and double quotes, but strconv.Unquote only allows one or the
+	// other (based on actual surrounding quotes of its input argument).
+
+	var out []byte
+	for len(s) > 0 {
+		// regular character, or too short to be valid escape
+		if s[0] != '\\' || len(s) < 2 {
+			out = append(out, s[0])
+			s = s[1:]
+		} else if c := escapeChars[s[1]]; c != 0 {
+			// escape sequence
+			out = append(out, c)
+			s = s[2:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g. "\x80"
+			if len(s) < 4 {
+				// too short to be valid
+				out = append(out, s[:2]...)
+				s = s[2:]
+				continue
+			}
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
+			if err != nil {
+				out = append(out, s[:4]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[4:]
+		} else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+			// so consume up to 2 more bytes or up to end-of-string
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+			if n > 3 {
+				n = 3
+			}
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil {
+				out = append(out, s[:1+n]...)
+ } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag. + extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2.bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. + // + // TODO: This should be implemented in the text formatter rather than the generator. + // In addition, the situation for when to apply this special case is implemented + // differently in other languages: + // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 + if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.In() + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) + + g.Out() + g.P("}") + g.P() + + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + if len(g.init) == 0 { + return + } + g.P("func init() {") + g.In() + for _, l := range g.init { + g.P(l) + } + g.Out() + g.P("}") + g.init = nil +} + +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. 
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+	pb.SourceCodeInfo = nil
+
+	b, err := proto.Marshal(pb)
+	if err != nil {
+		g.Fail(err.Error())
+	}
+
+	var buf bytes.Buffer
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+	w.Write(b)
+	w.Close()
+	b = buf.Bytes()
+
+	v := file.VarName()
+	g.P()
+	g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+	if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) {
+		g.P("func init() { ", g.Pkg["golang_proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+	}
+	g.P("var ", v, " = []byte{")
+	g.In()
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+	for len(b) > 0 {
+		n := 16
+		if n > len(b) {
+			n = len(b)
+		}
+
+		s := ""
+		for _, c := range b[:n] {
+			s += fmt.Sprintf("0x%02x,", c)
+		}
+		g.P(s)
+
+		b = b[n:]
+	}
+	g.Out()
+	g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+	pkg := enum.File().GetPackage()
+	if pkg != "" {
+		pkg += "."
+	}
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+	if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) {
+		g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["golang_proto"], pkg+ccTypeName, ccTypeName)
+	}
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+	return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+	return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent - since the
+// C++ generator lowercases names, it's extremely unlikely to have two fields
+// with different capitalizations.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
+func CamelCase(s string) string {
+	if s == "" {
+		return ""
+	}
+	t := make([]byte, 0, 32)
+	i := 0
+	if s[0] == '_' {
+		// Need a capital letter; drop the '_'.
+		t = append(t, 'X')
+		i++
+	}
+	// Invariant: if the next letter is lower case, it must be converted
+	// to upper case.
+	// That is, we process a word at a time, where words are marked by _ or
+	// upper case letter. Digits are treated as words.
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+			continue // Skip the underscore in s.
+		}
+		if isASCIIDigit(c) {
+			t = append(t, c)
+			continue
+		}
+		// Assume we have a letter now - if not, it's a bogus identifier.
+		// The next word is a sequence of characters that must start upper case.
+		if isASCIILower(c) {
+			c ^= ' ' // Make it a capital letter.
+		}
+		t = append(t, c) // Guaranteed not lower case.
+		// Accept lower case sequence that follows.
+		for i+1 < len(s) && isASCIILower(s[i+1]) {
+			i++
+			t = append(t, s[i])
+		}
+	}
+	return string(t)
+}
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
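+// For example, CamelCaseSlice([]string{"Outer", "Inner"}) == "Outer_Inner",
+// the Go name used for a nested message Outer.Inner.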
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? +func IsScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. 
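+//
+// For example, with the tag numbers below, the path "4,3,2,7" addresses
+// file.message_type[3].field[7]: the eighth field of the fourth top-level
+// message in the file.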
+const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go new file mode 100644 index 000000000000..7091e281cb1a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go @@ -0,0 +1,461 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package generator + +import ( + "bytes" + "go/parser" + "go/printer" + "go/token" + "path" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" +) + +func (d *FileDescriptor) Messages() []*Descriptor { + return d.desc +} + +func (d *FileDescriptor) Enums() []*EnumDescriptor { + return d.enum +} + +func (d *Descriptor) IsGroup() bool { + return d.group +} + +func (g *Generator) IsGroup(field *descriptor.FieldDescriptorProto) bool { + if d, ok := g.typeNameToObject[field.GetTypeName()].(*Descriptor); ok { + return d.IsGroup() + } + return false +} + +func (g *Generator) TypeNameByObject(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +func (g *Generator) OneOfTypeName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + typeName := message.TypeName() + ccTypeName := CamelCaseSlice(typeName) + fieldName := g.GetOneOfFieldName(message, field) + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + ok := true + for _, desc := range message.nested { + if strings.Join(desc.TypeName(), "_") == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if strings.Join(enum.TypeName(), "_") == tname { + ok = false + break + } + } + if !ok { + tname += "_" + } + return tname +} + +type PluginImports interface { + NewImport(pkg string) Single + GenerateImports(file *FileDescriptor) +} + +type pluginImports struct { + generator *Generator + singles []Single +} + +func NewPluginImports(generator *Generator) *pluginImports { + return &pluginImports{generator, make([]Single, 0)} +} + +func (this *pluginImports) NewImport(pkg string) Single { + imp := newImportedPackage(this.generator.ImportPrefix, pkg) + this.singles = append(this.singles, imp) + return imp +} + +func (this *pluginImports) GenerateImports(file *FileDescriptor) { + for _, s := range this.singles { + if s.IsUsed() { + this.generator.PrintImport(GoPackageName(s.Name()), GoImportPath(s.Location())) + } + } +} + +type Single interface { + Use() string + IsUsed() bool + Name() string + Location() string +} + +type importedPackage struct { + used bool + pkg string + name string + importPrefix string +} + +func newImportedPackage(importPrefix string, pkg string) *importedPackage { + return &importedPackage{ + pkg: pkg, + importPrefix: importPrefix, + } +} + +func (this *importedPackage) Use() string { + if !this.used { + this.name = string(cleanPackageName(this.pkg)) + this.used = true + } + return this.name +} + +func (this *importedPackage) IsUsed() bool { + return this.used +} + +func (this *importedPackage) Name() string { + return this.name +} + +func (this *importedPackage) Location() string { + return this.importPrefix + this.pkg +} + +func (g *Generator) GetFieldName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + goTyp, _ := g.GoType(message, field) + fieldname := CamelCase(*field.Name) + if gogoproto.IsCustomName(field) { + fieldname = gogoproto.GetCustomName(field) + } + if gogoproto.IsEmbed(field) { + fieldname = EmbedFieldName(goTyp) + } + if field.OneofIndex != nil { + fieldname = message.OneofDecl[int(*field.OneofIndex)].GetName() + fieldname = CamelCase(fieldname) + } + for _, f := range methodNames { + if f == 
fieldname { + return fieldname + "_" + } + } + if !gogoproto.IsProtoSizer(message.file.FileDescriptorProto, message.DescriptorProto) { + if fieldname == "Size" { + return fieldname + "_" + } + } + return fieldname +} + +func (g *Generator) GetOneOfFieldName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + goTyp, _ := g.GoType(message, field) + fieldname := CamelCase(*field.Name) + if gogoproto.IsCustomName(field) { + fieldname = gogoproto.GetCustomName(field) + } + if gogoproto.IsEmbed(field) { + fieldname = EmbedFieldName(goTyp) + } + for _, f := range methodNames { + if f == fieldname { + return fieldname + "_" + } + } + if !gogoproto.IsProtoSizer(message.file.FileDescriptorProto, message.DescriptorProto) { + if fieldname == "Size" { + return fieldname + "_" + } + } + return fieldname +} + +func (g *Generator) IsMap(field *descriptor.FieldDescriptorProto) bool { + if !field.IsMessage() { + return false + } + byName := g.ObjectNamed(field.GetTypeName()) + desc, ok := byName.(*Descriptor) + if byName == nil || !ok || !desc.GetOptions().GetMapEntry() { + return false + } + return true +} + +func (g *Generator) GetMapKeyField(field, keyField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto { + if !gogoproto.IsCastKey(field) { + return keyField + } + keyField = proto.Clone(keyField).(*descriptor.FieldDescriptorProto) + if keyField.Options == nil { + keyField.Options = &descriptor.FieldOptions{} + } + keyType := gogoproto.GetCastKey(field) + if err := proto.SetExtension(keyField.Options, gogoproto.E_Casttype, &keyType); err != nil { + g.Fail(err.Error()) + } + return keyField +} + +func (g *Generator) GetMapValueField(field, valField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto { + if gogoproto.IsCustomType(field) && gogoproto.IsCastValue(field) { + g.Fail("cannot have a customtype and casttype: ", field.String()) + } + valField = proto.Clone(valField).(*descriptor.FieldDescriptorProto) + if valField.Options == nil { + valField.Options = &descriptor.FieldOptions{} + } + + stdtime := gogoproto.IsStdTime(field) + if stdtime { + if err := proto.SetExtension(valField.Options, gogoproto.E_Stdtime, &stdtime); err != nil { + g.Fail(err.Error()) + } + } + + stddur := gogoproto.IsStdDuration(field) + if stddur { + if err := proto.SetExtension(valField.Options, gogoproto.E_Stdduration, &stddur); err != nil { + g.Fail(err.Error()) + } + } + + wktptr := gogoproto.IsWktPtr(field) + if wktptr { + if err := proto.SetExtension(valField.Options, gogoproto.E_Wktpointer, &wktptr); err != nil { + g.Fail(err.Error()) + } + } + + if valType := gogoproto.GetCastValue(field); len(valType) > 0 { + if err := proto.SetExtension(valField.Options, gogoproto.E_Casttype, &valType); err != nil { + g.Fail(err.Error()) + } + } + if valType := gogoproto.GetCustomType(field); len(valType) > 0 { + if err := proto.SetExtension(valField.Options, gogoproto.E_Customtype, &valType); err != nil { + g.Fail(err.Error()) + } + } + + nullable := gogoproto.IsNullable(field) + if err := proto.SetExtension(valField.Options, gogoproto.E_Nullable, &nullable); err != nil { + g.Fail(err.Error()) + } + return valField +} + +// GoMapValueTypes returns the map value Go type and the alias map value Go type (for casting), taking into +// account whether the map is nullable or the value is a message. 
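+// For example, for a nullable map whose value type is a message MyMsg
+// (illustrative), a Go value type of "MyMsg" comes back as "*MyMsg"; for a
+// non-nullable map the leading "*" is stripped instead.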
+func GoMapValueTypes(mapField, valueField *descriptor.FieldDescriptorProto, goValueType, goValueAliasType string) (nullable bool, outGoType string, outGoAliasType string) { + nullable = gogoproto.IsNullable(mapField) && (valueField.IsMessage() || gogoproto.IsCustomType(mapField)) + if nullable { + // ensure the non-aliased Go value type is a pointer for consistency + if strings.HasPrefix(goValueType, "*") { + outGoType = goValueType + } else { + outGoType = "*" + goValueType + } + outGoAliasType = goValueAliasType + } else { + outGoType = strings.Replace(goValueType, "*", "", 1) + outGoAliasType = strings.Replace(goValueAliasType, "*", "", 1) + } + return +} + +func GoTypeToName(goTyp string) string { + return strings.Replace(strings.Replace(goTyp, "*", "", -1), "[]", "", -1) +} + +func EmbedFieldName(goTyp string) string { + goTyp = GoTypeToName(goTyp) + goTyps := strings.Split(goTyp, ".") + if len(goTyps) == 1 { + return goTyp + } + if len(goTyps) == 2 { + return goTyps[1] + } + panic("unreachable") +} + +func (g *Generator) GeneratePlugin(p Plugin) { + plugins = []Plugin{p} + p.Init(g) + // Generate the output. The generator runs for every file, even the files + // that we don't generate output for, so that we can collate the full list + // of exported symbols to support public imports. + genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) + for _, file := range g.genFiles { + genFileMap[file] = true + } + for _, file := range g.allFiles { + g.Reset() + g.writeOutput = genFileMap[file] + g.generatePlugin(file, p) + if !g.writeOutput { + continue + } + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName(g.pathType)), + Content: proto.String(g.String()), + }) + } +} + +func (g *Generator) SetFile(filename string) { + g.file = g.fileByName(filename) +} + +func (g *Generator) generatePlugin(file *FileDescriptor, p Plugin) { + g.writtenImports = make(map[string]bool) + g.usedPackages = make(map[GoImportPath]bool) + g.packageNames = make(map[GoImportPath]GoPackageName) + g.usedPackageNames = make(map[GoPackageName]bool) + g.addedImports = make(map[GoImportPath]bool) + g.file = file + + // Run the plugins before the imports so we know which imports are necessary. + p.Generate(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + g.Buffer = new(bytes.Buffer) + g.generateHeader() + // p.GenerateImports(g.file) + g.generateImports() + if !g.writeOutput { + return + } + g.Write(rem.Bytes()) + + // Reformat generated code. 
+ contents := string(g.Buffer.Bytes()) + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "", g, parser.ParseComments) + if err != nil { + g.Fail("bad Go source code was generated:", contents, err.Error()) + return + } + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } +} + +func GetCustomType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + return getCustomType(field) +} + +func getCustomType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Customtype) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func splitCPackageType(ctype string) (packageName string, typ string) { + ss := strings.Split(ctype, ".") + if len(ss) == 1 { + return "", ctype + } + packageName = strings.Join(ss[0:len(ss)-1], ".") + typeName := ss[len(ss)-1] + importStr := strings.Map(badToUnderscore, packageName) + typ = importStr + "." + typeName + return packageName, typ +} + +func getCastType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Casttype) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func FileName(file *FileDescriptor) string { + fname := path.Base(file.FileDescriptorProto.GetName()) + fname = strings.Replace(fname, ".proto", "", -1) + fname = strings.Replace(fname, "-", "_", -1) + fname = strings.Replace(fname, ".", "_", -1) + return CamelCase(fname) +} + +func (g *Generator) AllFiles() *descriptor.FileDescriptorSet { + set := &descriptor.FileDescriptorSet{} + set.File = make([]*descriptor.FileDescriptorProto, len(g.allFiles)) + for i := range g.allFiles { + set.File[i] = g.allFiles[i].FileDescriptorProto + } + return set +} + +func (d *Descriptor) Path() string { + return d.path +} + +func (g *Generator) useTypes() string { + pkg := strings.Map(badToUnderscore, "github.com/gogo/protobuf/types") + g.customImports = append(g.customImports, "github.com/gogo/protobuf/types") + return pkg +} + +func (d *FileDescriptor) GoPackageName() string { + return string(d.packageName) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go new file mode 100644 index 000000000000..a9b61036cc0f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output. An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go new file mode 100644 index 000000000000..cf527f8e015e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go @@ -0,0 +1,536 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "strconv" + "strings" + + pb "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// the grpc package is introduced; the generated code references +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 4 + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. 
+const ( + contextPkgPath = "context" + grpcPkgPath = "google.golang.org/grpc" + codePkgPath = "google.golang.org/grpc/codes" + statusPkgPath = "google.golang.org/grpc/status" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + + contextPkg = string(g.gen.AddImport(contextPkgPath)) + grpcPkg = string(g.gen.AddImport(grpcPkgPath)) + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() + + // Assert version compatibility. + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) {} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ + // TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } + servName := generator.CamelCase(origServName) + deprecated := service.GetOptions().GetDeprecated() + + g.P() + g.P(fmt.Sprintf(`// %sClient is the client API for %s service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) + + // Client interface. 
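+	// For a service named Bar (illustrative) this emits, roughly:
+	//
+	//	type BarClient interface {
+	//		// one method per RPC, produced by generateClientSignature
+	//	}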
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", servName, "Client interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateClientSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Client structure.
+	g.P("type ", unexport(servName), "Client struct {")
+	g.P("cc *", grpcPkg, ".ClientConn")
+	g.P("}")
+	g.P()
+
+	// NewClient factory.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
+	g.P("return &", unexport(servName), "Client{cc}")
+	g.P("}")
+	g.P()
+
+	var methodIndex, streamIndex int
+	serviceDescVar := "_" + servName + "_serviceDesc"
+	// Client method implementations.
+	for _, method := range service.Method {
+		var descExpr string
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
+			// Unary RPC method
+			descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
+			methodIndex++
+		} else {
+			// Streaming RPC method
+			descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
+			streamIndex++
+		}
+		g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
+	}
+
+	// Server interface.
+	serverType := servName + "Server"
+	g.P("// ", serverType, " is the server API for ", servName, " service.")
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", serverType, " interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateServerSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Server Unimplemented struct for forward compatibility.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.generateUnimplementedServer(servName, service)
+
+	// Server registration.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
+	g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
+	g.P("}")
+	g.P()
+
+	// Server handler implementations.
+	var handlerNames []string
+	for _, method := range service.Method {
+		hname := g.generateServerMethod(servName, fullServName, method)
+		handlerNames = append(handlerNames, hname)
+	}
+
+	// Service descriptor.
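+	// For the hypothetical Echo service above, the descriptor emitted below
+	// comes out roughly as (sketch):
+	//
+	//	var _Echo_serviceDesc = grpc.ServiceDesc{
+	//		ServiceName: "pkg.Echo",
+	//		HandlerType: (*EchoServer)(nil),
+	//		Methods:     []grpc.MethodDesc{{MethodName: "Ping", Handler: _Echo_Ping_Handler}},
+	//		Streams:     []grpc.StreamDesc{},
+	//		Metadata:    "echo.proto",
+	//	}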
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.GetName(), "\",") + g.P("}") + g.P() +} + +// generateUnimplementedServer creates the unimplemented server struct +func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) { + serverType := servName + "Server" + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + // UnimplementedServer's concrete methods + for _, method := range service.Method { + g.generateServerMethodConcrete(servName, method) + } + g.P() +} + +// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility +func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) { + header := g.generateServerSignatureWithParamNames(servName, method) + g.P("func (*Unimplemented", servName, "Server) ", header, " {") + var nilArg string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + nilArg = "nil, " + } + methName := generator.CamelCase(method.GetName()) + statusPkg := string(g.gen.AddImport(statusPkgPath)) + codePkg := string(g.gen.AddImport(codePkgPath)) + g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`) + g.P("}") +} + +// generateClientSignature returns the client-side signature for a method. 
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if method.GetOptions().GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names. 
+func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, "ctx "+contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +// generateServerSignature returns the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &", grpcPkg, ".UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream 
auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go new file mode 100644 index 000000000000..dd8e795030cb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/main.go @@ -0,0 +1,57 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Run it by building this program and putting it in your path with +// the name +// protoc-gen-gogo +// That word 'gogo' at the end becomes part of the option string set for the +// protocol compiler, so once the protocol compiler (protoc) is installed +// you can run +// protoc --gogo_out=output_directory input_directory/file.proto +// to generate Go bindings for the protocol defined by file.proto. 
+// With that input, the output will be written to +// output_directory/file.pb.go +// +// The generated code is documented in the package comment for +// the library. +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "github.com/gogo/protobuf/vanity/command" +) + +func main() { + command.Write(command.Generate(command.Read())) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile new file mode 100644 index 000000000000..95234a75539b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/compiler/plugin.proto +# Also we need to fix an import. +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. -I=../../protobuf/google/protobuf/compiler/:../../protobuf/ ../../protobuf/google/protobuf/compiler/plugin.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go new file mode 100644 index 000000000000..8c9cb58b0dd8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go @@ -0,0 +1,365 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: plugin.proto + +package plugin_go + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{0} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. 
However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptor.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{1} +} +func (m *CodeGeneratorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (m *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(m, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*descriptor.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. 
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{2} +} +func (m *CodeGeneratorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (m *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(m, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. 
+ // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{2, 0} +} +func (m *CodeGeneratorResponse_File) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (m *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(m, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("plugin.proto", 
fileDescriptor_22a625af4bc1cc87) } + +var fileDescriptor_22a625af4bc1cc87 = []byte{ + // 383 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcd, 0x6a, 0xd5, 0x40, + 0x14, 0xc7, 0x89, 0x37, 0xb5, 0xe4, 0xb4, 0x34, 0x65, 0xa8, 0x32, 0x94, 0x2e, 0xe2, 0x45, 0x30, + 0xab, 0x14, 0x8a, 0xe0, 0xbe, 0x15, 0x75, 0xe1, 0xe2, 0x32, 0x88, 0x0b, 0x41, 0x42, 0x4c, 0x4f, + 0xe2, 0x48, 0x32, 0x67, 0x9c, 0x99, 0x88, 0x4f, 0xea, 0x7b, 0xf8, 0x06, 0x32, 0x1f, 0xa9, 0x72, + 0xf1, 0xee, 0xe6, 0xff, 0x3b, 0xf3, 0x71, 0xce, 0x8f, 0x81, 0x53, 0x3d, 0x2d, 0xa3, 0x54, 0x8d, + 0x36, 0xe4, 0x88, 0xf1, 0x91, 0x68, 0x9c, 0x30, 0xa6, 0x2f, 0xcb, 0xd0, 0xf4, 0x34, 0x6b, 0x39, + 0xa1, 0xb9, 0xac, 0x62, 0xe5, 0x7a, 0xad, 0x5c, 0xdf, 0xa3, 0xed, 0x8d, 0xd4, 0x8e, 0x4c, 0xdc, + 0xbd, 0xed, 0xe1, 0xf8, 0x23, 0x1a, 0x2b, 0x49, 0xb1, 0x0b, 0x38, 0x9a, 0xbb, 0x6f, 0x64, 0x78, + 0x56, 0x65, 0xf5, 0x91, 0x88, 0x21, 0x50, 0xa9, 0xc8, 0xf0, 0x47, 0x89, 0xfa, 0xe0, 0xa9, 0xee, + 0x5c, 0xff, 0x95, 0x6f, 0x22, 0x0d, 0x81, 0x3d, 0x85, 0xc7, 0x76, 0x19, 0x06, 0xf9, 0x93, 0xe7, + 0x55, 0x56, 0x17, 0x22, 0xa5, 0xed, 0xef, 0x0c, 0x2e, 0xee, 0xe8, 0x1e, 0xdf, 0xa2, 0x42, 0xd3, + 0x39, 0x32, 0x02, 0xbf, 0x2f, 0x68, 0x1d, 0xab, 0xe1, 0x7c, 0x90, 0x13, 0xb6, 0x8e, 0xda, 0x31, + 0xd6, 0x90, 0x67, 0xd5, 0xa6, 0x2e, 0xc4, 0x99, 0xe7, 0x1f, 0x28, 0x9d, 0x40, 0x76, 0x05, 0x85, + 0xee, 0x4c, 0x37, 0xa3, 0xc3, 0xd8, 0x4a, 0x21, 0xfe, 0x02, 0x76, 0x07, 0x10, 0xc6, 0x69, 0xfd, + 0x29, 0x5e, 0x56, 0x9b, 0xfa, 0xe4, 0xe6, 0x79, 0xb3, 0xaf, 0xe5, 0x8d, 0x9c, 0xf0, 0xf5, 0x83, + 0x80, 0x9d, 0xc7, 0xa2, 0x08, 0x55, 0x5f, 0x61, 0xef, 0xe1, 0x7c, 0x15, 0xd7, 0xfe, 0x88, 0x4e, + 0xc2, 0x78, 0x27, 0x37, 0xcf, 0x9a, 0x43, 0x86, 0x9b, 0x24, 0x4f, 0x94, 0x2b, 0x49, 0x60, 0xfb, + 0x2b, 0x83, 0x27, 0x7b, 0x33, 0x5b, 0x4d, 0xca, 0xa2, 0x77, 0x87, 0xc6, 0x24, 0xcf, 0x85, 0x88, + 0x81, 0xbd, 0x83, 0xfc, 0x9f, 0xe6, 0x5f, 0x1e, 0x7e, 0xf1, 0xbf, 0x97, 0x86, 0xd9, 0x44, 0xb8, + 0xe1, 0xf2, 0x33, 0xe4, 0x61, 0x1e, 0x06, 0xb9, 0xea, 0x66, 0x4c, 0xcf, 0x84, 0x35, 0x7b, 0x01, + 0xa5, 0x54, 0x16, 0x8d, 0x93, 0xa4, 0x5a, 0x4d, 0x52, 0xb9, 0x24, 0xf3, 0xec, 0x01, 0xef, 0x3c, + 0x65, 0x1c, 0x8e, 0x7b, 0x52, 0x0e, 0x95, 0xe3, 0x65, 0xd8, 0xb0, 0xc6, 0xdb, 0x57, 0x70, 0xd5, + 0xd3, 0x7c, 0xb0, 0xbf, 0xdb, 0xd3, 0x5d, 0xf8, 0x9b, 0x41, 0xaf, 0xfd, 0x54, 0xc4, 0x9f, 0xda, + 0x8e, 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x72, 0x3d, 0x18, 0xb5, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogofaster/main.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogofaster/main.go new file mode 100644 index 000000000000..356fcfa0ac08 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogofaster/main.go @@ -0,0 +1,52 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "github.com/gogo/protobuf/vanity" + "github.com/gogo/protobuf/vanity/command" +) + +func main() { + req := command.Read() + files := req.GetProtoFile() + files = vanity.FilterFiles(files, vanity.NotGoogleProtobufDescriptorProto) + + vanity.ForEachFile(files, vanity.TurnOnMarshalerAll) + vanity.ForEachFile(files, vanity.TurnOnSizerAll) + vanity.ForEachFile(files, vanity.TurnOnUnmarshalerAll) + + vanity.ForEachFieldInFilesExcludingExtensions(vanity.OnlyProto2(files), vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly) + vanity.ForEachFile(files, vanity.TurnOffGoUnrecognizedAll) + vanity.ForEachFile(files, vanity.TurnOffGoUnkeyedAll) + vanity.ForEachFile(files, vanity.TurnOffGoSizecacheAll) + + resp := command.Generate(req) + command.Write(resp) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogoslick/main.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogoslick/main.go new file mode 100644 index 000000000000..a5b988ed3081 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogoslick/main.go @@ -0,0 +1,61 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
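+//
+// protoc-gen-gogoslick behaves like protoc-gen-gogofaster above, but
+// additionally turns on the Equal, GoString and Stringer plugins (see main
+// below). Illustrative invocation, with a hypothetical file name and the
+// built binary on PATH:
+//
+//	protoc --gogoslick_out=. myfile.proto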
+ +package main + +import ( + "github.com/gogo/protobuf/vanity" + "github.com/gogo/protobuf/vanity/command" +) + +func main() { + req := command.Read() + files := req.GetProtoFile() + files = vanity.FilterFiles(files, vanity.NotGoogleProtobufDescriptorProto) + + vanity.ForEachFile(files, vanity.TurnOnMarshalerAll) + vanity.ForEachFile(files, vanity.TurnOnSizerAll) + vanity.ForEachFile(files, vanity.TurnOnUnmarshalerAll) + + vanity.ForEachFieldInFilesExcludingExtensions(vanity.OnlyProto2(files), vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly) + vanity.ForEachFile(files, vanity.TurnOffGoUnrecognizedAll) + vanity.ForEachFile(files, vanity.TurnOffGoUnkeyedAll) + vanity.ForEachFile(files, vanity.TurnOffGoSizecacheAll) + + vanity.ForEachFile(files, vanity.TurnOffGoEnumPrefixAll) + vanity.ForEachFile(files, vanity.TurnOffGoEnumStringerAll) + vanity.ForEachFile(files, vanity.TurnOnEnumStringerAll) + + vanity.ForEachFile(files, vanity.TurnOnEqualAll) + vanity.ForEachFile(files, vanity.TurnOnGoStringAll) + vanity.ForEachFile(files, vanity.TurnOffGoStringerAll) + vanity.ForEachFile(files, vanity.TurnOnStringerAll) + + resp := command.Generate(req) + command.Write(resp) +} diff --git a/vendor/github.com/gogo/protobuf/vanity/command/command.go b/vendor/github.com/gogo/protobuf/vanity/command/command.go new file mode 100644 index 000000000000..eeca42ba0d07 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/command/command.go @@ -0,0 +1,161 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
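+//
+// Package command wires the generator to the protoc plugin protocol: Read
+// decodes a CodeGeneratorRequest from stdin, Generate runs the registered
+// plugins over it, and Write marshals the resulting CodeGeneratorResponse
+// to stdout. A plugin main therefore reduces to the one-liner used by
+// protoc-gen-gogo above:
+//
+//	func main() { command.Write(command.Generate(command.Read())) }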
+ +package command + +import ( + "fmt" + "go/format" + "io/ioutil" + "os" + "strings" + + _ "github.com/gogo/protobuf/plugin/compare" + _ "github.com/gogo/protobuf/plugin/defaultcheck" + _ "github.com/gogo/protobuf/plugin/description" + _ "github.com/gogo/protobuf/plugin/embedcheck" + _ "github.com/gogo/protobuf/plugin/enumstringer" + _ "github.com/gogo/protobuf/plugin/equal" + _ "github.com/gogo/protobuf/plugin/face" + _ "github.com/gogo/protobuf/plugin/gostring" + _ "github.com/gogo/protobuf/plugin/marshalto" + _ "github.com/gogo/protobuf/plugin/oneofcheck" + _ "github.com/gogo/protobuf/plugin/populate" + _ "github.com/gogo/protobuf/plugin/size" + _ "github.com/gogo/protobuf/plugin/stringer" + "github.com/gogo/protobuf/plugin/testgen" + _ "github.com/gogo/protobuf/plugin/union" + _ "github.com/gogo/protobuf/plugin/unmarshal" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + _ "github.com/gogo/protobuf/protoc-gen-gogo/grpc" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" +) + +func Read() *plugin.CodeGeneratorRequest { + g := generator.New() + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + return g.Request +} + +// filenameSuffix replaces the .pb.go at the end of each filename. +func GeneratePlugin(req *plugin.CodeGeneratorRequest, p generator.Plugin, filenameSuffix string) *plugin.CodeGeneratorResponse { + g := generator.New() + g.Request = req + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + g.WrapTypes() + g.SetPackageNames() + g.BuildTypeNameMap() + g.GeneratePlugin(p) + + for i := 0; i < len(g.Response.File); i++ { + g.Response.File[i].Name = proto.String( + strings.Replace(*g.Response.File[i].Name, ".pb.go", filenameSuffix, -1), + ) + } + if err := goformat(g.Response); err != nil { + g.Error(err) + } + return g.Response +} + +func goformat(resp *plugin.CodeGeneratorResponse) error { + for i := 0; i < len(resp.File); i++ { + formatted, err := format.Source([]byte(resp.File[i].GetContent())) + if err != nil { + return fmt.Errorf("go format error: %v", err) + } + fmts := string(formatted) + resp.File[i].Content = &fmts + } + return nil +} + +func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. + g := generator.New() + g.Request = req + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. 
+ g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + if err := goformat(g.Response); err != nil { + g.Error(err) + } + + testReq := proto.Clone(req).(*plugin.CodeGeneratorRequest) + + testResp := GeneratePlugin(testReq, testgen.NewPlugin(), "pb_test.go") + + for i := 0; i < len(testResp.File); i++ { + if strings.Contains(*testResp.File[i].Content, `//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) { + g.Response.File = append(g.Response.File, testResp.File[i]) + } + } + + return g.Response +} + +func Write(resp *plugin.CodeGeneratorResponse) { + g := generator.New() + // Send back the results. + data, err := proto.Marshal(resp) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff --git a/vendor/github.com/gogo/protobuf/vanity/enum.go b/vendor/github.com/gogo/protobuf/vanity/enum.go new file mode 100644 index 000000000000..466d07b54eb8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/enum.go @@ -0,0 +1,78 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
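+//
+// The helpers in this file toggle gogoproto enum options in place. They are
+// typically combined with the foreach helpers from this package, e.g.
+// (sketch):
+//
+//	vanity.ForEachEnumInFiles(files, vanity.TurnOffGoEnumPrefix)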
+ +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func EnumHasBoolExtension(enum *descriptor.EnumDescriptorProto, extension *proto.ExtensionDesc) bool { + if enum.Options == nil { + return false + } + value, err := proto.GetExtension(enum.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolEnumOption(extension *proto.ExtensionDesc, value bool) func(enum *descriptor.EnumDescriptorProto) { + return func(enum *descriptor.EnumDescriptorProto) { + if EnumHasBoolExtension(enum, extension) { + return + } + if enum.Options == nil { + enum.Options = &descriptor.EnumOptions{} + } + if err := proto.SetExtension(enum.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffGoEnumPrefix(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_GoprotoEnumPrefix, false)(enum) +} + +func TurnOffGoEnumStringer(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_GoprotoEnumStringer, false)(enum) +} + +func TurnOnEnumStringer(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_EnumStringer, true)(enum) +} diff --git a/vendor/github.com/gogo/protobuf/vanity/field.go b/vendor/github.com/gogo/protobuf/vanity/field.go new file mode 100644 index 000000000000..62cdddfabb49 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/field.go @@ -0,0 +1,90 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
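+//
+// The field helpers below are applied the same way; the gogofaster and
+// gogoslick generators above use, for example:
+//
+//	vanity.ForEachFieldInFilesExcludingExtensions(vanity.OnlyProto2(files),
+//		vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly)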
+ +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func FieldHasBoolExtension(field *descriptor.FieldDescriptorProto, extension *proto.ExtensionDesc) bool { + if field.Options == nil { + return false + } + value, err := proto.GetExtension(field.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolFieldOption(extension *proto.ExtensionDesc, value bool) func(field *descriptor.FieldDescriptorProto) { + return func(field *descriptor.FieldDescriptorProto) { + if FieldHasBoolExtension(field, extension) { + return + } + if field.Options == nil { + field.Options = &descriptor.FieldOptions{} + } + if err := proto.SetExtension(field.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffNullable(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() && !field.IsMessage() { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} + +func TurnOffNullableForNativeTypes(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() || field.IsMessage() { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} + +func TurnOffNullableForNativeTypesWithoutDefaultsOnly(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() || field.IsMessage() { + return + } + if field.DefaultValue != nil { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} diff --git a/vendor/github.com/gogo/protobuf/vanity/file.go b/vendor/github.com/gogo/protobuf/vanity/file.go new file mode 100644 index 000000000000..2055c66152e3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/file.go @@ -0,0 +1,197 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
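+//
+// File-wide options follow the same pattern; the generators above turn on
+// generated marshalling with, for example:
+//
+//	vanity.ForEachFile(files, vanity.TurnOnMarshalerAll)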
+ +package vanity + +import ( + "path/filepath" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func NotGoogleProtobufDescriptorProto(file *descriptor.FileDescriptorProto) bool { + // can not just check if file.GetName() == "google/protobuf/descriptor.proto" because we do not want to assume compile path + _, fileName := filepath.Split(file.GetName()) + return !(file.GetPackage() == "google.protobuf" && fileName == "descriptor.proto") +} + +func FilterFiles(files []*descriptor.FileDescriptorProto, f func(file *descriptor.FileDescriptorProto) bool) []*descriptor.FileDescriptorProto { + filtered := make([]*descriptor.FileDescriptorProto, 0, len(files)) + for i := range files { + if !f(files[i]) { + continue + } + filtered = append(filtered, files[i]) + } + return filtered +} + +func FileHasBoolExtension(file *descriptor.FileDescriptorProto, extension *proto.ExtensionDesc) bool { + if file.Options == nil { + return false + } + value, err := proto.GetExtension(file.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolFileOption(extension *proto.ExtensionDesc, value bool) func(file *descriptor.FileDescriptorProto) { + return func(file *descriptor.FileDescriptorProto) { + if FileHasBoolExtension(file, extension) { + return + } + if file.Options == nil { + file.Options = &descriptor.FileOptions{} + } + if err := proto.SetExtension(file.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffGoGettersAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoGettersAll, false)(file) +} + +func TurnOffGoEnumPrefixAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoEnumPrefixAll, false)(file) +} + +func TurnOffGoStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoStringerAll, false)(file) +} + +func TurnOnVerboseEqualAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_VerboseEqualAll, true)(file) +} + +func TurnOnFaceAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_FaceAll, true)(file) +} + +func TurnOnGoStringAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GostringAll, true)(file) +} + +func TurnOnPopulateAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_PopulateAll, true)(file) +} + +func TurnOnStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_StringerAll, true)(file) +} + +func TurnOnEqualAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_EqualAll, true)(file) +} + +func TurnOnDescriptionAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_DescriptionAll, true)(file) +} + +func TurnOnTestGenAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_TestgenAll, true)(file) +} + +func TurnOnBenchGenAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_BenchgenAll, true)(file) +} + +func TurnOnMarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_MarshalerAll, true)(file) +} + +func TurnOnUnmarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnmarshalerAll, true)(file) +} + +func TurnOnStable_MarshalerAll(file *descriptor.FileDescriptorProto) { + 
SetBoolFileOption(gogoproto.E_StableMarshalerAll, true)(file) +} + +func TurnOnSizerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_SizerAll, true)(file) +} + +func TurnOffGoEnumStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoEnumStringerAll, false)(file) +} + +func TurnOnEnumStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_EnumStringerAll, true)(file) +} + +func TurnOnUnsafeUnmarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnsafeUnmarshalerAll, true)(file) +} + +func TurnOnUnsafeMarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnsafeMarshalerAll, true)(file) +} + +func TurnOffGoExtensionsMapAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoExtensionsMapAll, false)(file) +} + +func TurnOffGoUnrecognizedAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoUnrecognizedAll, false)(file) +} + +func TurnOffGoUnkeyedAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoUnkeyedAll, false)(file) +} + +func TurnOffGoSizecacheAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoSizecacheAll, false)(file) +} + +func TurnOffGogoImport(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GogoprotoImport, false)(file) +} + +func TurnOnCompareAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_CompareAll, true)(file) +} + +func TurnOnMessageNameAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_MessagenameAll, true)(file) +} + +func TurnOnGoRegistration(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoRegistration, true)(file) +} diff --git a/vendor/github.com/gogo/protobuf/vanity/foreach.go b/vendor/github.com/gogo/protobuf/vanity/foreach.go new file mode 100644 index 000000000000..888b6d04d594 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/foreach.go @@ -0,0 +1,125 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package vanity + +import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +func ForEachFile(files []*descriptor.FileDescriptorProto, f func(file *descriptor.FileDescriptorProto)) { + for _, file := range files { + f(file) + } +} + +func OnlyProto2(files []*descriptor.FileDescriptorProto) []*descriptor.FileDescriptorProto { + outs := make([]*descriptor.FileDescriptorProto, 0, len(files)) + for i, file := range files { + if file.GetSyntax() == "proto3" { + continue + } + outs = append(outs, files[i]) + } + return outs +} + +func OnlyProto3(files []*descriptor.FileDescriptorProto) []*descriptor.FileDescriptorProto { + outs := make([]*descriptor.FileDescriptorProto, 0, len(files)) + for i, file := range files { + if file.GetSyntax() != "proto3" { + continue + } + outs = append(outs, files[i]) + } + return outs +} + +func ForEachMessageInFiles(files []*descriptor.FileDescriptorProto, f func(msg *descriptor.DescriptorProto)) { + for _, file := range files { + ForEachMessage(file.MessageType, f) + } +} + +func ForEachMessage(msgs []*descriptor.DescriptorProto, f func(msg *descriptor.DescriptorProto)) { + for _, msg := range msgs { + f(msg) + ForEachMessage(msg.NestedType, f) + } +} + +func ForEachFieldInFilesExcludingExtensions(files []*descriptor.FileDescriptorProto, f func(field *descriptor.FieldDescriptorProto)) { + for _, file := range files { + ForEachFieldExcludingExtensions(file.MessageType, f) + } +} + +func ForEachFieldInFiles(files []*descriptor.FileDescriptorProto, f func(field *descriptor.FieldDescriptorProto)) { + for _, file := range files { + for _, ext := range file.Extension { + f(ext) + } + ForEachField(file.MessageType, f) + } +} + +func ForEachFieldExcludingExtensions(msgs []*descriptor.DescriptorProto, f func(field *descriptor.FieldDescriptorProto)) { + for _, msg := range msgs { + for _, field := range msg.Field { + f(field) + } + ForEachField(msg.NestedType, f) + } +} + +func ForEachField(msgs []*descriptor.DescriptorProto, f func(field *descriptor.FieldDescriptorProto)) { + for _, msg := range msgs { + for _, field := range msg.Field { + f(field) + } + for _, ext := range msg.Extension { + f(ext) + } + ForEachField(msg.NestedType, f) + } +} + +func ForEachEnumInFiles(files []*descriptor.FileDescriptorProto, f func(enum *descriptor.EnumDescriptorProto)) { + for _, file := range files { + for _, enum := range file.EnumType { + f(enum) + } + } +} + +func ForEachEnum(msgs []*descriptor.DescriptorProto, f func(field *descriptor.EnumDescriptorProto)) { + for _, msg := range msgs { + for _, field := range msg.EnumType { + f(field) + } + ForEachEnum(msg.NestedType, f) + } +} diff --git a/vendor/github.com/gogo/protobuf/vanity/msg.go b/vendor/github.com/gogo/protobuf/vanity/msg.go new file mode 100644 index 000000000000..390ff5ad44f3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/vanity/msg.go @@ -0,0 +1,154 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func MessageHasBoolExtension(msg *descriptor.DescriptorProto, extension *proto.ExtensionDesc) bool { + if msg.Options == nil { + return false + } + value, err := proto.GetExtension(msg.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolMessageOption(extension *proto.ExtensionDesc, value bool) func(msg *descriptor.DescriptorProto) { + return func(msg *descriptor.DescriptorProto) { + if MessageHasBoolExtension(msg, extension) { + return + } + if msg.Options == nil { + msg.Options = &descriptor.MessageOptions{} + } + if err := proto.SetExtension(msg.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffGoGetters(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoGetters, false)(msg) +} + +func TurnOffGoStringer(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoStringer, false)(msg) +} + +func TurnOnVerboseEqual(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_VerboseEqual, true)(msg) +} + +func TurnOnFace(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Face, true)(msg) +} + +func TurnOnGoString(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Gostring, true)(msg) +} + +func TurnOnPopulate(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Populate, true)(msg) +} + +func TurnOnStringer(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Stringer, true)(msg) +} + +func TurnOnEqual(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Equal, true)(msg) +} + +func TurnOnDescription(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Description, true)(msg) +} + +func TurnOnTestGen(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Testgen, true)(msg) +} + +func TurnOnBenchGen(msg *descriptor.DescriptorProto) { +
SetBoolMessageOption(gogoproto.E_Benchgen, true)(msg) +} + +func TurnOnMarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Marshaler, true)(msg) +} + +func TurnOnUnmarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Unmarshaler, true)(msg) +} + +func TurnOnSizer(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Sizer, true)(msg) +} + +func TurnOnUnsafeUnmarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_UnsafeUnmarshaler, true)(msg) +} + +func TurnOnUnsafeMarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_UnsafeMarshaler, true)(msg) +} + +func TurnOffGoExtensionsMap(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoExtensionsMap, false)(msg) +} + +func TurnOffGoUnrecognized(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoUnrecognized, false)(msg) +} + +func TurnOffGoUnkeyed(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoUnkeyed, false)(msg) +} + +func TurnOffGoSizecache(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoSizecache, false)(msg) +} + +func TurnOnCompare(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Compare, true)(msg) +} + +func TurnOnMessageName(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Messagename, true)(msg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index 3072d24a9da6..f5d551ca8fd8 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -36,9 +36,23 @@ The part in the middle is the interesting bit. It's called the Claims and conta This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. +## Installation Guidelines + +1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v4 +``` + +2. Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v4" +``` + ## Examples -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: * [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) * [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) @@ -46,9 +60,17 @@ See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) fo ## Extensions -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. +This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`. 
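To make the `jwt.Keyfunc` hook mentioned above concrete, here is a minimal, self-contained sketch. The secret and claims are invented for the example, and `WithValidMethods` is the parser option introduced further down in this diff:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	// Illustrative shared secret; real code would load this from configuration.
	key := []byte("example-secret")

	// Build and sign a token so the example is self-contained.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(key)
	if err != nil {
		panic(err)
	}

	// The Keyfunc inspects the parsed (but not yet verified) token and returns
	// the verification key; rejecting unexpected signing methods guards against
	// algorithm-confusion attacks.
	keyFunc := func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	}

	token, err := jwt.Parse(signed, keyFunc, jwt.WithValidMethods([]string{"HS256"}))
	fmt.Println(token.Valid, err) // true <nil>
}
```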
+ +A common use case would be integrating with different third-party signature providers, such as key management services from various cloud providers or Hardware Security Modules (HSMs), or to implement additional standards. -Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | + +*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered a primary offering by any of the mentioned cloud providers. ## Compliance @@ -112,3 +134,5 @@ This library uses descriptive error messages whenever possible. If you are not g Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. + +[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md new file mode 100644 index 000000000000..b08402c3427f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. + +## Reporting a Vulnerability + +If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit, and describe the steps to reproduce the security issue with code example(s). + +You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. + +## Public Discussions + +Please avoid publicly discussing a potential security vulnerability. + +Let's take this offline and find a solution first; this limits the potential impact as much as possible. + +We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go index b07ac02de094..9d95cad2bf27 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -56,17 +56,17 @@ func (c RegisteredClaims) Valid() error { // default value in Go, let's not fail the verification for them.
if !c.VerifyExpiresAt(now, false) { delta := now.Sub(c.ExpiresAt.Time) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) vErr.Errors |= ValidationErrorExpired } if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("token used before issued") + vErr.Inner = ErrTokenUsedBeforeIssued vErr.Errors |= ValidationErrorIssuedAt } if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Inner = ErrTokenNotValidYet vErr.Errors |= ValidationErrorNotValidYet } @@ -83,7 +83,7 @@ func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { return verifyAud(c.Audience, cmp, req) } -// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). // If req is false, it will return true, if exp is unset. func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { if c.ExpiresAt == nil { @@ -113,6 +113,12 @@ func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { return verifyNbf(&c.NotBefore.Time, cmp, req) } +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + // StandardClaims are a structured version of the JWT Claims Set, as referenced at // https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the // specification exactly, since they were based on an earlier draft of the @@ -143,17 +149,17 @@ func (c StandardClaims) Valid() error { // default value in Go, let's not fail the verification for them. if !c.VerifyExpiresAt(now, false) { delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) vErr.Errors |= ValidationErrorExpired } if !c.VerifyIssuedAt(now, false) { - vErr.Inner = fmt.Errorf("token used before issued") + vErr.Inner = ErrTokenUsedBeforeIssued vErr.Errors |= ValidationErrorIssuedAt } if !c.VerifyNotBefore(now, false) { - vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Inner = ErrTokenNotValidYet vErr.Errors |= ValidationErrorNotValidYet } @@ -170,7 +176,7 @@ func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { return verifyAud([]string{c.Audience}, cmp, req) } -// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). // If req is false, it will return true, if exp is unset. 
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { if c.ExpiresAt == 0 { diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go index f309878b30d6..10ac8835cc88 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/errors.go +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -9,6 +9,18 @@ var ( ErrInvalidKey = errors.New("key is invalid") ErrInvalidKeyType = errors.New("key is of invalid type") ErrHashUnavailable = errors.New("the requested hash function is unavailable") + + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") ) // The errors that might occur when parsing and validating a token @@ -53,7 +65,48 @@ func (e ValidationError) Error() string { } } +// Unwrap gives errors.Is and errors.As access to the inner error. +func (e *ValidationError) Unwrap() error { + return e.Inner +} + // No errors func (e *ValidationError) valid() bool { return e.Errors == 0 } + +// Is checks whether this ValidationError matches the supplied error. We first check for a direct match +// by comparing the inner error. If that fails, we compare using the error flags. This way we can use +// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
+func (e *ValidationError) Is(err error) bool { + // Check if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go index e7da633b93c6..2700d64a0d09 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -126,16 +126,19 @@ func (m MapClaims) Valid() error { now := TimeFunc().Unix() if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired vErr.Inner = errors.New("Token is expired") vErr.Errors |= ValidationErrorExpired } if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued vErr.Inner = errors.New("Token used before issued") vErr.Errors |= ValidationErrorIssuedAt } if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet vErr.Inner = errors.New("Token is not valid yet") vErr.Errors |= ValidationErrorNotValidYet } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index 0c811f311b63..2f61a69d7fcb 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -8,14 +8,36 @@ import ( ) type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + SkipClaimsValidation bool } -// Parse parses, validates, and returns a token. +// NewParser creates a new Parser with the specified options. +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token.
// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 000000000000..6ea6f9527de6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is strongly encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber. +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you know exactly +// what you are doing. +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.SkipClaimsValidation = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go index 5a8502feb34b..4fd6f9e610b0 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -1,3 +1,4 @@ +//go:build go1.4 // +build go1.4 package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go index 3269170f31f1..241ae9c60d08 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -33,3 +33,14 @@ func GetSigningMethod(alg string) (method SigningMethod) { } return } + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go index b896acb0b4aa..3cb0f3f0e4c0 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -7,6 +7,13 @@ import ( "time" ) +// DecodePaddingAllowed will switch the codec used for decoding JWTs to one that allows padding. Note that the JWS RFC7515 +// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations +// of JWT are producing non-standard tokens, and thus require support for decoding them. Note that this is a global +// variable; updating it will change the behavior on a package level and is NOT goroutine safe. +// To use the non-recommended decoding, set this boolean to `true` prior to using this package.
+var DecodePaddingAllowed bool + // TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). // You can override it to use another time value. This is useful for testing or if your // server uses a different time zone than your tokens. @@ -29,11 +36,12 @@ type Token struct { Valid bool // Is the token valid? Populated when you Parse/Verify a token } -// New creates a new Token. Takes a signing method +// New creates a new Token with the specified signing method and an empty map of claims. func New(method SigningMethod) *Token { return NewWithClaims(method, MapClaims{}) } +// NewWithClaims creates a new Token with the specified signing method and claims. func NewWithClaims(method SigningMethod, claims Claims) *Token { return &Token{ Header: map[string]interface{}{ @@ -45,7 +53,8 @@ func NewWithClaims(method SigningMethod, claims Claims) *Token { } } -// SignedString retrieves the complete, signed token +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. func (t *Token) SignedString(key interface{}) (string, error) { var sig, sstr string var err error @@ -64,33 +73,34 @@ func (t *Token) SignedString(key interface{}) (string, error) { // the SignedString. func (t *Token) SigningString() (string, error) { var err error - parts := make([]string, 2) - for i := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } + var jsonValue []byte + + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + header := EncodeSegment(jsonValue) - parts[i] = EncodeSegment(jsonValue) + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err } - return strings.Join(parts, "."), nil + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil } -// Parse parses, validates, and returns a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate that the 'alg' claim in the token matches the expected algorithm.
+// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) } -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) } // EncodeSegment encodes a JWT specific base64url encoding with padding stripped @@ -106,5 +116,12 @@ func EncodeSegment(seg []byte) string { // Deprecated: In a future release, we will demote this function to a non-exported function, since it // should only be used internally func DecodeSegment(seg string) ([]byte, error) { + if DecodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + return base64.URLEncoding.DecodeString(seg) + } + return base64.RawURLEncoding.DecodeString(seg) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go index 15c39a302183..ac8e140eb119 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/types.go +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -3,6 +3,7 @@ package jwt import ( "encoding/json" "fmt" + "math" "reflect" "strconv" "time" @@ -41,15 +42,34 @@ func NewNumericDate(t time.Time) *NumericDate { // newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a // UNIX epoch with the float fraction representing non-integer seconds. func newNumericDateFromSeconds(f float64) *NumericDate { - return NewNumericDate(time.Unix(0, int64(f*float64(time.Second)))) + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) } // MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch // represented in NumericDate to a byte array, using the precision specified in TimePrecision. func (date NumericDate) MarshalJSON() (b []byte, err error) { - f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second) - - return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get around the issue: + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. Take the result of the Nanosecond function, which returns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+ + return output, nil } // UnmarshalJSON is an implementation of the json.RawMessage interface and deserializes a diff --git a/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go new file mode 100644 index 000000000000..fd2f51d89011 --- /dev/null +++ b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go @@ -0,0 +1,398 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gengogrpc contains the gRPC code generator. +package gengogrpc + +import ( + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/compiler/protogen" + + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + contextPackage = protogen.GoImportPath("context") + grpcPackage = protogen.GoImportPath("google.golang.org/grpc") + codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes") + statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") +) + +// GenerateFile generates a _grpc.pb.go file containing gRPC service definitions. +func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Services) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + GenerateFileContent(gen, file, g) + return g +} + +// GenerateFileContent generates the gRPC service definitions, excluding the package statement. +func GenerateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Services) == 0 { + return + } + + // TODO: Remove this. We don't need to include these references any more. + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPackage.Ident("Context")) + g.P("var _ ", grpcPackage.Ident("ClientConnInterface")) + g.P() + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion6")) + g.P() + for _, service := range file.Services { + genService(gen, file, g, service) + } +} + +func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + clientName := service.GoName + "Client" + + g.P("// ", clientName, " is the client API for ", service.GoName, " service.") + g.P("//") + g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.") + + // Client interface. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(clientName, service.Location) + g.P("type ", clientName, " interface {") + for _, method := range service.Methods { + g.Annotate(clientName+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + clientSignature(g, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() + + // NewClient factory.
+ if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") + g.P("return &", unexport(clientName), "{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + // Client method implementations. + for _, method := range service.Methods { + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + // Unary RPC method + genClientMethod(gen, file, g, method, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + genClientMethod(gen, file, g, method, streamIndex) + streamIndex++ + } + } + + // Server interface. + serverType := service.GoName + "Server" + g.P("// ", serverType, " is the server API for ", service.GoName, " service.") + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(serverType, service.Location) + g.P("type ", serverType, " interface {") + for _, method := range service.Methods { + g.Annotate(serverType+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + serverSignature(g, method)) + } + g.P("}") + g.P() + + // Server Unimplemented struct for forward compatibility. + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (*Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + g.P() + + // Server registration. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("func Register", service.GoName, "Server(s *", grpcPackage.Ident("Server"), ", srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
+ g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context")) + if !method.Desc.IsStreamingClient() { + s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent) + } + s += ", opts ..." + g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") (" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + s += "*" + g.QualifiedGoIdent(method.Output.GoIdent) + } else { + s += method.Parent.GoName + "_" + method.GoName + "Client" + } + s += ", error)" + return s +} + +func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { + service := method.Parent + sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + g.P("out := new(", method.Output.GoIdent, ")") + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(service.GoName) + method.GoName + "Client" + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.Desc.IsStreamingClient() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingClient() + genRecv := method.Desc.IsStreamingServer() + genCloseAndRecv := !method.Desc.IsStreamingServer() + + // Stream auxiliary types and methods. 
+ g.P("type ", service.GoName, "_", method.GoName, "Client interface {") + if genSend { + g.P("Send(*", method.Input.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Output.GoIdent, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + var reqArgs []string + ret := "error" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context"))) + ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)" + } + if !method.Desc.IsStreamingClient() { + reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent)) + } + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + } + return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { + service := method.Parent + hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) + + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("in := new(", method.Input.GoIdent, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") + g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.GoName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(service.GoName) + method.GoName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + if !method.Desc.IsStreamingClient() { + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", service.GoName, 
"Server).", method.GoName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingServer() + genSendAndClose := !method.Desc.IsStreamingServer() + genRecv := method.Desc.IsStreamingClient() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Server interface {") + if genSend { + g.P("Send(*", method.Output.GoIdent, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", method.Output.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Input.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {") + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} + +const deprecationComment = "// Deprecated: Do not use." + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 000000000000..d45b719d1c6e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,74 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Install it by building this program and making it accessible within +// your PATH with the name: +// protoc-gen-go +// +// The 'go' suffix becomes part of the argument for the protocol compiler, +// such that it can be invoked as: +// protoc --go_out=paths=source_relative:. path/to/file.proto +// +// This generates Go bindings for the protocol buffer defined by file.proto. 
+// With that input, the output will be written to: +// path/to/file.pb.go +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/golang/protobuf/internal/gengogrpc" + gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo" + "google.golang.org/protobuf/compiler/protogen" +) + +func main() { + var ( + flags flag.FlagSet + plugins = flags.String("plugins", "", "list of plugins to enable (supported values: grpc)") + importPrefix = flags.String("import_prefix", "", "prefix to prepend to import paths") + ) + importRewriteFunc := func(importPath protogen.GoImportPath) protogen.GoImportPath { + switch importPath { + case "context", "fmt", "math": + return importPath + } + if *importPrefix != "" { + return protogen.GoImportPath(*importPrefix) + importPath + } + return importPath + } + protogen.Options{ + ParamFunc: flags.Set, + ImportRewriteFunc: importRewriteFunc, + }.Run(func(gen *protogen.Plugin) error { + grpc := false + for _, plugin := range strings.Split(*plugins, ",") { + switch plugin { + case "grpc": + grpc = true + case "": + default: + return fmt.Errorf("protoc-gen-go: unknown plugin %q", plugin) + } + } + for _, f := range gen.Files { + if !f.Generate { + continue + } + g := gengo.GenerateFile(gen, f) + if grpc { + gengogrpc.GenerateFileContent(gen, f, g) + } + } + gen.SupportedFeatures = gengo.SupportedFeatures + return nil + }) +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2a5446762c51..087320da7f0f 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. 
Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -40,28 +40,30 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -142,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type.
var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -637,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16cfaa..a248e5436d98 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. 
+	//   - Searching for differences switches back-and-forth between
+	//     a search that starts at the beginning (the top-left corner), and
+	//     a search that starts at the end (the bottom-right corner).
+	//     The goal of the search is to connect with the search
+	//     from the opposite corner.
+	//   - As we search, we build a path in a greedy manner,
+	//     where the first match seen is added to the path (this is sub-optimal,
+	//     but provides a decent result in practice). When matches are found,
+	//     we try the next pair of symbols in the lists and follow all matches
+	//     as far as possible.
+	//   - When searching for matches, we search along a diagonal going
+	//     through the "frontier" point. If no matches are found,
+	//     we advance the frontier towards the opposite corner.
+	//   - This algorithm terminates when either the X coordinates or the
+	//     Y coordinates of the forward and reverse frontier points ever intersect.

 	// This algorithm is correct even if searching only in the forward direction
 	// or in the reverse direction. We do both because it is commonly observed
@@ -389,6 +392,7 @@ type point struct{ X, Y int }
 func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }

 // zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
 // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
 func zigzag(x int) int {
 	if x&1 != 0 {
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
deleted file mode 100644
index 9147a2997311..000000000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
-	"math"
-	"reflect"
-)
-
-// IsZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
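The zero.go removal that starts here is possible because reflect.Value.IsZero has shipped in the standard library since Go 1.13 with the same semantics; the call sites further down switch to the method. A quick illustration of the replacement (standard library only):

```go
package main

import (
	"fmt"
	"reflect"
)

type pair struct{ X, Y int }

func main() {
	// reflect.Value.IsZero (Go 1.13+) handles the same kinds the deleted
	// helper did, including arrays and structs, without needing Interface().
	fmt.Println(reflect.ValueOf(0).IsZero())          // true
	fmt.Println(reflect.ValueOf("").IsZero())         // true
	fmt.Println(reflect.ValueOf(pair{}).IsZero())     // true
	fmt.Println(reflect.ValueOf(pair{X: 1}).IsZero()) // false
}
```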
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb5392d..1f9ca9c4892b 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c7100346323b..a0a588502ed6 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
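The Symmetric/Deterministic/Pure contract for Comparer functions above is easy to satisfy by construction. A minimal sketch (not from the diff; go-cmp API only), where taking math.Abs of the difference makes the function symmetric in its arguments:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Symmetric (|x-y| == |y-x|), deterministic, and pure: the function
	// reads its inputs and touches nothing else.
	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})

	fmt.Println(cmp.Equal(0.1+0.2, 0.3))         // false: exact ==
	fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true: within tolerance
}
```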
+	//   - For StructField, both are not interface-able if the current field
+	//     is unexported and the struct type is not explicitly permitted by
+	//     an Exporter to traverse unexported fields.
+	//   - For SliceIndex, one may be invalid if an element is missing from
+	//     either the x or y slice.
+	//   - For MapIndex, one may be invalid if an entry is missing from
+	//     either the x or y map.
 	//
 	// The provided values must not be mutated.
 	Values() (vx, vy reflect.Value)
@@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
 // The simplified path only contains struct field accesses.
 //
 // For example:
+//
 //	MyMap.MySlices.MyField
 func (pa Path) String() string {
 	var ss []string
@@ -108,6 +109,7 @@ func (pa Path) String() string {
 // GoString returns the path to a specific node using Go syntax.
 //
 // For example:
+//
 //	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
 func (pa Path) GoString() string {
 	var ssPre, ssPost []string
@@ -159,7 +161,7 @@ func (ps pathStep) String() string {
 	if ps.typ == nil {
 		return "<nil>"
 	}
-	s := ps.typ.String()
+	s := value.TypeString(ps.typ, false)
 	if s == "" || strings.ContainsAny(s, "{}\n") {
 		return "root" // Type too simple or complex to print
 	}
@@ -282,7 +284,7 @@ type typeAssertion struct {
 func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
 func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
-func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }

 // Transform is a transformation from the parent type to the current type.
 type Transform struct{ *transform }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
index 104bb30538bc..2050bf6b46b7 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -7,8 +7,6 @@ package cmp
 import (
 	"fmt"
 	"reflect"
-
-	"github.com/google/go-cmp/cmp/internal/value"
 )

 // numContextRecords is the number of surrounding equal records to print.
@@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
 	}

 	// For leaf nodes, format the value based on the reflect.Values alone.
-	if v.MaxDepth == 0 {
+	// As a special case, treat equal []byte as leaf nodes.
+	isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+	isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+	if v.MaxDepth == 0 || isEqualBytes {
 		switch opts.DiffMode {
 		case diffUnknown, diffIdentical:
 			// Format Equal.
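Since the hunk above corrects the Result doc to point at Report, here is a sketch of the Reporter protocol that Result belongs to, closely following the pattern the package itself documents (PushStep/Report/PopStep bracket every node visited):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the Path of every node reported unequal.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %v != %v", r.path, vx, vy))
	}
}

func main() {
	type T struct{ A, B int }
	var r diffReporter
	cmp.Equal(T{1, 2}, T{1, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // one entry, for field B
}
```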
@@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 76c04fdbd6a7..2ab41fad3fb5 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,13 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) skipType = true - return opts.WithTypeMode(emitType).FormatType(t, out) + return opts.FormatType(t, out) } } @@ -282,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -293,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. 
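The new package-level vars above rely on a standard reflect idiom: TypeOf of a typed nil pointer, then Elem, yields the reflect.Type for T without ever constructing a T, and it is the only way to name an interface type at all, since reflect.TypeOf(nil) is just nil. A standalone illustration:

```go
package main

import (
	"fmt"
	"reflect"
)

var (
	// reflect.TypeOf((*T)(nil)).Elem() names T without constructing one.
	anyType   = reflect.TypeOf((*interface{})(nil)).Elem()
	bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
)

func main() {
	fmt.Println(anyType)             // interface {}
	fmt.Println(bytesType)           // []uint8
	fmt.Println(reflect.TypeOf(nil)) // <nil>: why the pointer dance is needed
}
```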
- skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae164d..23e444f62f36 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... 
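The escapeExpansionRatio heuristic added above is plain arithmetic: quote both strings and see how much they grow; growth beyond 10% means enough bytes need escaping that the quoted single-line form would be unreadable. A rough standalone sketch mirroring the hunk's expressions:

```go
package main

import (
	"fmt"
	"strconv"
)

func escapeExpansionRatio(sx, sy string) float64 {
	quotedLength := len(strconv.Quote(sx + sy))
	unquotedLength := len(sx) + len(sy)
	return float64(quotedLength) / float64(unquotedLength)
}

func main() {
	// Printable ASCII only grows by the surrounding quotes...
	fmt.Println(escapeExpansionRatio("hello, world", "hello, world!")) // 1.08
	// ...while control bytes blow up into \xNN escapes.
	fmt.Println(escapeExpansionRatio("\x01\x02", "\x03\x04")) // 4.5
}
```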
// 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ffb6e..388fcf571208 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md index 8943becf19bf..09f5eaf22176 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -45,6 +45,25 @@ The returned response object is an `*http.Response`, the same thing you would usually get from `net/http`. Had the request failed one or more times, the above call would block and retry with exponential backoff. +## Retrying cases that fail after a seeming success + +It's possible for a request to succeed in the sense that the expected response headers are received, but then to encounter network-level errors while reading the response body. In go-retryablehttp's most basic usage, this error would not be retryable, due to the out-of-band handling of the response body. In some cases it may be desirable to handle the response body as part of the retryable operation. 
+
+A toy example (which will retry the full request and succeed on the second attempt) is shown below:
+
+```go
+c := retryablehttp.NewClient()
+r, err := retryablehttp.NewRequest("GET", "https://example.com", nil)
+if err != nil {
+	panic(err)
+}
+handlerShouldRetry := true
+r.SetResponseHandler(func(*http.Response) error {
+	if !handlerShouldRetry {
+		return nil
+	}
+	handlerShouldRetry = false
+	return errors.New("retryable error")
+})
+```
+
 ## Getting a stdlib `*http.Client` with retries

 It's possible to convert a *retryablehttp.Client directly to a *http.Client.
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
index adbdd92e3ba2..57116e960723 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/client.go
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -69,11 +69,21 @@ var (
 	// scheme specified in the URL is invalid. This error isn't typed
 	// specifically so we resort to matching on the error string.
 	schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
+
+	// A regular expression to match the error returned by net/http when the
+	// TLS certificate is not trusted. This error isn't typed
+	// specifically so we resort to matching on the error string.
+	notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`)
 )

 // ReaderFunc is the type of function that can be given natively to NewRequest
 type ReaderFunc func() (io.Reader, error)

+// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it.
+// It only runs if the initial part of the request was successful.
+// If an error is returned, the client's retry policy will be used to determine whether to retry the whole request.
+type ResponseHandlerFunc func(*http.Response) error
+
 // LenReader is an interface implemented by many in-memory io.Reader's. Used
 // for automatically sending the right Content-Length header when possible.
 type LenReader interface {
@@ -86,6 +96,8 @@ type Request struct {
 	// used to rewind the request data in between retries.
 	body ReaderFunc

+	responseHandler ResponseHandlerFunc
+
 	// Embed an HTTP request directly. This makes a *Request act exactly
 	// like an *http.Request so that all meta methods are supported.
 	*http.Request
@@ -94,8 +106,16 @@ type Request struct {
 // WithContext returns wrapped Request with a shallow copy of underlying *http.Request
 // with its context changed to ctx. The provided ctx must be non-nil.
 func (r *Request) WithContext(ctx context.Context) *Request {
-	r.Request = r.Request.WithContext(ctx)
-	return r
+	return &Request{
+		body:            r.body,
+		responseHandler: r.responseHandler,
+		Request:         r.Request.WithContext(ctx),
+	}
+}
+
+// SetResponseHandler allows setting the response handler.
+func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) {
+	r.responseHandler = fn
 }

 // BodyBytes allows accessing the request body. It is an analogue to
@@ -252,23 +272,31 @@ func FromRequest(r *http.Request) (*Request, error) {
 		return nil, err
 	}
 	// Could assert contentLength == r.ContentLength
-	return &Request{bodyReader, r}, nil
+	return &Request{body: bodyReader, Request: r}, nil
 }

 // NewRequest creates a new wrapped request.
 func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
+	return NewRequestWithContext(context.Background(), method, url, rawBody)
+}
+
+// NewRequestWithContext creates a new wrapped request with the provided context.
+// +// The context controls the entire lifetime of a request and its response: +// obtaining a connection, sending the request, and reading the response headers and body. +func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) if err != nil { return nil, err } - httpReq, err := http.NewRequest(method, url, nil) + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) if err != nil { return nil, err } httpReq.ContentLength = contentLength - return &Request{bodyReader, httpReq}, nil + return &Request{body: bodyReader, Request: httpReq}, nil } // Logger interface allows to use other loggers than @@ -435,6 +463,9 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { } // Don't retry if the error was due to TLS cert verification failure. + if notTrustedErrorRe.MatchString(v.Error()) { + return false, v + } if _, ok := v.Err.(x509.UnknownAuthorityError); ok { return false, v } @@ -455,7 +486,7 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { // the server time to recover, as 500's are typically not permanent // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) } @@ -555,13 +586,12 @@ func (c *Client) Do(req *Request) (*http.Response, error) { var resp *http.Response var attempt int var shouldRetry bool - var doErr, checkErr error + var doErr, respErr, checkErr error for i := 0; ; i++ { + doErr, respErr = nil, nil attempt++ - var code int // HTTP response code - // Always rewind the request body when non-nil. if req.body != nil { body, err := req.body() @@ -589,19 +619,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // Attempt the request resp, doErr = c.HTTPClient.Do(req.Request) - if resp != nil { - code = resp.StatusCode - } // Check if we should continue with retries. 
shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } - if doErr != nil { + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { switch v := logger.(type) { case LeveledLogger: - v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL) + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) case Logger: - v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr) + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) } } else { // Call this here to maintain the behavior of logging all requests, @@ -636,11 +671,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } switch v := logger.(type) { case LeveledLogger: v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) @@ -648,11 +683,13 @@ func (c *Client) Do(req *Request) (*http.Response, error) { v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) } } + timer := time.NewTimer(wait) select { case <-req.Context().Done(): + timer.Stop() c.HTTPClient.CloseIdleConnections() return nil, req.Context().Err() - case <-time.After(wait): + case <-timer.C: } // Make shallow copy of http Request so that we can modify its body @@ -662,15 +699,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } // this is the closest we have to success criteria - if doErr == nil && checkErr == nil && !shouldRetry { + if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { return resp, nil } defer c.HTTPClient.CloseIdleConnections() - err := doErr + var err error if checkErr != nil { err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr } if c.ErrorHandler != nil { diff --git a/vendor/github.com/in-toto/in-toto-golang/LICENSE b/vendor/github.com/in-toto/in-toto-golang/LICENSE new file mode 100644 index 000000000000..963ee949e8e1 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/LICENSE @@ -0,0 +1,13 @@ +Copyright 2018 New York University + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
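Stepping back before the in-toto files: a sketch of how a caller might combine the pieces added to client.go above, NewRequestWithContext plus a response handler, so that body-read failures flow back through the retry policy. The URL is hypothetical; everything else is either from this diff or the standard library.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	c := retryablehttp.NewClient()
	c.RetryMax = 3

	// The context set here bounds every attempt; Client.Do's
	// timer-based select (above) aborts backoff waits when it expires.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req, err := retryablehttp.NewRequestWithContext(ctx, "GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}

	// Fold reading the body into the retryable operation: a read error is
	// fed back through CheckRetry, so the whole request is retried.
	var body []byte
	req.SetResponseHandler(func(resp *http.Response) error {
		b, readErr := io.ReadAll(resp.Body)
		if readErr != nil {
			return readErr
		}
		body = b
		return nil
	})

	resp, err := c.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(len(body), "bytes")
}
```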
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go new file mode 100644 index 000000000000..9b1de12b182d --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go @@ -0,0 +1,156 @@ +package in_toto + +import ( + "crypto/x509" + "fmt" + "net/url" +) + +const ( + AllowAllConstraint = "*" +) + +// CertificateConstraint defines the attributes a certificate must have to act as a functionary. +// A wildcard `*` allows any value in the specified attribute, where as an empty array or value +// asserts that the certificate must have nothing for that attribute. A certificate must have +// every value defined in a constraint to match. +type CertificateConstraint struct { + CommonName string `json:"common_name"` + DNSNames []string `json:"dns_names"` + Emails []string `json:"emails"` + Organizations []string `json:"organizations"` + Roots []string `json:"roots"` + URIs []string `json:"uris"` +} + +// checkResult is a data structure used to hold +// certificate constraint errors +type checkResult struct { + errors []error +} + +// newCheckResult initializes a new checkResult +func newCheckResult() *checkResult { + return &checkResult{ + errors: make([]error, 0), + } +} + +// evaluate runs a constraint check on a certificate +func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult { + err := constraintCheck(cert) + if err != nil { + cr.errors = append(cr.errors, err) + } + return cr +} + +// error reduces all of the errors into one error with a +// combined error message. If there are no errors, nil +// will be returned. +func (cr *checkResult) error() error { + if len(cr.errors) == 0 { + return nil + } + return fmt.Errorf("cert failed constraints check: %+q", cr.errors) +} + +// Check tests the provided certificate against the constraint. An error is returned if the certificate +// fails any of the constraints. nil is returned if the certificate passes all of the constraints. +func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + return newCheckResult(). + evaluate(cert, cc.checkCommonName). + evaluate(cert, cc.checkDNSNames). + evaluate(cert, cc.checkEmails). + evaluate(cert, cc.checkOrganizations). + evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)). + evaluate(cert, cc.checkURIs). + error() +} + +// checkCommonName verifies that the certificate's common name matches the constraint. +func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error { + return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName}) +} + +// checkDNSNames verifies that the certificate's dns names matches the constraint. +func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error { + return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames) +} + +// checkEmails verifies that the certificate's emails matches the constraint. +func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error { + return checkCertConstraint("email", cc.Emails, cert.EmailAddresses) +} + +// checkOrganizations verifies that the certificate's organizations matches the constraint. 
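To make the wildcard/empty/exact semantics above concrete, here is a sketch of a constraint as a caller might declare and serialize it. All values are hypothetical; the field names come from the struct tags above.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	cc := in_toto.CertificateConstraint{
		CommonName:    "*",                         // any common name
		Emails:        []string{"dev@example.com"}, // exactly this email
		DNSNames:      []string{},                  // no DNS names allowed
		Organizations: []string{"example"},         // exactly this org
		Roots:         []string{"*"},               // any configured root
		URIs:          []string{},                  // no URIs allowed
	}
	out, err := json.MarshalIndent(cc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```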
+func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error { + return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization) +} + +// checkRoots verifies that the certificate's roots matches the constraint. +// The certificates trust chain must also be verified. +func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error { + return func(cert *x509.Certificate) error { + _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool) + if err != nil { + return fmt.Errorf("failed to verify roots: %w", err) + } + return checkCertConstraint("root", cc.Roots, rootCAIDs) + } +} + +// checkURIs verifies that the certificate's URIs matches the constraint. +func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error { + return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs)) +} + +// urisToStrings is a helper that converts a list of URL objects to the string that represents them +func urisToStrings(uris []*url.URL) []string { + res := make([]string, 0, len(uris)) + for _, uri := range uris { + res = append(res, uri.String()) + } + + return res +} + +// checkCertConstraint tests that the provided test values match the allowed values of the constraint. +// All allowed values must be met one-to-one to be considered a successful match. +func checkCertConstraint(attributeName string, constraints, values []string) error { + // If the only constraint is to allow all, the check succeeds + if len(constraints) == 1 && constraints[0] == AllowAllConstraint { + return nil + } + + if len(constraints) == 1 && constraints[0] == "" { + constraints = []string{} + } + + if len(values) == 1 && values[0] == "" { + values = []string{} + } + + // If no constraints are specified, but the certificate has values for the attribute, then the check fails + if len(constraints) == 0 && len(values) > 0 { + return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName) + } + + unmet := NewSet(constraints...) + for _, v := range values { + // if the cert has a value we didn't expect, fail early + if !unmet.Has(v) { + return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints) + } + + // consider the constraint met + unmet.Remove(v) + } + + // if we have any unmet left after going through each test value, fail. + if len(unmet) > 0 { + return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints) + } + + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go new file mode 100644 index 000000000000..bdfc65d69f99 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go @@ -0,0 +1,30 @@ +package in_toto + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" +) + +/* +getHashMapping returns a mapping from hash algorithm to supported hash +interface. +*/ +func getHashMapping() map[string]func() hash.Hash { + return map[string]func() hash.Hash{ + "sha256": sha256.New, + "sha512": sha512.New, + "sha384": sha512.New384, + } +} + +/* +hashToHex calculates the hash over data based on hash algorithm h. +*/ +func hashToHex(h hash.Hash, data []byte) []byte { + h.Write(data) + // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends + // the hash to the passed data. 
So instead of having only the hash + // we would get: "dataHASH" + return h.Sum(nil) +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go new file mode 100644 index 000000000000..7de482821ad4 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -0,0 +1,670 @@ +package in_toto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails +var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type") + +// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file +var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)") + +// ErrUnsupportedKeyType is returned when we are dealing with a key type different to ed25519 or RSA +var ErrUnsupportedKeyType = errors.New("unsupported key type") + +// ErrInvalidSignature is returned when the signature is invalid +var ErrInvalidSignature = errors.New("invalid signature") + +// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519 +var ErrInvalidKey = errors.New("invalid key") + +const ( + rsaKeyType string = "rsa" + ecdsaKeyType string = "ecdsa" + ed25519KeyType string = "ed25519" + rsassapsssha256Scheme string = "rsassa-pss-sha256" + ecdsaSha2nistp224 string = "ecdsa-sha2-nistp224" + ecdsaSha2nistp256 string = "ecdsa-sha2-nistp256" + ecdsaSha2nistp384 string = "ecdsa-sha2-nistp384" + ecdsaSha2nistp521 string = "ecdsa-sha2-nistp521" + ed25519Scheme string = "ed25519" + pemPublicKey string = "PUBLIC KEY" + pemPrivateKey string = "PRIVATE KEY" + pemRSAPrivateKey string = "RSA PRIVATE KEY" +) + +/* +getSupportedKeyIDHashAlgorithms returns a string slice of supported +KeyIDHashAlgorithms. We need to use this function instead of a constant, +because Go does not support global constant slices. +*/ +func getSupportedKeyIDHashAlgorithms() Set { + return NewSet("sha256", "sha512") +} + +/* +getSupportedRSASchemes returns a string slice of supported RSA Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedRSASchemes() []string { + return []string{rsassapsssha256Scheme} +} + +/* +getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedEcdsaSchemes() []string { + return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521} +} + +/* +getSupportedEd25519Schemes returns a string slice of supported ed25519 Key +schemes. We need to use this function instead of a constant because Go does +not support global constant slices. +*/ +func getSupportedEd25519Schemes() []string { + return []string{ed25519Scheme} +} + +/* +generateKeyID creates a partial key map and generates the key ID +based on the created partial key map via the SHA256 method. +The resulting keyID will be directly saved in the corresponding key object. +On success generateKeyID will return nil, in case of errors while encoding +there will be an error. 
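The h.Sum(nil) remark in hashToHex above deserves a standalone illustration, since hash.Hash.Sum appending the digest to its argument, rather than hashing the argument, is a classic gotcha (standard library only):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	h := sha256.New()
	h.Write([]byte("data"))

	// Sum appends the digest to its argument; it does not hash it.
	digest := h.Sum(nil)            // the 32-byte digest alone
	tagged := h.Sum([]byte("data")) // "data" followed by the same digest

	fmt.Printf("%x\n", digest)
	fmt.Println(len(digest), "vs", len(tagged), "bytes") // 32 vs 36 bytes
}
```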
+*/ +func (k *Key) generateKeyID() error { + // Create partial key map used to create the keyid + // Unfortunately, we can't use the Key object because this also carries + // yet unwanted fields, such as KeyID and KeyVal.Private and therefore + // produces a different hash. We generate the keyID exactly as we do in + // the securesystemslib to keep interoperability between other in-toto + // implementations. + var keyToBeHashed = map[string]interface{}{ + "keytype": k.KeyType, + "scheme": k.Scheme, + "keyid_hash_algorithms": k.KeyIDHashAlgorithms, + "keyval": map[string]string{ + "public": k.KeyVal.Public, + }, + } + keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed) + if err != nil { + return err + } + // calculate sha256 and return string representation of keyID + keyHashed := sha256.Sum256(keyCanonical) + k.KeyID = fmt.Sprintf("%x", keyHashed) + err = validateKey(*k) + if err != nil { + return err + } + return nil +} + +/* +generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType. +If successful it returns a PEM block as []byte slice. This function should always +succeed, if keyBytes is empty the PEM block will have an empty byte block. +Therefore only header and footer will exist. +*/ +func generatePEMBlock(keyBytes []byte, pemType string) []byte { + // construct PEM block + pemBlock := &pem.Block{ + Type: pemType, + Headers: nil, + Bytes: keyBytes, + } + return pem.EncodeToMemory(pemBlock) +} + +/* +setKeyComponents sets all components in our key object. +Furthermore it makes sure to remove any trailing and leading whitespaces or newlines. +We treat key types differently for interoperability reasons to the in-toto python +implementation and the securesystemslib. +*/ +func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error { + // assume we have a privateKey if the key size is bigger than 0 + + switch keyType { + case rsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ecdsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ed25519KeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)), + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType) + } + k.KeyType = keyType + k.Scheme = scheme + k.KeyIDHashAlgorithms = KeyIDHashAlgorithms + if err := k.generateKeyID(); err != nil { + return err + } + return nil +} + +/* +parseKey tries to parse a PEM []byte slice. Using the following standards +in the given order: + + - PKCS8 + - PKCS1 + - PKIX + +On success it returns the parsed key and nil. 
+On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parseKey(data []byte) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseCertificate(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +/* +decodeAndParse receives potential PEM bytes decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything, that could not be parsed as PEM block. + // Therefore we can drop this via using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parseKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +LoadKey loads the key file at specified file path into the key object. +It automatically derives the PEM type and the key type. +Right now the following PEM types are supported: + + - PKCS1 for private keys + - PKCS8 for private keys + - PKIX for public keys + +The following key types are supported and will be automatically assigned to +the key type field: + + - ed25519 + - rsa + - ecdsa + +The following schemes are supported: + + - ed25519 -> ed25519 + - rsa -> rsassa-pss-sha256 + - ecdsa -> ecdsa-sha256-nistp256 + +Note that, this behavior is consistent with the securesystemslib, except for +ecdsa. We do not use the scheme string as key type in in-toto-golang. +Instead we are going with a ecdsa/ecdsa-sha2-nistp256 pair. + +On success it will return nil. The following errors can happen: + + - path not found or not readable + - no PEM block in the loaded file + - no valid PKCS8/PKCS1 private key or PKIX public key + - errors while marshalling + - unsupported key types +*/ +func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms) + if err != nil { + return err + } + + return pemFile.Close() +} + +func (k *Key) LoadKeyDefaults(path string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReaderDefaults(pemFile) + if err != nil { + return err + } + + return pemFile.Close() +} + +// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise. 
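A short usage sketch for the two loading entry points just documented. The key path is hypothetical; the scheme string comes from the constants above.

```go
package main

import (
	"fmt"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	// Derive scheme and keyid hash algorithms from the parsed key type,
	// per getDefaultKeyScheme (e.g. an RSA key gets rsassa-pss-sha256).
	var key in_toto.Key
	if err := key.LoadKeyDefaults("keys/alice.pem"); err != nil { // hypothetical path
		panic(err)
	}
	fmt.Println(key.KeyType, key.Scheme, key.KeyID)

	// Or pin them explicitly with the original entry point.
	var key2 in_toto.Key
	err := key2.LoadKey("keys/alice.pem", "rsassa-pss-sha256", []string{"sha256", "sha512"})
	if err != nil {
		panic(err)
	}
}
```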
+func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms) +} + +func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms) +} + +func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { + keyIDHashAlgorithms = []string{"sha256", "sha512"} + + switch key.(type) { + case *rsa.PublicKey, *rsa.PrivateKey: + scheme = rsassapsssha256Scheme + case ed25519.PrivateKey, ed25519.PublicKey: + scheme = ed25519Scheme + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + scheme = ecdsaSha2nistp256 + case *x509.Certificate: + return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + default: + err = ErrUnsupportedKeyType + } + + return scheme, keyIDHashAlgorithms, err +} + +func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + + switch key.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *rsa.PrivateKey: + // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280) + // This behavior is consistent to the securesystemslib + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public()) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case ed25519.PublicKey: + if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case ed25519.PrivateKey: + pubKeyBytes := key.(ed25519.PrivateKey).Public() + if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *ecdsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public()) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *ecdsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey)) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *x509.Certificate: + err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, 
keyIDHashAlgorithms) + if err != nil { + return err + } + + k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData)) + + default: + // We should never get here, because we implement all from Go supported Key Types + return errors.New("unexpected Error in LoadKey function") + } + + return nil +} + +/* +GenerateSignature will automatically detect the key type and sign the signable data +with the provided key. If everything goes right GenerateSignature will return +a for the key valid signature and err=nil. If something goes wrong it will +return a not initialized signature and an error. Possible errors are: + + - ErrNoPEMBlock + - ErrUnsupportedKeyType + +Currently supported is only one scheme per key. + +Note that in-toto-golang has different requirements to an ecdsa key. +In in-toto-golang we use the string 'ecdsa' as string for the key type. +In the key scheme we use: ecdsa-sha2-nistp256. +*/ +func GenerateSignature(signable []byte, key Key) (Signature, error) { + err := validateKey(key) + if err != nil { + return Signature{}, err + } + var signature Signature + var signatureBuffer []byte + hashMapping := getHashMapping() + // The following switch block is needed for keeping interoperability + // with the securesystemslib and the python implementation + // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded. + switch key.KeyType { + case rsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return Signature{}, err + } + parsedKey, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return Signature{}, ErrKeyKeyTypeMismatch + } + switch key.Scheme { + case rsassapsssha256Scheme: + hashed := hashToHex(hashMapping["sha256"](), signable) + // We use rand.Reader as secure random source for rsa.SignPSS() + signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed, + &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) + if err != nil { + return signature, err + } + default: + // supported key schemes will get checked in validateKey + panic("unexpected Error in GenerateSignature function") + } + case ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return Signature{}, err + } + parsedKey, ok := parsedKey.(*ecdsa.PrivateKey) + if !ok { + return Signature{}, ErrKeyKeyTypeMismatch + } + curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize + var hashed []byte + if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { + return Signature{}, ErrCurveSizeSchemeMismatch + } + // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 + // We determine the curve size and choose the correct hashing + // method based on the curveSize + switch { + case curveSize <= 256: + hashed = hashToHex(hashMapping["sha256"](), signable) + case 256 < curveSize && curveSize <= 384: + hashed = hashToHex(hashMapping["sha384"](), signable) + case curveSize > 384: + hashed = hashToHex(hashMapping["sha512"](), signable) + default: + panic("unexpected Error in GenerateSignature function") + } + // Generate the ecdsa signature on the same way, as we do in the securesystemslib + // We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES + // into an ASN.1 Object. 
+ signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:]) + if err != nil { + return signature, err + } + case ed25519KeyType: + // We do not need a scheme switch here, because ed25519 + // only consist of sha256 and curve25519. + privateHex, err := hex.DecodeString(key.KeyVal.Private) + if err != nil { + return signature, ErrInvalidHexString + } + // Note: We can directly use the key for signing and do not + // need to use ed25519.NewKeyFromSeed(). + signatureBuffer = ed25519.Sign(privateHex, signable) + default: + // We should never get here, because we call validateKey in the first + // line of the function. + panic("unexpected Error in GenerateSignature function") + } + signature.Sig = hex.EncodeToString(signatureBuffer) + signature.KeyID = key.KeyID + signature.Certificate = key.KeyVal.Certificate + return signature, nil +} + +/* +VerifySignature will verify unverified byte data via a passed key and signature. +Supported key types are: + + - rsa + - ed25519 + - ecdsa + +When encountering an RSA key, VerifySignature will decode the PEM block in the key +and will call rsa.VerifyPSS() for verifying the RSA signature. +When encountering an ed25519 key, VerifySignature will decode the hex string encoded +public key and will use ed25519.Verify() for verifying the ed25519 signature. +When the given key is an ecdsa key, VerifySignature will unmarshall the ASN1 object +and will use the retrieved ecdsa components 'r' and 's' for verifying the signature. +On success it will return nil. In case of an unsupported key type or any other error +it will return an error. + +Note that in-toto-golang has different requirements to an ecdsa key. +In in-toto-golang we use the string 'ecdsa' as string for the key type. +In the key scheme we use: ecdsa-sha2-nistp256. 
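GenerateSignature above and VerifySignature below form a natural roundtrip; a sketch with an ed25519 key (the key file path is hypothetical, everything else is the API from this diff):

```go
package main

import (
	"fmt"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	var key in_toto.Key
	// Hypothetical path to a PKCS8 ed25519 private key in PEM form.
	if err := key.LoadKeyDefaults("keys/functionary.pem"); err != nil {
		panic(err)
	}

	payload := []byte(`{"_type":"link"}`) // any canonical bytes to sign

	sig, err := in_toto.GenerateSignature(payload, key)
	if err != nil {
		panic(err)
	}

	// nil on success; a wrapped ErrInvalidSignature on tampered input.
	if err := in_toto.VerifySignature(key, sig, payload); err != nil {
		panic(err)
	}
	fmt.Println("verified, keyid:", sig.KeyID)
}
```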
+*/ +func VerifySignature(key Key, sig Signature, unverified []byte) error { + err := validateKey(key) + if err != nil { + return err + } + sigBytes, err := hex.DecodeString(sig.Sig) + if err != nil { + return err + } + hashMapping := getHashMapping() + switch key.KeyType { + case rsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + parsedKey, ok := parsedKey.(*rsa.PublicKey) + if !ok { + return ErrKeyKeyTypeMismatch + } + switch key.Scheme { + case rsassapsssha256Scheme: + hashed := hashToHex(hashMapping["sha256"](), unverified) + err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) + if err != nil { + return fmt.Errorf("%w: %s", ErrInvalidSignature, err) + } + default: + // supported key schemes will get checked in validateKey + panic("unexpected Error in VerifySignature function") + } + case ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + parsedKey, ok := parsedKey.(*ecdsa.PublicKey) + if !ok { + return ErrKeyKeyTypeMismatch + } + curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize + var hashed []byte + if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { + return ErrCurveSizeSchemeMismatch + } + // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 + // We determine the curve size and choose the correct hashing + // method based on the curveSize + switch { + case curveSize <= 256: + hashed = hashToHex(hashMapping["sha256"](), unverified) + case 256 < curveSize && curveSize <= 384: + hashed = hashToHex(hashMapping["sha384"](), unverified) + case curveSize > 384: + hashed = hashToHex(hashMapping["sha512"](), unverified) + default: + panic("unexpected Error in VerifySignature function") + } + if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok { + return ErrInvalidSignature + } + case ed25519KeyType: + // We do not need a scheme switch here, because ed25519 + // only consist of sha256 and curve25519. + pubHex, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return ErrInvalidHexString + } + if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok { + return fmt.Errorf("%w: ed25519", ErrInvalidSignature) + } + default: + // We should never get here, because we call validateKey in the first + // line of the function. 
+ panic("unexpected Error in VerifySignature function") + } + return nil +} + +/* +VerifyCertificateTrust verifies that the certificate has a chain of trust +to a root in rootCertPool, possibly using any intermediates in +intermediateCertPool +*/ +func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) { + verifyOptions := x509.VerifyOptions{ + Roots: rootCertPool, + Intermediates: intermediateCertPool, + } + chains, err := cert.Verify(verifyOptions) + if len(chains) == 0 || err != nil { + return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates") + } + return chains, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go new file mode 100644 index 000000000000..52373aa75f57 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go @@ -0,0 +1,227 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://golang.org/LICENSE. + +// this is a modified version of path.Match that removes handling of path separators + +package in_toto + +import ( + "errors" + "unicode/utf8" +) + +// errBadPattern indicates a pattern was malformed. +var errBadPattern = errors.New("syntax error in pattern") + +// match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-/ characters +// '?' matches any single non-/ character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +func match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches everything + return true, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + // Before returning false with no error, + // check that the remainder of the pattern is syntactically valid. + for len(pattern) > 0 { + _, chunk, pattern = scanChunk(pattern) + if _, _, err := matchChunk(chunk, ""); err != nil { + return false, err + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. 
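Because match is unexported, the quickest way to get a feel for the grammar above is the stdlib path.Match it was copied from; the one behavioral divergence is that this modified copy lets `*` cross path separators:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Same grammar, via the stdlib original.
	fmt.Println(path.Match("foo.*", "foo.tar.gz")) // true <nil>
	fmt.Println(path.Match("ba[rz]", "baz"))       // true <nil>
	fmt.Println(path.Match(`\*`, "*"))             // true <nil>

	// The divergence: stdlib '*' stops at '/'; the vendored copy,
	// with separator handling removed, would match here.
	fmt.Println(path.Match("dist/*", "dist/a/b")) // false <nil>
}
```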
+func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + // failed records whether the match has failed. + // After the match fails, the loop continues on processing chunk, + // checking that the pattern is well-formed but no longer reading s. + failed := false + for len(chunk) > 0 { + if !failed && len(s) == 0 { + failed = true + } + switch chunk[0] { + case '[': + // character class + var r rune + if !failed { + var n int + r, n = utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + // possibly negated + negated := false + if len(chunk) > 0 && chunk[0] == '^' { + negated = true + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return "", false, err + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return "", false, err + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + failed = true + } + + case '?': + if !failed { + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + return "", false, errBadPattern + } + fallthrough + + default: + if !failed { + if chunk[0] != s[0] { + failed = true + } + s = s[1:] + } + chunk = chunk[1:] + } + } + if failed { + return "", false, nil + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. 
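+// For example (illustrative): getEsc("a-z]") returns ('a', "-z]", nil).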
+func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = errBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = errBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = errBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = errBadPattern + } + return +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go new file mode 100644 index 000000000000..e22b79da320e --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -0,0 +1,1073 @@ +package in_toto + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" +) + +/* +KeyVal contains the actual values of a key, as opposed to key metadata such as +a key identifier or key type. For RSA keys, the key value is a pair of public +and private keys in PEM format stored as strings. For public keys the Private +field may be an empty string. +*/ +type KeyVal struct { + Private string `json:"private"` + Public string `json:"public"` + Certificate string `json:"certificate,omitempty"` +} + +/* +Key represents a generic in-toto key that contains key metadata, such as an +identifier, supported hash algorithms to create the identifier, the key type +and the supported signature scheme, and the actual key value. +*/ +type Key struct { + KeyID string `json:"keyid"` + KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"` + KeyType string `json:"keytype"` + KeyVal KeyVal `json:"keyval"` + Scheme string `json:"scheme"` +} + +// PayloadType is the payload type used for links and layouts. +const PayloadType = "application/vnd.in-toto+json" + +// ErrEmptyKeyField will be thrown if a field in our Key struct is empty. +var ErrEmptyKeyField = errors.New("empty field in key") + +// ErrInvalidHexString will be thrown, if a string doesn't match a hex string. +var ErrInvalidHexString = errors.New("invalid hex string") + +// ErrSchemeKeyTypeMismatch will be thrown, if the given scheme and key type are not supported together. +var ErrSchemeKeyTypeMismatch = errors.New("the scheme and key type are not supported together") + +// ErrUnsupportedKeyIDHashAlgorithms will be thrown, if the specified KeyIDHashAlgorithms is not supported. +var ErrUnsupportedKeyIDHashAlgorithms = errors.New("the given keyID hash algorithm is not supported") + +// ErrKeyKeyTypeMismatch will be thrown, if the specified keyType does not match the key +var ErrKeyKeyTypeMismatch = errors.New("the given key does not match its key type") + +// ErrNoPublicKey gets returned when the private key value is not empty. 
+var ErrNoPublicKey = errors.New("the given key is not a public key") + +// ErrCurveSizeSchemeMismatch gets returned, when the scheme and curve size are incompatible +// for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224" +var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size") + +const ( + // StatementInTotoV01 is the statement type for the generalized link format + // containing statements. This is constant for all predicate types. + StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + // PredicateSPDX represents a SBOM using the SPDX standard. + // The SPDX mandates 'spdxVersion' field, so predicate type can omit + // version. + PredicateSPDX = "https://spdx.dev/Document" + // PredicateCycloneDX represents a CycloneDX SBOM + PredicateCycloneDX = "https://cyclonedx.org/bom" + // PredicateLinkV1 represents an in-toto 0.9 link. + PredicateLinkV1 = "https://in-toto.io/Link/v1" +) + +// ErrInvalidPayloadType indicates that the envelope used an unkown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +/* +matchEcdsaScheme checks if the scheme suffix, matches the ecdsa key +curve size. We do not need a full regex match here, because +our validateKey functions are already checking for a valid scheme string. +*/ +func matchEcdsaScheme(curveSize int, scheme string) error { + if !strings.HasSuffix(scheme, strconv.Itoa(curveSize)) { + return ErrCurveSizeSchemeMismatch + } + return nil +} + +/* +validateHexString is used to validate that a string passed to it contains +only valid hexadecimal characters. +*/ +func validateHexString(str string) error { + formatCheck, _ := regexp.MatchString("^[a-fA-F0-9]+$", str) + if !formatCheck { + return fmt.Errorf("%w: %s", ErrInvalidHexString, str) + } + return nil +} + +/* +validateKeyVal validates the KeyVal struct. In case of an ed25519 key, +it will check for a hex string for private and public key. In any other +case, validateKeyVal will try to decode the PEM block. If this succeeds, +we have a valid PEM block in our KeyVal struct. On success it will return nil +on failure it will return the corresponding error. This can be either +an ErrInvalidHexString, an ErrNoPEMBlock or an ErrUnsupportedKeyType +if the KeyType is unknown. +*/ +func validateKeyVal(key Key) error { + switch key.KeyType { + case ed25519KeyType: + // We cannot use matchPublicKeyKeyType or matchPrivateKeyKeyType here, + // because we retrieve the key not from PEM. Hence we are dealing with + // plain ed25519 key bytes. These bytes can't be typechecked like in the + // matchKeyKeytype functions. 
+ err := validateHexString(key.KeyVal.Public) + if err != nil { + return err + } + if key.KeyVal.Private != "" { + err := validateHexString(key.KeyVal.Private) + if err != nil { + return err + } + } + case rsaKeyType, ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + err = matchPublicKeyKeyType(parsedKey, key.KeyType) + if err != nil { + return err + } + if key.KeyVal.Private != "" { + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return err + } + err = matchPrivateKeyKeyType(parsedKey, key.KeyType) + if err != nil { + return err + } + } + default: + return ErrUnsupportedKeyType + } + return nil +} + +/* +matchPublicKeyKeyType validates an interface if it can be asserted to a +the RSA or ECDSA public key type. We can only check RSA and ECDSA this way, +because we are storing them in PEM format. Ed25519 keys are stored as plain +ed25519 keys encoded as hex strings, thus we have no metadata for them. +This function will return nil on success. If the key type does not match +it will return an ErrKeyKeyTypeMismatch. +*/ +func matchPublicKeyKeyType(key interface{}, keyType string) error { + switch key.(type) { + case *rsa.PublicKey: + if keyType != rsaKeyType { + return ErrKeyKeyTypeMismatch + } + case *ecdsa.PublicKey: + if keyType != ecdsaKeyType { + return ErrKeyKeyTypeMismatch + } + default: + return ErrInvalidKey + } + return nil +} + +/* +matchPrivateKeyKeyType validates an interface if it can be asserted to a +the RSA or ECDSA private key type. We can only check RSA and ECDSA this way, +because we are storing them in PEM format. Ed25519 keys are stored as plain +ed25519 keys encoded as hex strings, thus we have no metadata for them. +This function will return nil on success. If the key type does not match +it will return an ErrKeyKeyTypeMismatch. +*/ +func matchPrivateKeyKeyType(key interface{}, keyType string) error { + // we can only check RSA and ECDSA this way, because we are storing them in PEM + // format. ed25519 keys are stored as plain ed25519 keys encoded as hex strings + // so we have no metadata for them. + switch key.(type) { + case *rsa.PrivateKey: + if keyType != rsaKeyType { + return ErrKeyKeyTypeMismatch + } + case *ecdsa.PrivateKey: + if keyType != ecdsaKeyType { + return ErrKeyKeyTypeMismatch + } + default: + return ErrInvalidKey + } + return nil +} + +/* +matchKeyTypeScheme checks if the specified scheme matches our specified +keyType. If the keyType is not supported it will return an +ErrUnsupportedKeyType. If the keyType and scheme do not match it will return +an ErrSchemeKeyTypeMismatch. If the specified keyType and scheme are +compatible matchKeyTypeScheme will return nil. +*/ +func matchKeyTypeScheme(key Key) error { + switch key.KeyType { + case rsaKeyType: + for _, scheme := range getSupportedRSASchemes() { + if key.Scheme == scheme { + return nil + } + } + case ed25519KeyType: + for _, scheme := range getSupportedEd25519Schemes() { + if key.Scheme == scheme { + return nil + } + } + case ecdsaKeyType: + for _, scheme := range getSupportedEcdsaSchemes() { + if key.Scheme == scheme { + return nil + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, key.KeyType) + } + return ErrSchemeKeyTypeMismatch +} + +/* +validateKey checks the outer key object (everything, except the KeyVal struct). 
+It verifies the keyID for being a hex string and checks for empty fields. +On success it will return nil, on error it will return the corresponding error. +Either: ErrEmptyKeyField or ErrInvalidHexString. +*/ +func validateKey(key Key) error { + err := validateHexString(key.KeyID) + if err != nil { + return err + } + // This probably can be done more elegant with reflection + // but we care about performance, do we?! + if key.KeyType == "" { + return fmt.Errorf("%w: keytype", ErrEmptyKeyField) + } + if key.KeyVal.Public == "" && key.KeyVal.Certificate == "" { + return fmt.Errorf("%w: keyval.public and keyval.certificate cannot both be blank", ErrEmptyKeyField) + } + if key.Scheme == "" { + return fmt.Errorf("%w: scheme", ErrEmptyKeyField) + } + err = matchKeyTypeScheme(key) + if err != nil { + return err + } + // only check for supported KeyIDHashAlgorithms, if the variable has been set + if key.KeyIDHashAlgorithms != nil { + supportedKeyIDHashAlgorithms := getSupportedKeyIDHashAlgorithms() + if !supportedKeyIDHashAlgorithms.IsSubSet(NewSet(key.KeyIDHashAlgorithms...)) { + return fmt.Errorf("%w: %#v, supported are: %#v", ErrUnsupportedKeyIDHashAlgorithms, key.KeyIDHashAlgorithms, getSupportedKeyIDHashAlgorithms()) + } + } + return nil +} + +/* +validatePublicKey is a wrapper around validateKey. It test if the private key +value in the key is empty and then validates the key via calling validateKey. +On success it will return nil, on error it will return an ErrNoPublicKey error. +*/ +func validatePublicKey(key Key) error { + if key.KeyVal.Private != "" { + return ErrNoPublicKey + } + err := validateKey(key) + if err != nil { + return err + } + return nil +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the Key, which was used to create the signature and the signature data. The +used signature scheme is found in the corresponding Key. +*/ +type Signature struct { + KeyID string `json:"keyid"` + Sig string `json:"sig"` + Certificate string `json:"cert,omitempty"` +} + +// GetCertificate returns the parsed x509 certificate attached to the signature, +// if it exists. +func (sig Signature) GetCertificate() (Key, error) { + key := Key{} + if len(sig.Certificate) == 0 { + return key, errors.New("Signature has empty Certificate") + } + + err := key.LoadKeyReaderDefaults(strings.NewReader(sig.Certificate)) + return key, err +} + +/* +validateSignature is a function used to check if a passed signature is valid, +by inspecting the key ID and the signature itself. +*/ +func validateSignature(signature Signature) error { + if err := validateHexString(signature.KeyID); err != nil { + return err + } + if err := validateHexString(signature.Sig); err != nil { + return err + } + return nil +} + +/* +validateSliceOfSignatures is a helper function used to validate multiple +signatures stored in a slice. +*/ +func validateSliceOfSignatures(slice []Signature) error { + for _, signature := range slice { + if err := validateSignature(signature); err != nil { + return err + } + } + return nil +} + +/* +Link represents the evidence of a supply chain step performed by a functionary. +It should be contained in a generic Metablock object, which provides +functionality for signing and signature verification, and reading from and +writing to disk. 
+*/ +type Link struct { + Type string `json:"_type"` + Name string `json:"name"` + Materials map[string]interface{} `json:"materials"` + Products map[string]interface{} `json:"products"` + ByProducts map[string]interface{} `json:"byproducts"` + Command []string `json:"command"` + Environment map[string]interface{} `json:"environment"` +} + +/* +validateArtifacts is a general function used to validate products and materials. +*/ +func validateArtifacts(artifacts map[string]interface{}) error { + for artifactName, artifact := range artifacts { + artifactValue := reflect.ValueOf(artifact).MapRange() + for artifactValue.Next() { + value := artifactValue.Value().Interface().(string) + hashType := artifactValue.Key().Interface().(string) + if err := validateHexString(value); err != nil { + return fmt.Errorf("in artifact '%s', %s hash value: %s", + artifactName, hashType, err.Error()) + } + } + } + return nil +} + +/* +validateLink is a function used to ensure that a passed item of type Link +matches the necessary format. +*/ +func validateLink(link Link) error { + if link.Type != "link" { + return fmt.Errorf("invalid type for link '%s': should be 'link'", + link.Name) + } + + if err := validateArtifacts(link.Materials); err != nil { + return fmt.Errorf("in materials of link '%s': %s", link.Name, + err.Error()) + } + + if err := validateArtifacts(link.Products); err != nil { + return fmt.Errorf("in products of link '%s': %s", link.Name, + err.Error()) + } + + return nil +} + +/* +LinkNameFormat represents a format string used to create the filename for a +signed Link (wrapped in a Metablock). It consists of the name of the link and +the first 8 characters of the signing key id. E.g.: + + fmt.Sprintf(LinkNameFormat, "package", + "2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5f417c12f0085ff498") + // returns "package.2f89b9272.link" +*/ +const LinkNameFormat = "%s.%.8s.link" +const PreliminaryLinkNameFormat = ".%s.%.8s.link-unfinished" + +/* +LinkNameFormatShort is for links that are not signed, e.g.: + + fmt.Sprintf(LinkNameFormatShort, "unsigned") + // returns "unsigned.link" +*/ +const LinkNameFormatShort = "%s.link" +const LinkGlobFormat = "%s.????????.link" + +/* +SublayoutLinkDirFormat represents the format of the name of the directory for +sublayout links during the verification workflow. +*/ +const SublayoutLinkDirFormat = "%s.%.8s" + +/* +SupplyChainItem summarizes common fields of the two available supply chain +item types, Inspection and Step. +*/ +type SupplyChainItem struct { + Name string `json:"name"` + ExpectedMaterials [][]string `json:"expected_materials"` + ExpectedProducts [][]string `json:"expected_products"` +} + +/* +validateArtifactRule calls UnpackRule to validate that the passed rule conforms +with any of the available rule formats. +*/ +func validateArtifactRule(rule []string) error { + if _, err := UnpackRule(rule); err != nil { + return err + } + return nil +} + +/* +validateSliceOfArtifactRules iterates over passed rules to validate them. +*/ +func validateSliceOfArtifactRules(rules [][]string) error { + for _, rule := range rules { + if err := validateArtifactRule(rule); err != nil { + return err + } + } + return nil +} + +/* +validateSupplyChainItem is used to validate the common elements found in both +steps and inspections. Here, the function primarily ensures that the name of +a supply chain item isn't empty. 
+*/ +func validateSupplyChainItem(item SupplyChainItem) error { + if item.Name == "" { + return fmt.Errorf("name cannot be empty") + } + + if err := validateSliceOfArtifactRules(item.ExpectedMaterials); err != nil { + return fmt.Errorf("invalid material rule: %s", err) + } + if err := validateSliceOfArtifactRules(item.ExpectedProducts); err != nil { + return fmt.Errorf("invalid product rule: %s", err) + } + return nil +} + +/* +Inspection represents an in-toto supply chain inspection, whose command in the +Run field is executed during final product verification, generating unsigned +link metadata. Materials and products used/produced by the inspection are +constrained by the artifact rules in the inspection's ExpectedMaterials and +ExpectedProducts fields. +*/ +type Inspection struct { + Type string `json:"_type"` + Run []string `json:"run"` + SupplyChainItem +} + +/* +validateInspection ensures that a passed inspection is valid and matches the +necessary format of an inspection. +*/ +func validateInspection(inspection Inspection) error { + if err := validateSupplyChainItem(inspection.SupplyChainItem); err != nil { + return fmt.Errorf("inspection %s", err.Error()) + } + if inspection.Type != "inspection" { + return fmt.Errorf("invalid Type value for inspection '%s': should be "+ + "'inspection'", inspection.SupplyChainItem.Name) + } + return nil +} + +/* +Step represents an in-toto step of the supply chain performed by a functionary. +During final product verification in-toto looks for corresponding Link +metadata, which is used as signed evidence that the step was performed +according to the supply chain definition. Materials and products used/produced +by the step are constrained by the artifact rules in the step's +ExpectedMaterials and ExpectedProducts fields. +*/ +type Step struct { + Type string `json:"_type"` + PubKeys []string `json:"pubkeys"` + CertificateConstraints []CertificateConstraint `json:"cert_constraints,omitempty"` + ExpectedCommand []string `json:"expected_command"` + Threshold int `json:"threshold"` + SupplyChainItem +} + +// CheckCertConstraints returns true if the provided certificate matches at least one +// of the constraints for this step. +func (s Step) CheckCertConstraints(key Key, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + if len(s.CertificateConstraints) == 0 { + return fmt.Errorf("no constraints found") + } + + _, possibleCert, err := decodeAndParse([]byte(key.KeyVal.Certificate)) + if err != nil { + return err + } + + cert, ok := possibleCert.(*x509.Certificate) + if !ok { + return fmt.Errorf("not a valid certificate") + } + + for _, constraint := range s.CertificateConstraints { + err = constraint.Check(cert, rootCAIDs, rootCertPool, intermediateCertPool) + if err == nil { + return nil + } + } + if err != nil { + return err + } + + // this should not be reachable since there is at least one constraint, and the for loop only saw err != nil + return fmt.Errorf("unknown certificate constraint error") +} + +/* +validateStep ensures that a passed step is valid and matches the +necessary format of an step. 
+*/ +func validateStep(step Step) error { + if err := validateSupplyChainItem(step.SupplyChainItem); err != nil { + return fmt.Errorf("step %s", err.Error()) + } + if step.Type != "step" { + return fmt.Errorf("invalid Type value for step '%s': should be 'step'", + step.SupplyChainItem.Name) + } + for _, keyID := range step.PubKeys { + if err := validateHexString(keyID); err != nil { + return err + } + } + return nil +} + +/* +ISO8601DateSchema defines the format string of a timestamp following the +ISO 8601 standard. +*/ +const ISO8601DateSchema = "2006-01-02T15:04:05Z" + +/* +Layout represents the definition of a software supply chain. It lists the +sequence of steps required in the software supply chain and the functionaries +authorized to perform these steps. Functionaries are identified by their +public keys. In addition, the layout may list a sequence of inspections that +are executed during in-toto supply chain verification. A layout should be +contained in a generic Metablock object, which provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Layout struct { + Type string `json:"_type"` + Steps []Step `json:"steps"` + Inspect []Inspection `json:"inspect"` + Keys map[string]Key `json:"keys"` + RootCas map[string]Key `json:"rootcas,omitempty"` + IntermediateCas map[string]Key `json:"intermediatecas,omitempty"` + Expires string `json:"expires"` + Readme string `json:"readme"` +} + +// Go does not allow to pass `[]T` (slice with certain type) to a function +// that accepts `[]interface{}` (slice with generic type) +// We have to manually create the interface slice first, see +// https://golang.org/doc/faq#convert_slice_of_interface +// TODO: Is there a better way to do polymorphism for steps and inspections? +func (l *Layout) stepsAsInterfaceSlice() []interface{} { + stepsI := make([]interface{}, len(l.Steps)) + for i, v := range l.Steps { + stepsI[i] = v + } + return stepsI +} +func (l *Layout) inspectAsInterfaceSlice() []interface{} { + inspectionsI := make([]interface{}, len(l.Inspect)) + for i, v := range l.Inspect { + inspectionsI[i] = v + } + return inspectionsI +} + +// RootCAIDs returns a slice of all of the Root CA IDs +func (l *Layout) RootCAIDs() []string { + rootCAIDs := make([]string, 0, len(l.RootCas)) + for rootCAID := range l.RootCas { + rootCAIDs = append(rootCAIDs, rootCAID) + } + return rootCAIDs +} + +func validateLayoutKeys(keys map[string]Key) error { + for keyID, key := range keys { + if key.KeyID != keyID { + return fmt.Errorf("invalid key found") + } + err := validatePublicKey(key) + if err != nil { + return err + } + } + + return nil +} + +/* +validateLayout is a function used to ensure that a passed item of type Layout +matches the necessary format. 
+*/ +func validateLayout(layout Layout) error { + if layout.Type != "layout" { + return fmt.Errorf("invalid Type value for layout: should be 'layout'") + } + + if _, err := time.Parse(ISO8601DateSchema, layout.Expires); err != nil { + return fmt.Errorf("expiry time parsed incorrectly - date either" + + " invalid or of incorrect format") + } + + if err := validateLayoutKeys(layout.Keys); err != nil { + return err + } + + if err := validateLayoutKeys(layout.RootCas); err != nil { + return err + } + + if err := validateLayoutKeys(layout.IntermediateCas); err != nil { + return err + } + + var namesSeen = make(map[string]bool) + for _, step := range layout.Steps { + if namesSeen[step.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[step.Name] = true + + if err := validateStep(step); err != nil { + return err + } + } + for _, inspection := range layout.Inspect { + if namesSeen[inspection.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[inspection.Name] = true + } + return nil +} + +/* +Metablock is a generic container for signable in-toto objects such as Layout +or Link. It has two fields, one that contains the signable object and one that +contains corresponding signatures. Metablock also provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Metablock struct { + // NOTE: Whenever we want to access an attribute of `Signed` we have to + // perform type assertion, e.g. `metablock.Signed.(Layout).Keys` + // Maybe there is a better way to store either Layouts or Links in `Signed`? + // The notary folks seem to have separate container structs: + // https://github.com/theupdateframework/notary/blob/master/tuf/data/root.go#L10-L14 + // https://github.com/theupdateframework/notary/blob/master/tuf/data/targets.go#L13-L17 + // I implemented it this way, because there will be several functions that + // receive or return a Metablock, where the type of Signed has to be inferred + // on runtime, e.g. when iterating over links for a layout, and a link can + // turn out to be a layout (sublayout) + Signed interface{} `json:"signed"` + Signatures []Signature `json:"signatures"` +} + +type jsonField struct { + name string + omitempty bool +} + +/* +checkRequiredJSONFields checks that the passed map (obj) has keys for each of +the json tags in the passed struct type (typ), and returns an error otherwise. +Any json tags that contain the "omitempty" option be allowed to be optional. +*/ +func checkRequiredJSONFields(obj map[string]interface{}, + typ reflect.Type) error { + + // Create list of json tags, e.g. `json:"_type"` + attributeCount := typ.NumField() + allFields := make([]jsonField, 0) + for i := 0; i < attributeCount; i++ { + fieldStr := typ.Field(i).Tag.Get("json") + field := jsonField{ + name: fieldStr, + omitempty: false, + } + + if idx := strings.Index(fieldStr, ","); idx != -1 { + field.name = fieldStr[:idx] + field.omitempty = strings.Contains(fieldStr[idx+1:], "omitempty") + } + + allFields = append(allFields, field) + } + + // Assert that there's a key in the passed map for each tag + for _, field := range allFields { + if _, ok := obj[field.name]; !ok && !field.omitempty { + return fmt.Errorf("required field %s missing", field.name) + } + } + return nil +} + +/* +Load parses JSON formatted metadata at the passed path into the Metablock +object on which it was called. 
It returns an error if it cannot parse +a valid JSON formatted Metablock that contains a Link or Layout. +*/ +func (mb *Metablock) Load(path string) error { + // Open file and close before returning + jsonFile, err := os.Open(path) + if err != nil { + return err + } + defer jsonFile.Close() + + // Read entire file + jsonBytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + return err + } + + // Unmarshal JSON into a map of raw messages (signed and signatures) + // We can't fully unmarshal immediately, because we need to inspect the + // type (link or layout) to decide which data structure to use + var rawMb map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawMb); err != nil { + return err + } + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawMb["signed"] == nil || rawMb["signatures"] == nil { + return fmt.Errorf("in-toto metadata requires 'signed' and" + + " 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawMb["signatures"], &mb.Signatures); err != nil { + return err + } + + // Temporarily copy signed to opaque map to inspect the `_type` of signed + // and create link or layout accordingly + var signed map[string]interface{} + if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + return err + } + + if signed["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return err + } + mb.Signed = link + + } else if signed["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return err + } + + mb.Signed = layout + + } else { + return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + + " metadata must be one of 'link' or 'layout'") + } + + return jsonFile.Close() +} + +/* +Dump JSON serializes and writes the Metablock on which it was called to the +passed path. It returns an error if JSON serialization or writing fails. +*/ +func (mb *Metablock) Dump(path string) error { + // JSON encode Metablock formatted with newlines and indentation + // TODO: parametrize format + jsonBytes, err := json.MarshalIndent(mb, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = ioutil.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +/* +GetSignableRepresentation returns the canonical JSON representation of the +Signed field of the Metablock on which it was called. If canonicalization +fails the first return value is nil and the second return value is the error. +*/ +func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { + return cjson.EncodeCanonical(mb.Signed) +} + +/* +VerifySignature verifies the first signature, corresponding to the passed Key, +that it finds in the Signatures field of the Metablock on which it was called. 
+It returns an error if Signatures does not contain a Signature corresponding to
+the passed Key, the object in Signed cannot be canonicalized, or the Signature
+is invalid.
+*/
+func (mb *Metablock) VerifySignature(key Key) error {
+	sig, err := mb.GetSignatureForKeyID(key.KeyID)
+	if err != nil {
+		return err
+	}
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	if err := VerifySignature(key, sig, dataCanonical); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetSignatureForKeyID returns the signature that was created by the provided keyID, if it exists.
+func (mb *Metablock) GetSignatureForKeyID(keyID string) (Signature, error) {
+	for _, s := range mb.Signatures {
+		if s.KeyID == keyID {
+			return s, nil
+		}
+	}
+
+	return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID)
+}
+
+/*
+ValidateMetablock ensures that a passed Metablock object is valid. It indirectly
+validates the Link or Layout that the Metablock object contains.
+*/
+func ValidateMetablock(mb Metablock) error {
+	switch mbSignedType := mb.Signed.(type) {
+	case Layout:
+		if err := validateLayout(mb.Signed.(Layout)); err != nil {
+			return err
+		}
+	case Link:
+		if err := validateLink(mb.Signed.(Link)); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unknown type '%s', should be 'layout' or 'link'",
+			mbSignedType)
+	}
+
+	if err := validateSliceOfSignatures(mb.Signatures); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+/*
+Sign creates a signature over the signed portion of the metablock using the Key
+object provided. It then appends the resulting signature to the signatures
+field as provided. It returns an error if the Signed object cannot be
+canonicalized, or if the key is invalid or not supported.
+*/
+func (mb *Metablock) Sign(key Key) error {
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	newSignature, err := GenerateSignature(dataCanonical, key)
+	if err != nil {
+		return err
+	}
+
+	mb.Signatures = append(mb.Signatures, newSignature)
+	return nil
+}
+
+// Subject describes the set of software artifacts the statement applies to.
+type Subject struct {
+	Name   string           `json:"name"`
+	Digest common.DigestSet `json:"digest"`
+}
+
+// StatementHeader defines the common fields for all statements
+type StatementHeader struct {
+	Type          string    `json:"_type"`
+	PredicateType string    `json:"predicateType"`
+	Subject       []Subject `json:"subject"`
+}
+
+/*
+Statement binds the attestation to a particular subject and identifies the
+type of the predicate. This struct represents a generic statement.
+*/
+type Statement struct {
+	StatementHeader
+	// Predicate contains type specific metadata.
+	Predicate interface{} `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate.
+type ProvenanceStatementSLSA01 struct {
+	StatementHeader
+	Predicate slsa01.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate.
+type ProvenanceStatementSLSA02 struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate.
+// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02).
+type ProvenanceStatement struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// LinkStatement is the definition for an entire link statement.
+type LinkStatement struct {
+	StatementHeader
+	Predicate Link `json:"predicate"`
+}
+
+/*
+SPDXStatement is the definition for an entire SPDX statement.
+This is currently not implemented. Some tooling exists here:
+https://github.com/spdx/tools-golang, but this software is still in
+an early state.
+This struct is the same as the generic Statement struct but is added for
+completeness.
+*/
+type SPDXStatement struct {
+	StatementHeader
+	Predicate interface{} `json:"predicate"`
+}
+
+/*
+CycloneDXStatement defines a CycloneDX SBOM in the predicate. Like its SPDX
+counterpart, it is not currently further structured; the predicate is an
+empty interface, as in the generic Statement.
+*/
+type CycloneDXStatement struct {
+	StatementHeader
+	Predicate interface{} `json:"predicate"`
+}
+
+/*
+DSSESigner provides signature generation and validation based on the SSL
+Signing Spec: https://github.com/secure-systems-lab/signing-spec
+as described by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5
+It wraps the generic SSL envelope signer and enforces the correct payload
+type both during signature generation and validation.
+*/
+type DSSESigner struct {
+	signer *dsse.EnvelopeSigner
+}
+
+func NewDSSESigner(p ...dsse.SignVerifier) (*DSSESigner, error) {
+	es, err := dsse.NewEnvelopeSigner(p...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DSSESigner{
+		signer: es,
+	}, nil
+}
+
+func (s *DSSESigner) SignPayload(body []byte) (*dsse.Envelope, error) {
+	return s.signer.SignPayload(PayloadType, body)
+}
+
+func (s *DSSESigner) Verify(e *dsse.Envelope) error {
+	if e.PayloadType != PayloadType {
+		return ErrInvalidPayloadType
+	}
+
+	_, err := s.signer.Verify(e)
+	return err
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
new file mode 100644
index 000000000000..1bba77c39e50
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
@@ -0,0 +1,131 @@
+package in_toto
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An error message issued in UnpackRule if it receives a malformed rule.
+var errorMsg = "Wrong rule format, available formats are:\n" +
+	"\tMATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)" +
+	" [IN <destination-path-prefix>] FROM <step>,\n" +
+	"\tCREATE <pattern>,\n" +
+	"\tDELETE <pattern>,\n" +
+	"\tMODIFY <pattern>,\n" +
+	"\tALLOW <pattern>,\n" +
+	"\tDISALLOW <pattern>,\n" +
+	"\tREQUIRE <filename>\n\n"
+
+/*
+UnpackRule parses the passed rule and extracts and returns the information
+required for rule processing. It can be used to verify if a rule has a valid
+format. Available rule formats are:
+
+	MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
+	[IN <destination-path-prefix>] FROM <step>,
+	CREATE <pattern>,
+	DELETE <pattern>,
+	MODIFY <pattern>,
+	ALLOW <pattern>,
+	DISALLOW <pattern>
+
+Rule tokens are normalized to lower case before returning. The returned map
+has the following format:
+
+	{
+		"type": "match" | "create" | "delete" | "modify" | "allow" | "disallow",
+		"pattern": "<pattern>",
+		"srcPrefix": "<source-path-prefix>", // MATCH rule only
+		"dstPrefix": "<destination-path-prefix>", // MATCH rule only
+		"dstType": "materials" | "products", // MATCH rule only
+		"dstName": "<step>", // MATCH rule only
+	}
+
+If the rule does not match any of the available formats the first return value
+is nil and the second return value is the error.
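+
+For example, a hedged usage sketch (the rule values are illustrative):
+
+	rule := []string{"MATCH", "foo.tar.gz", "WITH", "PRODUCTS", "FROM", "package"}
+	ruleMap, err := UnpackRule(rule)
+	// on success: ruleMap["type"] == "match", ruleMap["pattern"] == "foo.tar.gz",
+	// ruleMap["dstType"] == "products" and ruleMap["dstName"] == "package"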
+*/
+func UnpackRule(rule []string) (map[string]string, error) {
+	// Cache rule len
+	ruleLen := len(rule)
+
+	// Create all lower rule copy to case-insensitively parse out tokens whose
+	// position we don't know yet. We keep the original rule to retain the
+	// non-token elements' case.
+	ruleLower := make([]string, ruleLen)
+	for i, val := range rule {
+		ruleLower[i] = strings.ToLower(val)
+	}
+
+	switch ruleLower[0] {
+	case "create", "modify", "delete", "allow", "disallow", "require":
+		if ruleLen != 2 {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+		}
+
+		return map[string]string{
+			"type":    ruleLower[0],
+			"pattern": rule[1],
+		}, nil
+
+	case "match":
+		var srcPrefix string
+		var dstType string
+		var dstPrefix string
+		var dstName string
+
+		// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+		// IN <destination-path-prefix> FROM <step>
+		if ruleLen == 10 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "in" &&
+			ruleLower[8] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = rule[7]
+			dstName = rule[9]
+			// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = ""
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "with" &&
+			ruleLower[4] == "in" && ruleLower[6] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = rule[5]
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
+		} else if ruleLen == 6 && ruleLower[2] == "with" &&
+			ruleLower[4] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = ""
+			dstName = rule[5]
+
+		} else {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+
+		}
+
+		return map[string]string{
+			"type":      ruleLower[0],
+			"pattern":   rule[1],
+			"srcPrefix": srcPrefix,
+			"dstPrefix": dstPrefix,
+			"dstType":   dstType,
+			"dstName":   dstName,
+		}, nil
+
+	default:
+		return nil,
+			fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+	}
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
new file mode 100644
index 000000000000..87e690507011
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
@@ -0,0 +1,409 @@
+package in_toto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"syscall"
+
+	"github.com/shibumi/go-pathspec"
+)
+
+// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
+var ErrSymCycle = errors.New("symlink cycle detected")
+
+// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
+var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")
+
+var ErrEmptyCommandArgs = errors.New("the command args are empty")
+
+// visitedSymlinks is a hashset that contains all paths that we have visited.
+var visitedSymlinks Set
+
+/*
+RecordArtifact reads and hashes the contents of the file at the passed path
+using sha256 and returns a map in the following format:
+
+	{
+		"<path>": {
+			"sha256": <hexdigest>,
+		}
+	}
+
+If reading the file fails, the first return value is nil and the second return
+value is the error.
+NOTE: For cross-platform consistency Windows-style line separators (CRLF) are
+normalized to Unix-style line separators (LF) before hashing file contents.
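+
+A minimal usage sketch (the path and algorithm list are illustrative):
+
+	artifact, err := RecordArtifact("foo.py", []string{"sha256"}, false)
+	// on success: artifact["sha256"] holds the hex digest of foo.py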
+*/
+func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
+	supportedHashMappings := getHashMapping()
+	// Read file from passed path
+	contents, err := ioutil.ReadFile(path)
+	hashedContentsMap := make(map[string]interface{})
+	if err != nil {
+		return nil, err
+	}
+
+	if lineNormalization {
+		// "Normalize" file contents. We convert all line separators to '\n'
+		// for keeping operating system independence
+		contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
+		contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
+	}
+
+	// Create a map of all the hashes present in the hash_func list
+	for _, element := range hashAlgorithms {
+		if _, ok := supportedHashMappings[element]; !ok {
+			return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
+		}
+		h := supportedHashMappings[element]
+		result := fmt.Sprintf("%x", hashToHex(h(), contents))
+		hashedContentsMap[element] = result
+	}
+
+	// Return it in a format that is conformant with link metadata artifacts
+	return hashedContentsMap, nil
+}
+
+/*
+RecordArtifacts is a wrapper around recordArtifacts.
+RecordArtifacts initializes a set for storing visited symlinks,
+calls recordArtifacts and deletes the set if no longer needed.
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hexdigest>,
+		},
+		"<path>": {
+			"sha256": <hexdigest>,
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) {
+	// Make sure to initialize a fresh hashset for every RecordArtifacts call
+	visitedSymlinks = NewSet()
+	evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	// pass result and error through
+	return evalArtifacts, err
+}
+
+/*
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hexdigest>,
+		},
+		"<path>": {
+			"sha256": <hexdigest>,
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) {
+	artifacts := make(map[string]interface{})
+	for _, path := range paths {
+		err := filepath.Walk(path,
+			func(path string, info os.FileInfo, err error) error {
+				// Abort if Walk function has a problem,
+				// e.g. path does not exist
+				if err != nil {
+					return err
+				}
+				// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
+				// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
+				// If we would call pathspec outside of the filepath.Walk this would not match.
+				ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
+				if err != nil {
+					return err
+				}
+				if ignore {
+					return nil
+				}
+				// Don't hash directories
+				if info.IsDir() {
+					return nil
+				}
+
+				// check for symlink and evaluate the last element in a symlink
+				// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
+				// because with os.Readlink() we would just read the next
+				// element in a possible symlink chain. This would mean more
+				// iterations. info.Mode()&os.ModeSymlink uses the file
+				// type bitmask to check for a symlink.
+				if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+					// return with error if we detect a symlink cycle
+					if ok := visitedSymlinks.Has(path); ok {
+						// this error will get passed through
+						// to RecordArtifacts()
+						return ErrSymCycle
+					}
+					evalSym, err := filepath.EvalSymlinks(path)
+					if err != nil {
+						return err
+					}
+					// add symlink to visitedSymlinks set
+					// this way, we know which link we have visited already
+					// if we visit a symlink twice, we have detected a symlink cycle
+					visitedSymlinks.Add(path)
+					// We recursively call recordArtifacts() to follow
+					// the new path.
+					evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+					if evalErr != nil {
+						return evalErr
+					}
+					for key, value := range evalArtifacts {
+						artifacts[key] = value
+					}
+					return nil
+				}
+				artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization)
+				// Abort if artifact can't be recorded, e.g.
+				// due to file permissions
+				if err != nil {
+					return err
+				}
+
+				for _, strip := range lStripPaths {
+					if strings.HasPrefix(path, strip) {
+						path = strings.TrimPrefix(path, strip)
+						break
+					}
+				}
+				// Check if path is unique
+				_, existingPath := artifacts[path]
+				if existingPath {
+					return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path)
+				}
+				artifacts[path] = artifact
+				return nil
+			})
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return artifacts, nil
+}
+
+/*
+waitErrToExitCode converts an error returned by Cmd.Wait() to an exit code. It
+returns -1 if no exit code can be inferred.
+*/
+func waitErrToExitCode(err error) int {
+	// If there's no exit code, we return -1
+	retVal := -1
+
+	// See https://stackoverflow.com/questions/10385551/get-exit-code-go
+	if err != nil {
+		if exiterr, ok := err.(*exec.ExitError); ok {
+			// The program has exited with an exit code != 0
+			// This works on both Unix and Windows. Although package
+			// syscall is generally platform dependent, WaitStatus is
+			// defined for both Unix and Windows and in both cases has
+			// an ExitStatus() method with the same signature.
+			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+				retVal = status.ExitStatus()
+			}
+		}
+	} else {
+		retVal = 0
+	}
+
+	return retVal
+}
+
+/*
+RunCommand executes the passed command in a subprocess. The first element of
+cmdArgs is used as executable and the rest as command arguments. It captures
+and returns stdout, stderr and exit code. The format of the returned map is:
+
+	{
+		"return-value": <exit code>,
+		"stdout": "<standard output>",
+		"stderr": "<standard error>"
+	}
+
+If the command cannot be executed or no pipes for stdout or stderr can be
+created the first return value is nil and the second return value is the error.
+NOTE: Since stdout and stderr are captured, they cannot be seen during the
+command execution.
+*/
+func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) {
+	if len(cmdArgs) == 0 {
+		return nil, ErrEmptyCommandArgs
+	}
+
+	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
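+	// exec.Command only prepares the *exec.Cmd; the process is started below
+	// via cmd.Start, after the stderr and stdout pipes have been created.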
+
+	if runDir != "" {
+		cmd.Dir = runDir
+	}
+
+	stderrPipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	stdoutPipe, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	// TODO: duplicate stdout, stderr
+	stdout, _ := ioutil.ReadAll(stdoutPipe)
+	stderr, _ := ioutil.ReadAll(stderrPipe)
+
+	retVal := waitErrToExitCode(cmd.Wait())
+
+	return map[string]interface{}{
+		"return-value": float64(retVal),
+		"stdout":       string(stdout),
+		"stderr":       string(stderr),
+	}, nil
+}
+
+/*
+InTotoRun executes commands, e.g. for software supply chain steps or
+inspections of an in-toto layout, and creates and returns corresponding link
+metadata. Link metadata contains recorded products at the passed productPaths
+and materials at the passed materialPaths. The returned link is wrapped in a
+Metablock object. If command execution or artifact recording fails the first
+return value is an empty Metablock and the second return value is the error.
+*/
+func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string,
+	cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string,
+	lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	// make sure that we only run RunCommand if cmdArgs is not nil or empty
+	byProducts := map[string]interface{}{}
+	if len(cmdArgs) != 0 {
+		byProducts, err = RunCommand(cmdArgs, runDir)
+		if err != nil {
+			return linkMb, err
+		}
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    products,
+		ByProducts:  byProducts,
+		Command:     cmdArgs,
+		Environment: map[string]interface{}{},
+	}
+
+	linkMb.Signatures = []Signature{}
+	// We use a Go 1.13 feature here to check the key struct: IsZero() returns
+	// true if the key hasn't been initialized with other values than the
+	// default ones.
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStart begins the creation of a link metablock file in two steps,
+in order to provide evidence for supply chain steps that cannot be carried out
+by a single command. InTotoRecordStart collects the hashes of the materials
+before any commands are run, signs the unfinished link, and returns the link.
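+
+A hedged usage sketch (all argument values are illustrative):
+
+	unfinished, err := InTotoRecordStart("build", []string{"src/"}, key,
+		[]string{"sha256"}, nil, nil, false)
+	// run the build out of band, then record products and finish the link
+	// with InTotoRecordStop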
+*/
+func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    map[string]interface{}{},
+		ByProducts:  map[string]interface{}{},
+		Command:     []string{},
+		Environment: map[string]interface{}{},
+	}
+
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStop ends the creation of a metadata link file created by
+InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
+created by InTotoRecordStart and records the hashes of any products created by
+commands run between InTotoRecordStart and InTotoRecordStop. The resultant
+finished link metablock is then signed by the provided key and returned.
+*/
+func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+	if err := prelimLinkMb.VerifySignature(key); err != nil {
+		return linkMb, err
+	}
+
+	link, ok := prelimLinkMb.Signed.(Link)
+	if !ok {
+		return linkMb, errors.New("invalid metadata block")
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	link.Products = products
+	linkMb.Signed = link
+
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
new file mode 100644
index 000000000000..a45a45463468
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
@@ -0,0 +1,16 @@
+package common
+
+// DigestSet contains a set of digests. It is represented as a map from
+// algorithm name to lowercase hex-encoded value.
+type DigestSet map[string]string
+
+// ProvenanceBuilder identifies the entity that executed the build steps.
+type ProvenanceBuilder struct {
+	ID string `json:"id"`
+}
+
+// ProvenanceMaterial defines the materials used to build an artifact.
+type ProvenanceMaterial struct {
+	URI    string    `json:"uri,omitempty"`
+	Digest DigestSet `json:"digest,omitempty"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
new file mode 100644
index 000000000000..5978e9229d9c
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
@@ -0,0 +1,50 @@
+package v01
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	Builder   common.ProvenanceBuilder    `json:"builder"`
+	Recipe    ProvenanceRecipe            `json:"recipe"`
+	Metadata  *ProvenanceMetadata         `json:"metadata,omitempty"`
+	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceRecipe describes the actions performed by the builder.
+type ProvenanceRecipe struct {
+	Type string `json:"type"`
+	// DefinedInMaterial can be sent as the null pointer to indicate that
+	// the value is not present.
+	DefinedInMaterial *int        `json:"definedInMaterial,omitempty"`
+	EntryPoint        string      `json:"entryPoint"`
+	Arguments         interface{} `json:"arguments,omitempty"`
+	Environment       interface{} `json:"environment,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn  *time.Time         `json:"buildStartedOn,omitempty"`
+	BuildFinishedOn *time.Time         `json:"buildFinishedOn,omitempty"`
+	Completeness    ProvenanceComplete `json:"completeness"`
+	Reproducible    bool               `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	Arguments   bool `json:"arguments"`
+	Environment bool `json:"environment"`
+	Materials   bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
new file mode 100644
index 000000000000..5fca7abb7326
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
@@ -0,0 +1,137 @@
+package v02
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	// Builder identifies the entity that executed the invocation, which is trusted to have
+	// correctly performed the operation and populated this provenance.
+	//
+	// The identity MUST reflect the trust base that consumers care about. How detailed to be is a
+	// judgement call. For example, GitHub Actions supports both GitHub-hosted runners and
+	// self-hosted runners. The GitHub-hosted runner might be a single identity because it’s all
+	// GitHub from the consumer’s perspective. Meanwhile, each self-hosted runner might have its
+	// own identity because not all runners are trusted by all consumers.
+	Builder common.ProvenanceBuilder `json:"builder"`
+
+	// BuildType is a URI indicating what type of build was performed. It determines the meaning of
+	// [Invocation], [BuildConfig] and [Materials].
+	BuildType string `json:"buildType"`
+
+	// Invocation identifies the event that kicked off the build. When combined with materials,
+	// this SHOULD fully describe the build, such that re-running this invocation results in
+	// bit-for-bit identical output (if the build is reproducible).
+	//
+	// MAY be unset/null if unknown, but this is DISCOURAGED.
+	Invocation ProvenanceInvocation `json:"invocation,omitempty"`
+
+	// BuildConfig lists the steps in the build.
If [ProvenanceInvocation.ConfigSource] is not + // available, BuildConfig can be used to verify information about the build. + // + // This is an arbitrary JSON object with a schema defined by [BuildType]. + BuildConfig interface{} `json:"buildConfig,omitempty"` + + // Metadata contains other properties of the build. + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` + + // Materials is the collection of artifacts that influenced the build including sources, + // dependencies, build tools, base images, and so on. + // + // This is considered to be incomplete unless metadata.completeness.materials is true. + Materials []common.ProvenanceMaterial `json:"materials,omitempty"` +} + +// ProvenanceInvocation identifies the event that kicked off the build. +type ProvenanceInvocation struct { + // ConfigSource describes where the config file that kicked off the build came from. This is + // effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from. + ConfigSource ConfigSource `json:"configSource,omitempty"` + + // Parameters is a collection of all external inputs that influenced the build on top of + // ConfigSource. For example, if the invocation type were “make”, then this might be the + // flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint]. + // + // Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to + // achieve this is to disallow any parameters altogether. + // + // This is an arbitrary JSON object with a schema defined by buildType. + Parameters interface{} `json:"parameters,omitempty"` + + // Environment contains any other builder-controlled inputs necessary for correctly evaluating + // the build. Usually only needed for reproducing the build but not evaluated as part of + // policy. + // + // This SHOULD be minimized to only include things that are part of the public API, that cannot + // be recomputed from other values in the provenance, and that actually affect the evaluation + // of the build. For example, this might include variables that are referenced in the workflow + // definition, but it SHOULD NOT include a dump of all environment variables or include things + // like the hostname (assuming hostname is not part of the public API). + Environment interface{} `json:"environment,omitempty"` +} + +type ConfigSource struct { + // URI indicating the identity of the source of the config. + URI string `json:"uri,omitempty"` + // Digest is a collection of cryptographic digests for the contents of the artifact specified + // by [URI]. + Digest common.DigestSet `json:"digest,omitempty"` + // EntryPoint identifying the entry point into the build. This is often a path to a + // configuration file and/or a target label within that file. The syntax and meaning are + // defined by buildType. For example, if the buildType were “make”, then this would reference + // the directory in which to run make as well as which target to use. + // + // Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example, + // a policy might only allow the "release" entry point but not the "debug" entry point. + // MAY be omitted if the buildType specifies a default value. + EntryPoint string `json:"entryPoint,omitempty"` +} + +// ProvenanceMetadata contains metadata for the built artifact. +type ProvenanceMetadata struct { + // BuildInvocationID identifies this particular build invocation, which can be useful for + // finding associated logs or other ad-hoc analysis. 
+	// The exact meaning and format is defined by [common.ProvenanceBuilder.ID];
+	// by default it is treated as opaque and case-sensitive.
+	// The value SHOULD be globally unique.
+	BuildInvocationID string `json:"buildInvocationID,omitempty"`
+
+	// BuildStartedOn is the timestamp of when the build started.
+	//
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
+	// BuildFinishedOn is the timestamp of when the build completed.
+	BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
+
+	// Completeness indicates that the builder claims certain fields in this message to be
+	// complete.
+	Completeness ProvenanceComplete `json:"completeness"`
+
+	// Reproducible if true, means the builder claims that running invocation on materials will
+	// produce bit-for-bit identical output.
+	Reproducible bool `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	// Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
+	// complete, meaning that all external inputs are properly captured in
+	// ProvenanceInvocation.Parameters.
+	Parameters bool `json:"parameters"`
+	// Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
+	// complete.
+	Environment bool `json:"environment"`
+	// Materials if true, means the builder claims that materials is complete, usually through some
+	// controls to prevent network access. Sometimes called “hermetic”.
+	Materials bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
new file mode 100644
index 000000000000..59cba86eb52c
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
@@ -0,0 +1,147 @@
+package in_toto
+
+import (
+	"fmt"
+)
+
+/*
+Set represents a data structure for set operations. See `NewSet` for how to
+create a Set, and available Set receivers for useful set operations.
+
+Under the hood Set aliases map[string]struct{}, where the map keys are the set
+elements and the empty struct values are a memory-efficient way of encoding
+membership.
+*/
+type Set map[string]struct{}
+
+/*
+NewSet creates a new Set, assigns it the optionally passed variadic string
+elements, and returns it.
+*/
+func NewSet(elems ...string) Set {
+	var s Set = make(map[string]struct{})
+	for _, elem := range elems {
+		s.Add(elem)
+	}
+	return s
+}
+
+/*
+Has returns True if the passed string is a member of the set on which it was
+called and False otherwise.
+*/
+func (s Set) Has(elem string) bool {
+	_, ok := s[elem]
+	return ok
+}
+
+/*
+Add adds the passed string to the set on which it was called, if the string is
+not a member of the set.
+*/
+func (s Set) Add(elem string) {
+	s[elem] = struct{}{}
+}
+
+/*
+Remove removes the passed string from the set on which it was called, if the
+string is a member of the set.
+*/
+func (s Set) Remove(elem string) {
+	delete(s, elem)
+}
+
+/*
+Intersection creates and returns a new Set with the elements of the set on
+which it was called that are also in the passed set.
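+
+For illustration, a minimal sketch of how Intersection composes with the
+other Set receivers in this file (Difference, Has and Slice follow below);
+the file names are hypothetical:
+
+	materials := NewSet("foo.c", "bar.c", "Makefile")
+	products := NewSet("foo.o", "bar.o", "Makefile")
+
+	unchanged := materials.Intersection(products) // {"Makefile"}
+	deleted := materials.Difference(products)     // {"foo.c", "bar.c"}
+	fmt.Println(unchanged.Has("Makefile"), deleted.Slice())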
+*/
+func (s Set) Intersection(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if !s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Difference creates and returns a new Set with the elements of the set on
+which it was called that are not in the passed set.
+*/
+func (s Set) Difference(s2 Set) Set {
+	res := NewSet()
+	for elem := range s {
+		if s2.Has(elem) {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Filter creates and returns a new Set with the elements of the set on which it
+was called that match the passed pattern. A matching error is treated as a
+non-match, and a warning is printed.
+*/
+func (s Set) Filter(pattern string) Set {
+	res := NewSet()
+	for elem := range s {
+		matched, err := match(pattern, elem)
+		if err != nil {
+			fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
+			continue
+		}
+		if !matched {
+			continue
+		}
+		res.Add(elem)
+	}
+	return res
+}
+
+/*
+Slice creates and returns an unordered string slice with the elements of the
+set on which it was called.
+*/
+func (s Set) Slice() []string {
+	var res []string
+	res = make([]string, 0, len(s))
+	for elem := range s {
+		res = append(res, elem)
+	}
+	return res
+}
+
+/*
+InterfaceKeyStrings returns the string keys of the passed interface{} map in
+an unordered string slice.
+*/
+func InterfaceKeyStrings(m map[string]interface{}) []string {
+	res := make([]string, len(m))
+	i := 0
+	for k := range m {
+		res[i] = k
+		i++
+	}
+	return res
+}
+
+/*
+IsSubSet checks if the parameter subset is a
+subset of the superset s.
+*/
+func (s Set) IsSubSet(subset Set) bool {
+	if len(subset) > len(s) {
+		return false
+	}
+	for key := range subset {
+		if s.Has(key) {
+			continue
+		} else {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
new file mode 100644
index 000000000000..f555f79a528d
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go
@@ -0,0 +1,14 @@
+//go:build linux || darwin || !windows
+// +build linux darwin !windows
+
+package in_toto
+
+import "golang.org/x/sys/unix"
+
+func isWritable(path string) error {
+	err := unix.Access(path, unix.W_OK)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
new file mode 100644
index 000000000000..8552f0345d04
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go
@@ -0,0 +1,25 @@
+package in_toto
+
+import (
+	"errors"
+	"os"
+)
+
+func isWritable(path string) error {
+	// get fileInfo
+	info, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	// check if path is a directory
+	if !info.IsDir() {
+		return errors.New("not a directory")
+	}
+
+	// Check if the owner write bit (1<<7) is set in the file permissions
+	if info.Mode().Perm()&(1<<(uint(7))) == 0 {
+		return errors.New("not writable")
+	}
+	return nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
new file mode 100644
index 000000000000..2302040f4600
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go
@@ -0,0 +1,1091 @@
+/*
+Package in_toto implements types and routines to verify a software supply chain
+according to the in-toto specification.
+See https://github.com/in-toto/docs/blob/master/in-toto-spec.md
+*/
+package in_toto
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	osPath "path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// ErrInspectionRunDirIsSymlink gets thrown if the runDir is a symlink
+var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. This is a security risk")
+
+/*
+RunInspections iteratively executes the command in the Run field of all
+inspections of the passed layout, creating unsigned link metadata that records
+all files found in the current working directory as materials (before command
+execution) and products (after command execution). A map with inspection names
+as keys and Metablocks containing the generated link metadata as values is
+returned. The format is:
+
+	{
+		<inspection name> : Metablock,
+		<inspection name> : Metablock,
+		...
+	}
+
+If executing the inspection command fails, or if the executed command has a
+non-zero exit code, the first return value is an empty Metablock map and the
+second return value is the error.
+*/
+func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) {
+	inspectionMetadata := make(map[string]Metablock)
+
+	for _, inspection := range layout.Inspect {
+
+		paths := []string{"."}
+		if runDir != "" {
+			paths = []string{runDir}
+		}
+
+		linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths,
+			inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization)
+
+		if err != nil {
+			return nil, err
+		}
+
+		retVal := linkMb.Signed.(Link).ByProducts["return-value"]
+		if retVal != float64(0) {
+			return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+
+				" returned a non-zero value: %d", inspection.Run, inspection.Name,
+				retVal)
+		}
+
+		// Dump inspection link to cwd using the short link name format
+		linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name)
+		if err := linkMb.Dump(linkName); err != nil {
+			fmt.Printf("JSON serialization or writing failed: %s", err)
+		}
+
+		inspectionMetadata[inspection.Name] = linkMb
+	}
+	return inspectionMetadata, nil
+}
+
+// verifyMatchRule is a helper function to process artifact rules of
+// type MATCH. See VerifyArtifacts for more details.
+func verifyMatchRule(ruleData map[string]string,
+	srcArtifacts map[string]interface{}, srcArtifactQueue Set,
+	itemsMetadata map[string]Metablock) Set {
+	consumed := NewSet()
+	// Get destination link metadata
+	dstLinkMb, exists := itemsMetadata[ruleData["dstName"]]
+	if !exists {
+		// Destination link does not exist, rule can't consume any
+		// artifacts
+		return consumed
+	}
+
+	// Get artifacts from destination link metadata
+	var dstArtifacts map[string]interface{}
+	switch ruleData["dstType"] {
+	case "materials":
+		dstArtifacts = dstLinkMb.Signed.(Link).Materials
+	case "products":
+		dstArtifacts = dstLinkMb.Signed.(Link).Products
+	}
+
+	// cleanup paths in pattern and artifact maps
+	if ruleData["pattern"] != "" {
+		ruleData["pattern"] = path.Clean(ruleData["pattern"])
+	}
+	for k := range srcArtifacts {
+		if path.Clean(k) != k {
+			srcArtifacts[path.Clean(k)] = srcArtifacts[k]
+			delete(srcArtifacts, k)
+		}
+	}
+	for k := range dstArtifacts {
+		if path.Clean(k) != k {
+			dstArtifacts[path.Clean(k)] = dstArtifacts[k]
+			delete(dstArtifacts, k)
+		}
+	}
+
+	// Normalize optional source and destination prefixes, i.e. if
+	// there is a prefix, then add a trailing slash if not there yet
+	for _, prefix := range []string{"srcPrefix", "dstPrefix"} {
+		if ruleData[prefix] != "" {
+			ruleData[prefix] = path.Clean(ruleData[prefix])
+			if !strings.HasSuffix(ruleData[prefix], "/") {
+				ruleData[prefix] += "/"
+			}
+		}
+	}
+	// Iterate over queue and mark consumed artifacts
+	for srcPath := range srcArtifactQueue {
+		// Remove optional source prefix from source artifact path
+		// Noop if prefix is empty, or artifact does not have it
+		srcBasePath := strings.TrimPrefix(srcPath, ruleData["srcPrefix"])
+
+		// Ignore artifacts not matched by rule pattern
+		matched, err := match(ruleData["pattern"], srcBasePath)
+		if err != nil || !matched {
+			continue
+		}
+
+		// Construct corresponding destination artifact path, i.e.
+		// an optional destination prefix plus the source base path
+		dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath))
+
+		// Try to find the corresponding destination artifact
+		dstArtifact, exists := dstArtifacts[dstPath]
+		// Ignore artifacts without corresponding destination artifact
+		if !exists {
+			continue
+		}
+
+		// Ignore artifact pairs with no matching hashes
+		if !reflect.DeepEqual(srcArtifacts[srcPath], dstArtifact) {
+			continue
+		}
+
+		// Only if a source and destination artifact pair was found and
+		// their hashes are equal, will we mark the source artifact as
+		// successfully consumed, i.e. it will be removed from the queue
+		consumed.Add(srcPath)
+	}
+	return consumed
+}
+
+/*
+VerifyArtifacts iteratively applies the material and product rules of the
+passed items (step or inspection) to enforce and authorize artifacts (materials
+or products) reported by the corresponding link and to guarantee that
+artifacts are linked together across links. In the beginning all artifacts are
+placed in a queue according to their type. If an artifact gets consumed by a
+rule it is removed from the queue. An artifact can only be consumed once in
+the course of processing the set of rules in ExpectedMaterials or
+ExpectedProducts.
+
+Rules of type MATCH, ALLOW, CREATE, DELETE, MODIFY, REQUIRE and DISALLOW are
+supported.
+
+All rules except for DISALLOW consume queued artifacts on success, and
+leave the queue unchanged on failure. Hence, it is left to a terminal
+DISALLOW rule to fail overall verification, if artifacts are left in the queue
+that should have been consumed by preceding rules.
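+
+For illustration, a hedged sketch of a call; the layout and the link
+Metablocks are assumed to have been loaded and verified elsewhere, and the
+variable names are hypothetical:
+
+	items := []interface{}{layout.Steps[0], layout.Inspect[0]}
+	metadata := map[string]Metablock{
+		layout.Steps[0].Name:   stepLinkMb,    // hypothetical link metadata
+		layout.Inspect[0].Name: inspectLinkMb, // hypothetical link metadata
+	}
+	if err := VerifyArtifacts(items, metadata); err != nil {
+		return err // a rule, e.g. a terminal DISALLOW, rejected an artifact
+	}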
+*/
+func VerifyArtifacts(items []interface{},
+	itemsMetadata map[string]Metablock) error {
+	// Verify artifact rules for each item in the layout
+	for _, itemI := range items {
+		// The layout item (interface) must be a Step or an Inspection; we are only
+		// interested in the name and the expected materials and products
+		var itemName string
+		var expectedMaterials [][]string
+		var expectedProducts [][]string
+
+		switch item := itemI.(type) {
+		case Step:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		case Inspection:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		default: // Something wrong
+			return fmt.Errorf("VerifyArtifacts received an item of invalid type,"+
+				" elements of passed slice 'items' must be one of 'Step' or"+
+				" 'Inspection', got: '%s'", reflect.TypeOf(item))
+		}
+
+		// Use the item's name to extract the corresponding link
+		srcLinkMb, exists := itemsMetadata[itemName]
+		if !exists {
+			return fmt.Errorf("VerifyArtifacts could not find metadata"+
+				" for item '%s', got: '%s'", itemName, itemsMetadata)
+		}
+
+		// Create shortcuts to materials and products (including hashes) reported
+		// by the item's link, required to verify "match" rules
+		materials := srcLinkMb.Signed.(Link).Materials
+		products := srcLinkMb.Signed.(Link).Products
+
+		// All other rules only require the material or product paths (without
+		// hashes). We extract them from the corresponding maps and store them as
+		// sets for convenience in further processing
+		materialPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(materials) {
+			materialPaths.Add(path.Clean(p))
+		}
+		productPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(products) {
+			productPaths.Add(path.Clean(p))
+		}
+
+		// For `create`, `delete` and `modify` rules we prepare sets of artifacts
+		// (without hashes) that were created, deleted or modified in the current
+		// step or inspection
+		created := productPaths.Difference(materialPaths)
+		deleted := materialPaths.Difference(productPaths)
+		remained := materialPaths.Intersection(productPaths)
+		modified := NewSet()
+		for name := range remained {
+			if !reflect.DeepEqual(materials[name], products[name]) {
+				modified.Add(name)
+			}
+		}
+
+		// For each item we have to run rule verification, once per artifact type.
+		// Here we prepare the corresponding data for each round.
+		verificationDataList := []map[string]interface{}{
+			{
+				"srcType":       "materials",
+				"rules":         expectedMaterials,
+				"artifacts":     materials,
+				"artifactPaths": materialPaths,
+			},
+			{
+				"srcType":       "products",
+				"rules":         expectedProducts,
+				"artifacts":     products,
+				"artifactPaths": productPaths,
+			},
+		}
+		// TODO: Add logging library (see in-toto/in-toto-golang#4)
+		// fmt.Printf("Verifying %s '%s' ", reflect.TypeOf(itemI), itemName)
+
+		// Process all material rules using the corresponding materials and all
+		// product rules using the corresponding products
+		for _, verificationData := range verificationDataList {
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("%s...\n", verificationData["srcType"])
+
+			rules := verificationData["rules"].([][]string)
+			artifacts := verificationData["artifacts"].(map[string]interface{})
+
+			// Use artifacts (without hashes) as base queue. Each rule only operates
+			// on artifacts in that queue. If a rule consumes an artifact (i.e. can
+			// be applied successfully), the artifact is removed from the queue.
+			// By applying a terminal DISALLOW rule, verification may return an error
+			// if the rule matches artifacts in the queue that should have been
+			// consumed by earlier rules.
+			queue := verificationData["artifactPaths"].(Set)
+
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n",
+			//	materialPaths.Slice(), productPaths.Slice(), queue.Slice())
+
+			// Verify rules sequentially
+			for _, rule := range rules {
+				// Parse rule and error out if it is malformed
+				// NOTE: the rule format should have been validated before
+				ruleData, err := UnpackRule(rule)
+				if err != nil {
+					return err
+				}
+
+				// Apply rule pattern to filter queued artifacts that are up for rule
+				// specific consumption
+				filtered := queue.Filter(path.Clean(ruleData["pattern"]))
+
+				var consumed Set
+				switch ruleData["type"] {
+				case "match":
+					// Note: here we need to perform more elaborate filtering
+					consumed = verifyMatchRule(ruleData, artifacts, queue, itemsMetadata)
+
+				case "allow":
+					// Consumes all filtered artifacts
+					consumed = filtered
+
+				case "create":
+					// Consumes filtered artifacts that were created
+					consumed = filtered.Intersection(created)
+
+				case "delete":
+					// Consumes filtered artifacts that were deleted
+					consumed = filtered.Intersection(deleted)
+
+				case "modify":
+					// Consumes filtered artifacts that were modified
+					consumed = filtered.Intersection(modified)
+
+				case "disallow":
+					// Does not consume but errors out if artifacts were filtered
+					if len(filtered) > 0 {
+						return fmt.Errorf("artifact verification failed for %s '%s',"+
+							" %s %s disallowed by rule %s",
+							reflect.TypeOf(itemI).Name(), itemName,
+							verificationData["srcType"], filtered.Slice(), rule)
+					}
+				case "require":
+					// REQUIRE is somewhat of a weird animal that does not use
+					// patterns but rather single filenames (for now).
+					if !queue.Has(ruleData["pattern"]) {
+						return fmt.Errorf("artifact verification failed for %s in REQUIRE '%s',"+
+							" because %s is not in %s", verificationData["srcType"],
+							ruleData["pattern"], ruleData["pattern"], queue.Slice())
+					}
+				}
+				// Update queue by removing consumed artifacts
+				queue = queue.Difference(consumed)
+				// TODO: Add logging library (see in-toto/in-toto-golang#4)
+				// fmt.Printf("Rule: %s\nQueue: %s\n\n", rule, queue.Slice())
+			}
+		}
+	}
+	return nil
+}
+
+/*
+ReduceStepsMetadata merges for each step of the passed Layout all the passed
+per-functionary links into a single link, asserting that the reported Materials
+and Products are equal across links for a given step. This function may be
+used at a point during the overall verification where link thresholds have
+already been verified and subsequent verification only needs one exemplary
+link per step. The function returns a map with one Metablock (link) per step:
+
+	{
+		<step name> : Metablock,
+		<step name> : Metablock,
+		...
+	}
+
+If links corresponding to the same step report different Materials or different
+Products, the first return value is an empty Metablock map and the second
+return value is the error.
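+
+A minimal usage sketch, assuming stepsMetadata was produced by
+LoadLinksForLayout (defined later in this file) and thresholds were already
+verified; the step name is hypothetical:
+
+	reduced, err := ReduceStepsMetadata(layout, stepsMetadata)
+	if err != nil {
+		return err // links of one step reported diverging materials or products
+	}
+	exemplary := reduced["build"] // one exemplary link per step name
+	_ = exemplary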
+*/
+func ReduceStepsMetadata(layout Layout,
+	stepsMetadata map[string]map[string]Metablock) (map[string]Metablock,
+	error) {
+	stepsMetadataReduced := make(map[string]Metablock)
+
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not reduce metadata for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		// Get the first link (could be any link) for the current step, which will
+		// serve as reference link for below comparisons
+		var referenceKeyID string
+		var referenceLinkMb Metablock
+		for keyID, linkMb := range linksPerStep {
+			referenceLinkMb = linkMb
+			referenceKeyID = keyID
+			break
+		}
+
+		// Only one link, nothing to reduce, take the reference link
+		if len(linksPerStep) == 1 {
+			stepsMetadataReduced[step.Name] = referenceLinkMb
+
+			// Multiple links, reduce but first check
+		} else {
+			// Artifact maps must be equal for each type among all links
+			// TODO: What should we do if there are more links than the
+			// threshold requires, but not all of them are equal? Right now we would
+			// also error.
+			for keyID, linkMb := range linksPerStep {
+				if !reflect.DeepEqual(linkMb.Signed.(Link).Materials,
+					referenceLinkMb.Signed.(Link).Materials) ||
+					!reflect.DeepEqual(linkMb.Signed.(Link).Products,
+						referenceLinkMb.Signed.(Link).Products) {
+					return nil, fmt.Errorf("link '%s' and '%s' have different"+
+						" artifacts",
+						fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID),
+						fmt.Sprintf(LinkNameFormat, step.Name, keyID))
+				}
+			}
+			// We haven't errored out, so we can reduce (i.e. take the reference link)
+			stepsMetadataReduced[step.Name] = referenceLinkMb
+		}
+	}
+	return stepsMetadataReduced, nil
+}
+
+/*
+VerifyStepCommandAlignment (soft) verifies that for each step of the passed
+layout the command executed, as per the passed link, matches the expected
+command, as per the layout. Soft verification means that, in case a command
+does not align, a warning is issued.
+*/
+func VerifyStepCommandAlignment(layout Layout,
+	stepsMetadata map[string]map[string]Metablock) {
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not verify command alignment for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		for signerKeyID, linkMb := range linksPerStep {
+			expectedCommandS := strings.Join(step.ExpectedCommand, " ")
+			executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ")
+
+			if expectedCommandS != executedCommandS {
+				linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID)
+				fmt.Printf("WARNING: Expected command for step '%s' (%s) and command"+
+					" reported by '%s' (%s) differ.\n",
+					step.Name, expectedCommandS, linkName, executedCommandS)
+			}
+		}
+	}
+}
+
+/*
+LoadLayoutCertificates loads the root and intermediate CAs from the layout, if
+present. These pools are used to check signatures on links that were signed by
+keys not configured in the PubKeys section of a step. If no CAs are configured,
+such signatures are not allowed, and the returned CertPools will be empty.
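+
+For illustration, a sketch of supplying one extra PEM-encoded intermediate on
+top of the CAs embedded in the layout; the PEM bytes are placeholders:
+
+	extraPem := []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")
+	rootPool, intermediatePool, err := LoadLayoutCertificates(layout, [][]byte{extraPem})
+	if err != nil {
+		return err // a certificate in the layout or the extra PEM failed to load
+	}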
+*/
+func LoadLayoutCertificates(layout Layout, intermediatePems [][]byte) (*x509.CertPool, *x509.CertPool, error) {
+	rootPool := x509.NewCertPool()
+	for _, certPem := range layout.RootCas {
+		ok := rootPool.AppendCertsFromPEM([]byte(certPem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load root certificates for layout")
+		}
+	}
+
+	intermediatePool := x509.NewCertPool()
+	for _, intermediatePem := range layout.IntermediateCas {
+		ok := intermediatePool.AppendCertsFromPEM([]byte(intermediatePem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load intermediate certificates for layout")
+		}
+	}
+
+	for _, intermediatePem := range intermediatePems {
+		ok := intermediatePool.AppendCertsFromPEM(intermediatePem)
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load provided intermediate certificates")
+		}
+	}
+
+	return rootPool, intermediatePool, nil
+}
+
+/*
+VerifyLinkSignatureThesholds verifies that for each step of the passed layout,
+there are at least Threshold links, validly signed by different authorized
+functionaries. The returned map of link metadata per step contains only
+links with valid signatures from distinct functionaries and has the format:
+
+	{
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		...
+	}
+
+If for any step of the layout there are not enough links available, the first
+return value is an empty map of Metablock maps and the second return value is
+the error.
+*/
+func VerifyLinkSignatureThesholds(layout Layout,
+	stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) (
+	map[string]map[string]Metablock, error) {
+	// This will store links with a valid signature from an authorized functionary
+	// for all steps
+	stepsMetadataVerified := make(map[string]map[string]Metablock)
+
+	// Try to find enough (>= threshold) links each with a valid signature from
+	// distinct authorized functionaries for each step
+	for _, step := range layout.Steps {
+		var stepErr error
+
+		// This will store links with a valid signature from an authorized
+		// functionary for the given step
+		linksPerStepVerified := make(map[string]Metablock)
+
+		// Check if there are any links at all for the given step
+		linksPerStep, ok := stepsMetadata[step.Name]
+		if !ok || len(linksPerStep) < 1 {
+			stepErr = fmt.Errorf("no links found")
+		}
+
+		// For each link corresponding to a step, check that the signer key was
+		// authorized, the layout contains a verification key and the signature
+		// verification passes. Only good links are stored, to verify thresholds
+		// below.
+		isAuthorizedSignature := false
+		for signerKeyID, linkMb := range linksPerStep {
+			for _, authorizedKeyID := range step.PubKeys {
+				if signerKeyID == authorizedKeyID {
+					if verifierKey, ok := layout.Keys[authorizedKeyID]; ok {
+						if err := linkMb.VerifySignature(verifierKey); err == nil {
+							linksPerStepVerified[signerKeyID] = linkMb
+							isAuthorizedSignature = true
+							break
+						}
+					}
+				}
+			}
+
+			// If the signer's key wasn't in our step's pubkeys array, check the cert pool to
+			// see if the key is known to us.
+			if !isAuthorizedSignature {
+				sig, err := linkMb.GetSignatureForKeyID(signerKeyID)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				cert, err := sig.GetCertificate()
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				// test certificate against the step's constraints to make sure it's a valid functionary
+				err = step.CheckCertConstraints(cert, layout.RootCAIDs(), rootCertPool, intermediateCertPool)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				err = linkMb.VerifySignature(cert)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				linksPerStepVerified[signerKeyID] = linkMb
+			}
+		}
+
+		// Store all good links for a step
+		stepsMetadataVerified[step.Name] = linksPerStepVerified
+
+		if len(linksPerStepVerified) < step.Threshold {
+			linksPerStep := stepsMetadata[step.Name]
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s)."+
+				" '%d' out of '%d' available link(s) have a valid signature from an"+
+				" authorized signer: %v", step.Name, step.Threshold,
+				len(linksPerStepVerified), len(linksPerStep), stepErr)
+		}
+	}
+	return stepsMetadataVerified, nil
+}
+
+/*
+LoadLinksForLayout loads for every Step of the passed Layout a Metablock
+containing the corresponding Link. A base path to a directory that contains
+the links may be passed using linkDir. Link file names are constructed,
+using LinkNameFormat together with the corresponding step name and authorized
+functionary key ids. A map of link metadata is returned and has the following
+format:
+
+	{
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		<step name> : {
+			<key id> : Metablock,
+			<key id> : Metablock,
+			...
+		},
+		...
+	}
+
+If a link cannot be loaded at a constructed link name or is invalid, it is
+ignored. Only a preliminary threshold check is performed, that is, if there
+aren't at least Threshold links for any given step, the first return value
+is an empty map of Metablock maps and the second return value is the error.
+*/
+func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) {
+	stepsMetadata := make(map[string]map[string]Metablock)
+
+	for _, step := range layout.Steps {
+		linksPerStep := make(map[string]Metablock)
+		// Since we can verify against certificates belonging to a CA, we need to
+		// load any possible links
+		linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name)))
+		if err != nil {
+			return nil, err
+		}
+
+		for _, linkPath := range linkFiles {
+			var linkMb Metablock
+			if err := linkMb.Load(linkPath); err != nil {
+				continue
+			}
+
+			// To get the full key from the metadata's signatures, we have to check
+			// for one with the same short id...
+			signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link")
+			for _, sig := range linkMb.Signatures {
+				if strings.HasPrefix(sig.KeyID, signerShortKeyID) {
+					linksPerStep[sig.KeyID] = linkMb
+					break
+				}
+			}
+		}
+
+		if len(linksPerStep) < step.Threshold {
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s),"+
+				" found '%d'", step.Name, step.Threshold, len(linksPerStep))
+		}
+
+		stepsMetadata[step.Name] = linksPerStep
+	}
+
+	return stepsMetadata, nil
+}
+
+/*
+VerifyLayoutExpiration verifies that the passed Layout has not expired. It
+returns an error if the (zulu) date in the Expires field is in the past.
+*/
+func VerifyLayoutExpiration(layout Layout) error {
+	expires, err := time.Parse(ISO8601DateSchema, layout.Expires)
+	if err != nil {
+		return err
+	}
+	// Uses timezone of expires, i.e.
UTC + if time.Until(expires) < 0 { + return fmt.Errorf("layout has expired on '%s'", expires) + } + return nil +} + +/* +VerifyLayoutSignatures verifies for each key in the passed key map the +corresponding signature of the Layout in the passed Metablock's Signed field. +Signatures and keys are associated by key id. If the key map is empty, or the +Metablock's Signature field does not have a signature for one or more of the +passed keys, or a matching signature is invalid, an error is returned. +*/ +func VerifyLayoutSignatures(layoutMb Metablock, + layoutKeys map[string]Key) error { + if len(layoutKeys) < 1 { + return fmt.Errorf("layout verification requires at least one key") + } + + for _, key := range layoutKeys { + if err := layoutMb.VerifySignature(key); err != nil { + return err + } + } + return nil +} + +/* +GetSummaryLink merges the materials of the first step (as mentioned in the +layout) and the products of the last step and returns a new link. This link +reports the materials and products and summarizes the overall software supply +chain. +NOTE: The assumption is that the steps mentioned in the layout are to be +performed sequentially. So, the first step mentioned in the layout denotes what +comes into the supply chain and the last step denotes what goes out. +*/ +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, + stepName string) (Metablock, error) { + var summaryLink Link + var result Metablock + if len(layout.Steps) > 0 { + firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] + lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] + + summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Name = stepName + summaryLink.Type = firstStepLink.Signed.(Link).Type + + summaryLink.Products = lastStepLink.Signed.(Link).Products + summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + // Using the last command of the sublayout as the command + // of the summary link can be misleading. Is it necessary to + // include all the commands executed as part of sublayout? + summaryLink.Command = lastStepLink.Signed.(Link).Command + } + + result.Signed = summaryLink + + return result, nil +} + +/* +VerifySublayouts checks if any step in the supply chain is a sublayout, and if +so, recursively resolves it and replaces it with a summary link summarizing the +steps carried out in the sublayout. 
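+
+A hedged sketch of the call as it appears in InTotoVerify further below, with
+no extra intermediate PEMs and line normalization disabled:
+
+	stepsSublayoutVerified, err := VerifySublayouts(layout,
+		stepsMetadataVerified, linkDir, nil, false)
+	if err != nil {
+		return err
+	}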
+*/ +func VerifySublayouts(layout Layout, + stepsMetadataVerified map[string]map[string]Metablock, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + for stepName, linkData := range stepsMetadataVerified { + for keyID, metadata := range linkData { + if _, ok := metadata.Signed.(Layout); ok { + layoutKeys := make(map[string]Key) + layoutKeys[keyID] = layout.Keys[keyID] + + sublayoutLinkDir := fmt.Sprintf(SublayoutLinkDirFormat, + stepName, keyID) + sublayoutLinkPath := filepath.Join(superLayoutLinkPath, + sublayoutLinkDir) + summaryLink, err := InTotoVerify(metadata, layoutKeys, + sublayoutLinkPath, stepName, make(map[string]string), intermediatePems, lineNormalization) + if err != nil { + return nil, err + } + linkData[keyID] = summaryLink + } + + } + } + return stepsMetadataVerified, nil +} + +// TODO: find a better way than two helper functions for the replacer op + +func substituteParamatersInSlice(replacer *strings.Replacer, slice []string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + newSlice = append(newSlice, replacer.Replace(item)) + } + return newSlice +} + +func substituteParametersInSliceOfSlices(replacer *strings.Replacer, + slice [][]string) [][]string { + newSlice := make([][]string, 0) + for _, item := range slice { + newSlice = append(newSlice, substituteParamatersInSlice(replacer, + item)) + } + return newSlice +} + +/* +SubstituteParameters performs parameter substitution in steps and inspections +in the following fields: +- Expected Materials and Expected Products of both +- Run of inspections +- Expected Command of steps +The substitution marker is '{}' and the keyword within the braces is replaced +by a value found in the substitution map passed, parameterDictionary. The +layout with parameters substituted is returned to the calling function. +*/ +func SubstituteParameters(layout Layout, + parameterDictionary map[string]string) (Layout, error) { + + if len(parameterDictionary) == 0 { + return layout, nil + } + + parameters := make([]string, 0) + + re := regexp.MustCompile("^[a-zA-Z0-9_-]+$") + + for parameter, value := range parameterDictionary { + parameterFormatCheck := re.MatchString(parameter) + if !parameterFormatCheck { + return layout, fmt.Errorf("invalid format for parameter") + } + + parameters = append(parameters, "{"+parameter+"}") + parameters = append(parameters, value) + } + + replacer := strings.NewReplacer(parameters...) + + for i := range layout.Steps { + layout.Steps[i].ExpectedMaterials = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedMaterials) + layout.Steps[i].ExpectedProducts = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedProducts) + layout.Steps[i].ExpectedCommand = substituteParamatersInSlice(replacer, + layout.Steps[i].ExpectedCommand) + } + + for i := range layout.Inspect { + layout.Inspect[i].ExpectedMaterials = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedMaterials) + layout.Inspect[i].ExpectedProducts = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedProducts) + layout.Inspect[i].Run = substituteParamatersInSlice(replacer, + layout.Inspect[i].Run) + } + + return layout, nil +} + +/* +InTotoVerify can be used to verify an entire software supply chain according to +the in-toto specification. 
+It requires the metadata of the root layout, a map
+that contains public keys to verify the root layout signatures, a path to a
+directory from where it can load link metadata files, which are treated as
+signed evidence for the steps defined in the layout, a step name, and a
+parameter dictionary used for parameter substitution. The step name only
+matters for sublayouts, where it's important to associate the summary of that
+step with a unique name. The verification routine is as follows:
+
+1. Verify layout signature(s) using passed key(s)
+2. Verify layout expiration date
+3. Substitute parameters in layout
+4. Load link metadata files for steps of layout
+5. Verify signatures and signature thresholds for steps of layout
+6. Verify sublayouts recursively
+7. Verify command alignment for steps of layout (only warns)
+8. Verify artifact rules for steps of layout
+9. Execute inspection commands (generates link metadata for each inspection)
+10. Verify artifact rules for inspections of layout
+
+InTotoVerify returns a summary link wrapped in a Metablock object and an error
+value. If any of the verification routines fail, verification is aborted and
+the error is returned. In such an instance, the first value remains an empty
+Metablock object.
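+
+As a hedged end-to-end sketch (the layout file name and the owner key are
+hypothetical; Metablock.Load and the Key type are defined elsewhere in this
+package):
+
+	var layoutMb Metablock
+	if err := layoutMb.Load("root.layout"); err != nil {
+		return err
+	}
+	// ownerKey is assumed to have been loaded beforehand, e.g. from a PEM file
+	layoutKeys := map[string]Key{ownerKey.KeyID: ownerKey}
+	summary, err := InTotoVerify(layoutMb, layoutKeys, "path/to/links", "",
+		nil, nil, false)
+	if err != nil {
+		return err // verification of the supply chain failed
+	}
+	_ = summary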
+*/
+func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key,
+	linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) (
+	Metablock, error) {
+
+	var summaryLink Metablock
+	var err error
+
+	// Verify root signatures
+	if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil {
+		return summaryLink, err
+	}
+
+	// Extract the layout from its Metablock container (for further processing)
+	layout := layoutMb.Signed.(Layout)
+
+	// Verify layout expiration
+	if err := VerifyLayoutExpiration(layout); err != nil {
+		return summaryLink, err
+	}
+
+	// Substitute parameters in layout
+	layout, err = SubstituteParameters(layout, parameterDictionary)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Load links for layout
+	stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify link signatures
+	stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+		stepsMetadata, rootCertPool, intermediateCertPool)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify and resolve sublayouts
+	stepsSublayoutVerified, err := VerifySublayouts(layout,
+		stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify command alignment (WARNING only)
+	VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+	// Given that signature thresholds have been checked above and the rest of
+	// the relevant link properties, i.e. materials and products, have to be
+	// exactly equal, we can reduce the map of steps metadata. However, we error
+	// if the relevant properties are not equal among links of a step.
+	stepsMetadataReduced, err := ReduceStepsMetadata(layout,
+		stepsSublayoutVerified)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify artifact rules
+	if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(),
+		stepsMetadataReduced); err != nil {
+		return summaryLink, err
+	}
+
+	inspectionMetadata, err := RunInspections(layout, "", lineNormalization)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Add steps metadata to inspection metadata, because inspection artifact
+	// rules may also refer to artifacts reported by step links
+	for k, v := range stepsMetadataReduced {
+		inspectionMetadata[k] = v
+	}
+
+	if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(),
+		inspectionMetadata); err != nil {
+		return summaryLink, err
+	}
+
+	summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	return summaryLink, nil
+}
+
+/*
+InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but
+adds the possibility to select a local directory from where the inspections are run.
+*/
+func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key,
+	linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) (
+	Metablock, error) {
+
+	var summaryLink Metablock
+	var err error
+
+	// runDir sanity checks
+	// check if path exists
+	info, err := os.Stat(runDir)
+	if err != nil {
+		return Metablock{}, err
+	}
+
+	// check if runDir is a symlink
+	if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+		return Metablock{}, ErrInspectionRunDirIsSymlink
+	}
+
+	// check if runDir is writable and a directory
+	err = isWritable(runDir)
+	if err != nil {
+		return Metablock{}, err
+	}
+
+	// check if runDir is empty (we do not want to overwrite files)
+	// We abuse File.Readdirnames for this action.
+	f, err := os.Open(runDir)
+	if err != nil {
+		return Metablock{}, err
+	}
+	defer f.Close()
+	// We use Readdirnames(1) for performance reasons, one child node
+	// is enough to prove that the directory is not empty
+	_, err = f.Readdirnames(1)
+	// if io.EOF is returned as the error, the directory is empty
+	if err == io.EOF {
+		return Metablock{}, err
+	}
+	err = f.Close()
+	if err != nil {
+		return Metablock{}, err
+	}
+
+	// Verify root signatures
+	if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil {
+		return summaryLink, err
+	}
+
+	// Extract the layout from its Metablock container (for further processing)
+	layout := layoutMb.Signed.(Layout)
+
+	// Verify layout expiration
+	if err := VerifyLayoutExpiration(layout); err != nil {
+		return summaryLink, err
+	}
+
+	// Substitute parameters in layout
+	layout, err = SubstituteParameters(layout, parameterDictionary)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Load links for layout
+	stepsMetadata, err := LoadLinksForLayout(layout, linkDir)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify link signatures
+	stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout,
+		stepsMetadata, rootCertPool, intermediateCertPool)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify and resolve sublayouts
+	stepsSublayoutVerified, err := VerifySublayouts(layout,
+		stepsMetadataVerified, linkDir, intermediatePems, lineNormalization)
+	if err != nil {
+		return summaryLink, err
+	}
+
+	// Verify command alignment (WARNING only)
+	VerifyStepCommandAlignment(layout, stepsSublayoutVerified)
+
+	// Given that signature thresholds have been checked above and the rest of
+	// the relevant link properties, i.e. materials and products, have to be
+	// exactly equal, we can reduce the map of steps metadata. However, we error
+	// if the relevant properties are not equal among links of a step.
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return summaryLink, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return summaryLink, err + } + + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return summaryLink, err + } + + summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + if err != nil { + return summaryLink, err + } + + return summaryLink, nil +} diff --git a/vendor/github.com/ishidawataru/sctp/.gitignore b/vendor/github.com/ishidawataru/sctp/.gitignore deleted file mode 100644 index cf2d826c15f2..000000000000 --- a/vendor/github.com/ishidawataru/sctp/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -example/example diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml deleted file mode 100644 index a1c693c0135a..000000000000 --- a/vendor/github.com/ishidawataru/sctp/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x -# allowing test cases to fail for the versions were not suppotred by ppc64le -matrix: - allow_failures: - - go: 1.9.x - - go: 1.10.x - - go: 1.13.x - - -script: - - go test -v -race ./... - - GOOS=linux GOARCH=amd64 go build . - - GOOS=linux GOARCH=arm go build . - - GOOS=linux GOARCH=arm64 go build . - - GOOS=linux GOARCH=ppc64le go build . - - GOOS=linux GOARCH=mips64le go build . - - (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build . -# can be compiled but not functional: - - GOOS=linux GOARCH=386 go build . - - GOOS=windows GOARCH=amd64 go build . diff --git a/vendor/github.com/ishidawataru/sctp/GO_LICENSE b/vendor/github.com/ishidawataru/sctp/GO_LICENSE deleted file mode 100644 index 6a66aea5eafe..000000000000 --- a/vendor/github.com/ishidawataru/sctp/GO_LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/ishidawataru/sctp/LICENSE deleted file mode 100644 index 8dada3edaf50..000000000000 --- a/vendor/github.com/ishidawataru/sctp/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/ishidawataru/sctp/NOTICE b/vendor/github.com/ishidawataru/sctp/NOTICE deleted file mode 100644 index cfb675fd4ba4..000000000000 --- a/vendor/github.com/ishidawataru/sctp/NOTICE +++ /dev/null @@ -1,3 +0,0 @@ -This source code includes following third party code - -- ipsock_linux.go : licensed by the Go authors, see GO_LICENSE file for the license which applies to the code diff --git a/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/ishidawataru/sctp/README.md deleted file mode 100644 index 574ececa8626..000000000000 --- a/vendor/github.com/ishidawataru/sctp/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Stream Control Transmission Protocol (SCTP) ----- - -[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds) - -Examples ----- - -See `example/sctp.go` - -```go -$ cd example -$ go build -$ # run example SCTP server -$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1 -$ # run example SCTP client -$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1 -``` diff --git a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go b/vendor/github.com/ishidawataru/sctp/ipsock_linux.go deleted file mode 100644 index 3df30fa4601a..000000000000 --- a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the GO_LICENSE file. - -package sctp - -import ( - "net" - "os" - "sync" - "syscall" -) - -//from https://github.com/golang/go -// Boolean to int. -func boolint(b bool) int { - if b { - return 1 - } - return 0 -} - -//from https://github.com/golang/go -func ipToSockaddr(family int, ip net.IP, port int, zone string) (syscall.Sockaddr, error) { - switch family { - case syscall.AF_INET: - if len(ip) == 0 { - ip = net.IPv4zero - } - ip4 := ip.To4() - if ip4 == nil { - return nil, &net.AddrError{Err: "non-IPv4 address", Addr: ip.String()} - } - sa := &syscall.SockaddrInet4{Port: port} - copy(sa.Addr[:], ip4) - return sa, nil - case syscall.AF_INET6: - // In general, an IP wildcard address, which is either - // "0.0.0.0" or "::", means the entire IP addressing - // space. For some historical reason, it is used to - // specify "any available address" on some operations - // of IP node. 
-		//
-		// When the IP node supports IPv4-mapped IPv6 address,
-		// we allow a listener to listen to the wildcard
-		// address of both IP addressing spaces by specifying
-		// IPv6 wildcard address.
-		if len(ip) == 0 || ip.Equal(net.IPv4zero) {
-			ip = net.IPv6zero
-		}
-		// We accept any IPv6 address including IPv4-mapped
-		// IPv6 address.
-		ip6 := ip.To16()
-		if ip6 == nil {
-			return nil, &net.AddrError{Err: "non-IPv6 address", Addr: ip.String()}
-		}
-		//we set ZoneId to 0, as currently we use this function only to probe the IP capabilities of the host
-		//if real Zone handling is required, the zone cache implementation in golang/net should be pulled here
-		sa := &syscall.SockaddrInet6{Port: port, ZoneId: 0}
-		copy(sa.Addr[:], ip6)
-		return sa, nil
-	}
-	return nil, &net.AddrError{Err: "invalid address family", Addr: ip.String()}
-}
-
-//from https://github.com/golang/go
-func sockaddr(a *net.TCPAddr, family int) (syscall.Sockaddr, error) {
-	if a == nil {
-		return nil, nil
-	}
-	return ipToSockaddr(family, a.IP, a.Port, a.Zone)
-}
-
-//from https://github.com/golang/go
-type ipStackCapabilities struct {
-	sync.Once // guards following
-	ipv4Enabled           bool
-	ipv6Enabled           bool
-	ipv4MappedIPv6Enabled bool
-}
-
-//from https://github.com/golang/go
-var ipStackCaps ipStackCapabilities
-
-//from https://github.com/golang/go
-// supportsIPv4 reports whether the platform supports IPv4 networking
-// functionality.
-func supportsIPv4() bool {
-	ipStackCaps.Once.Do(ipStackCaps.probe)
-	return ipStackCaps.ipv4Enabled
-}
-
-//from https://github.com/golang/go
-// supportsIPv6 reports whether the platform supports IPv6 networking
-// functionality.
-func supportsIPv6() bool {
-	ipStackCaps.Once.Do(ipStackCaps.probe)
-	return ipStackCaps.ipv6Enabled
-}
-
-//from https://github.com/golang/go
-// supportsIPv4map reports whether the platform supports mapping an
-// IPv4 address inside an IPv6 address at transport layer
-// protocols. See RFC 4291, RFC 4038 and RFC 3493.
-func supportsIPv4map() bool {
-	ipStackCaps.Once.Do(ipStackCaps.probe)
-	return ipStackCaps.ipv4MappedIPv6Enabled
-}
-
-//from https://github.com/golang/go
-// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
-// capabilities which are controlled by the IPV6_V6ONLY socket option
-// and kernel configuration.
-//
-// Should we try to use the IPv4 socket interface if we're only
-// dealing with IPv4 sockets? As long as the host system understands
-// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to
-// the IPv6 interface. That simplifies our code and is most
-// general. Unfortunately, we need to run on kernels built without
-// IPv6 support too. So probe the kernel to figure it out.
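// [Editor's note] A minimal, self-contained sketch of the probing idea the
// comment above describes, assuming only the standard "syscall" and "net"
// packages; the function name here is hypothetical and not part of the
// vendored code. The real probe() below does the same check for plain IPv6
// and IPv4-mapped IPv6 in one loop.
//
//	func probeIPv4MappedIPv6() bool {
//		s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
//		if err != nil {
//			return false // no IPv6 socket support at all
//		}
//		defer syscall.Close(s)
//		// IPV6_V6ONLY=0 asks the kernel to accept IPv4-mapped IPv6 addresses.
//		syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
//		// Binding IPv4 loopback expressed as ::ffff:127.0.0.1 succeeds only
//		// when the dual-stack mapping actually works on this kernel.
//		sa := &syscall.SockaddrInet6{}
//		copy(sa.Addr[:], net.ParseIP("::ffff:127.0.0.1").To16())
//		return syscall.Bind(s, sa) == nil
//	}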
-func (p *ipStackCapabilities) probe() { - s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) - switch err { - case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT: - case nil: - syscall.Close(s) - p.ipv4Enabled = true - } - var probes = []struct { - laddr net.TCPAddr - value int - }{ - // IPv6 communication capability - {laddr: net.TCPAddr{IP: net.IPv6loopback}, value: 1}, - // IPv4-mapped IPv6 address communication capability - {laddr: net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}, value: 0}, - } - - for i := range probes { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) - if err != nil { - continue - } - defer syscall.Close(s) - syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value) - sa, err := sockaddr(&(probes[i].laddr), syscall.AF_INET6) - if err != nil { - continue - } - if err := syscall.Bind(s, sa); err != nil { - continue - } - if i == 0 { - p.ipv6Enabled = true - } else { - p.ipv4MappedIPv6Enabled = true - } - } -} - -//from https://github.com/golang/go -//Change: we check the first IP address in the list of candidate SCTP IP addresses -func (a *SCTPAddr) isWildcard() bool { - if a == nil { - return true - } - if 0 == len(a.IPAddrs) { - return true - } - - return a.IPAddrs[0].IP.IsUnspecified() -} - -func (a *SCTPAddr) family() int { - if a != nil { - for _, ip := range a.IPAddrs { - if ip.IP.To4() == nil { - return syscall.AF_INET6 - } - } - } - return syscall.AF_INET -} - -//from https://github.com/golang/go -func favoriteAddrFamily(network string, laddr *SCTPAddr, raddr *SCTPAddr, mode string) (family int, ipv6only bool) { - switch network[len(network)-1] { - case '4': - return syscall.AF_INET, false - case '6': - return syscall.AF_INET6, true - } - - if mode == "listen" && (laddr == nil || laddr.isWildcard()) { - if supportsIPv4map() || !supportsIPv4() { - return syscall.AF_INET6, false - } - if laddr == nil { - return syscall.AF_INET, false - } - return laddr.family(), false - } - - if (laddr == nil || laddr.family() == syscall.AF_INET) && - (raddr == nil || raddr.family() == syscall.AF_INET) { - return syscall.AF_INET, false - } - return syscall.AF_INET6, false -} - -//from https://github.com/golang/go -//Changes: it is for SCTP only -func setDefaultSockopts(s int, family int, ipv6only bool) error { - if family == syscall.AF_INET6 { - // Allow both IP versions even if the OS default - // is otherwise. Note that some operating systems - // never admit this option. - syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) - } - // Allow broadcast. - return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go deleted file mode 100644 index 94842f42702f..000000000000 --- a/vendor/github.com/ishidawataru/sctp/sctp.go +++ /dev/null @@ -1,729 +0,0 @@ -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package sctp - -import ( - "bytes" - "encoding/binary" - "fmt" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - "unsafe" -) - -const ( - SOL_SCTP = 132 - - SCTP_BINDX_ADD_ADDR = 0x01 - SCTP_BINDX_REM_ADDR = 0x02 - - MSG_NOTIFICATION = 0x8000 -) - -const ( - SCTP_RTOINFO = iota - SCTP_ASSOCINFO - SCTP_INITMSG - SCTP_NODELAY - SCTP_AUTOCLOSE - SCTP_SET_PEER_PRIMARY_ADDR - SCTP_PRIMARY_ADDR - SCTP_ADAPTATION_LAYER - SCTP_DISABLE_FRAGMENTS - SCTP_PEER_ADDR_PARAMS - SCTP_DEFAULT_SENT_PARAM - SCTP_EVENTS - SCTP_I_WANT_MAPPED_V4_ADDR - SCTP_MAXSEG - SCTP_STATUS - SCTP_GET_PEER_ADDR_INFO - SCTP_DELAYED_ACK_TIME - SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME - SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME - - SCTP_SOCKOPT_BINDX_ADD = 100 - SCTP_SOCKOPT_BINDX_REM = 101 - SCTP_SOCKOPT_PEELOFF = 102 - SCTP_GET_PEER_ADDRS = 108 - SCTP_GET_LOCAL_ADDRS = 109 - SCTP_SOCKOPT_CONNECTX = 110 - SCTP_SOCKOPT_CONNECTX3 = 111 -) - -const ( - SCTP_EVENT_DATA_IO = 1 << iota - SCTP_EVENT_ASSOCIATION - SCTP_EVENT_ADDRESS - SCTP_EVENT_SEND_FAILURE - SCTP_EVENT_PEER_ERROR - SCTP_EVENT_SHUTDOWN - SCTP_EVENT_PARTIAL_DELIVERY - SCTP_EVENT_ADAPTATION_LAYER - SCTP_EVENT_AUTHENTICATION - SCTP_EVENT_SENDER_DRY - - SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY -) - -type SCTPNotificationType int - -const ( - SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15)) - SCTP_ASSOC_CHANGE - SCTP_PEER_ADDR_CHANGE - SCTP_SEND_FAILED - SCTP_REMOTE_ERROR - SCTP_SHUTDOWN_EVENT - SCTP_PARTIAL_DELIVERY_EVENT - SCTP_ADAPTATION_INDICATION - SCTP_AUTHENTICATION_INDICATION - SCTP_SENDER_DRY_EVENT -) - -type NotificationHandler func([]byte) error - -type EventSubscribe struct { - DataIO uint8 - Association uint8 - Address uint8 - SendFailure uint8 - PeerError uint8 - Shutdown uint8 - PartialDelivery uint8 - AdaptationLayer uint8 - Authentication uint8 - SenderDry uint8 -} - -const ( - SCTP_CMSG_INIT = iota - SCTP_CMSG_SNDRCV - SCTP_CMSG_SNDINFO - SCTP_CMSG_RCVINFO - SCTP_CMSG_NXTINFO -) - -const ( - SCTP_UNORDERED = 1 << iota - SCTP_ADDR_OVER - SCTP_ABORT - SCTP_SACK_IMMEDIATELY - SCTP_EOF -) - -const ( - SCTP_MAX_STREAM = 0xffff -) - -type InitMsg struct { - NumOstreams uint16 - MaxInstreams uint16 - MaxAttempts uint16 - MaxInitTimeout uint16 -} - -type SndRcvInfo struct { - Stream uint16 - SSN uint16 - Flags uint16 - _ uint16 - PPID uint32 - Context uint32 - TTL uint32 - TSN uint32 - CumTSN uint32 - AssocID int32 -} - -type SndInfo struct { - SID uint16 - Flags uint16 - PPID uint32 - Context uint32 - AssocID int32 -} - -type GetAddrsOld struct { - AssocID int32 - AddrNum int32 - Addrs uintptr -} - -type NotificationHeader struct { - Type uint16 - Flags uint16 - Length uint32 -} - -type SCTPState uint16 - -const ( - SCTP_COMM_UP = SCTPState(iota) - SCTP_COMM_LOST - SCTP_RESTART - SCTP_SHUTDOWN_COMP - SCTP_CANT_STR_ASSOC -) - -var nativeEndian binary.ByteOrder -var sndRcvInfoSize uintptr - -func init() { - i := uint16(1) - if *(*byte)(unsafe.Pointer(&i)) == 0 { - nativeEndian = binary.BigEndian - } else { - nativeEndian = binary.LittleEndian - } - info := SndRcvInfo{} - sndRcvInfoSize = unsafe.Sizeof(info) -} - -func toBuf(v interface{}) []byte { - var buf bytes.Buffer - binary.Write(&buf, 
nativeEndian, v) - return buf.Bytes() -} - -func htons(h uint16) uint16 { - if nativeEndian == binary.LittleEndian { - return (h << 8 & 0xff00) | (h >> 8 & 0xff) - } - return h -} - -var ntohs = htons - -// setInitOpts sets options for an SCTP association initialization -// see https://tools.ietf.org/html/rfc4960#page-25 -func setInitOpts(fd int, options InitMsg) error { - optlen := unsafe.Sizeof(options) - _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&options)), uintptr(optlen)) - return err -} - -func setNumOstreams(fd, num int) error { - return setInitOpts(fd, InitMsg{NumOstreams: uint16(num)}) -} - -type SCTPAddr struct { - IPAddrs []net.IPAddr - Port int -} - -func (a *SCTPAddr) ToRawSockAddrBuf() []byte { - p := htons(uint16(a.Port)) - if len(a.IPAddrs) == 0 { // if a.IPAddrs list is empty - fall back to IPv4 zero addr - s := syscall.RawSockaddrInet4{ - Family: syscall.AF_INET, - Port: p, - } - copy(s.Addr[:], net.IPv4zero) - return toBuf(s) - } - buf := []byte{} - for _, ip := range a.IPAddrs { - ipBytes := ip.IP - if len(ipBytes) == 0 { - ipBytes = net.IPv4zero - } - if ip4 := ipBytes.To4(); ip4 != nil { - s := syscall.RawSockaddrInet4{ - Family: syscall.AF_INET, - Port: p, - } - copy(s.Addr[:], ip4) - buf = append(buf, toBuf(s)...) - } else { - var scopeid uint32 - ifi, err := net.InterfaceByName(ip.Zone) - if err == nil { - scopeid = uint32(ifi.Index) - } - s := syscall.RawSockaddrInet6{ - Family: syscall.AF_INET6, - Port: p, - Scope_id: scopeid, - } - copy(s.Addr[:], ipBytes) - buf = append(buf, toBuf(s)...) - } - } - return buf -} - -func (a *SCTPAddr) String() string { - var b bytes.Buffer - - for n, i := range a.IPAddrs { - if i.IP.To4() != nil { - b.WriteString(i.String()) - } else if i.IP.To16() != nil { - b.WriteRune('[') - b.WriteString(i.String()) - b.WriteRune(']') - } - if n < len(a.IPAddrs)-1 { - b.WriteRune('/') - } - } - b.WriteRune(':') - b.WriteString(strconv.Itoa(a.Port)) - return b.String() -} - -func (a *SCTPAddr) Network() string { return "sctp" } - -func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) { - tcpnet := "" - switch network { - case "", "sctp": - tcpnet = "tcp" - case "sctp4": - tcpnet = "tcp4" - case "sctp6": - tcpnet = "tcp6" - default: - return nil, fmt.Errorf("invalid net: %s", network) - } - elems := strings.Split(addrs, "/") - if len(elems) == 0 { - return nil, fmt.Errorf("invalid input: %s", addrs) - } - ipaddrs := make([]net.IPAddr, 0, len(elems)) - for _, e := range elems[:len(elems)-1] { - tcpa, err := net.ResolveTCPAddr(tcpnet, e+":") - if err != nil { - return nil, err - } - ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) - } - tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1]) - if err != nil { - return nil, err - } - if tcpa.IP != nil { - ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) - } else { - ipaddrs = nil - } - return &SCTPAddr{ - IPAddrs: ipaddrs, - Port: tcpa.Port, - }, nil -} - -func SCTPConnect(fd int, addr *SCTPAddr) (int, error) { - buf := addr.ToRawSockAddrBuf() - param := GetAddrsOld{ - AddrNum: int32(len(buf)), - Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err == nil { - return int(param.AssocID), nil - } else if err != syscall.ENOPROTOOPT { - return 0, err - } - r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) - return 
int(r0), err -} - -func SCTPBind(fd int, addr *SCTPAddr, flags int) error { - var option uintptr - switch flags { - case SCTP_BINDX_ADD_ADDR: - option = SCTP_SOCKOPT_BINDX_ADD - case SCTP_BINDX_REM_ADDR: - option = SCTP_SOCKOPT_BINDX_REM - default: - return syscall.EINVAL - } - - buf := addr.ToRawSockAddrBuf() - _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) - return err -} - -type SCTPConn struct { - _fd int32 - notificationHandler NotificationHandler -} - -func (c *SCTPConn) fd() int { - return int(atomic.LoadInt32(&c._fd)) -} - -func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn { - conn := &SCTPConn{ - _fd: int32(fd), - notificationHandler: handler, - } - return conn -} - -func (c *SCTPConn) Write(b []byte) (int, error) { - return c.SCTPWrite(b, nil) -} - -func (c *SCTPConn) Read(b []byte) (int, error) { - n, _, err := c.SCTPRead(b) - if n < 0 { - n = 0 - } - return n, err -} - -func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error { - return setInitOpts(c.fd(), InitMsg{ - NumOstreams: uint16(numOstreams), - MaxInstreams: uint16(maxInstreams), - MaxAttempts: uint16(maxAttempts), - MaxInitTimeout: uint16(maxInitTimeout), - }) -} - -func (c *SCTPConn) SubscribeEvents(flags int) error { - var d, a, ad, sf, p, sh, pa, ada, au, se uint8 - if flags&SCTP_EVENT_DATA_IO > 0 { - d = 1 - } - if flags&SCTP_EVENT_ASSOCIATION > 0 { - a = 1 - } - if flags&SCTP_EVENT_ADDRESS > 0 { - ad = 1 - } - if flags&SCTP_EVENT_SEND_FAILURE > 0 { - sf = 1 - } - if flags&SCTP_EVENT_PEER_ERROR > 0 { - p = 1 - } - if flags&SCTP_EVENT_SHUTDOWN > 0 { - sh = 1 - } - if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 { - pa = 1 - } - if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 { - ada = 1 - } - if flags&SCTP_EVENT_AUTHENTICATION > 0 { - au = 1 - } - if flags&SCTP_EVENT_SENDER_DRY > 0 { - se = 1 - } - param := EventSubscribe{ - DataIO: d, - Association: a, - Address: ad, - SendFailure: sf, - PeerError: p, - Shutdown: sh, - PartialDelivery: pa, - AdaptationLayer: ada, - Authentication: au, - SenderDry: se, - } - optlen := unsafe.Sizeof(param) - _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) - return err -} - -func (c *SCTPConn) SubscribedEvents() (int, error) { - param := EventSubscribe{} - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return 0, err - } - var flags int - if param.DataIO > 0 { - flags |= SCTP_EVENT_DATA_IO - } - if param.Association > 0 { - flags |= SCTP_EVENT_ASSOCIATION - } - if param.Address > 0 { - flags |= SCTP_EVENT_ADDRESS - } - if param.SendFailure > 0 { - flags |= SCTP_EVENT_SEND_FAILURE - } - if param.PeerError > 0 { - flags |= SCTP_EVENT_PEER_ERROR - } - if param.Shutdown > 0 { - flags |= SCTP_EVENT_SHUTDOWN - } - if param.PartialDelivery > 0 { - flags |= SCTP_EVENT_PARTIAL_DELIVERY - } - if param.AdaptationLayer > 0 { - flags |= SCTP_EVENT_ADAPTATION_LAYER - } - if param.Authentication > 0 { - flags |= SCTP_EVENT_AUTHENTICATION - } - if param.SenderDry > 0 { - flags |= SCTP_EVENT_SENDER_DRY - } - return flags, nil -} - -func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error { - optlen := unsafe.Sizeof(*info) - _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen)) - return err -} - -func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) { - info := &SndRcvInfo{} - optlen := 
unsafe.Sizeof(*info) - _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen))) - return info, err -} - -func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) { - addr := &SCTPAddr{ - IPAddrs: make([]net.IPAddr, n), - } - - switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family { - case syscall.AF_INET: - addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) - tmp := syscall.RawSockaddrInet4{} - size := unsafe.Sizeof(tmp) - for i := 0; i < n; i++ { - a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer( - uintptr(ptr) + size*uintptr(i))) - addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:]} - } - case syscall.AF_INET6: - addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) - tmp := syscall.RawSockaddrInet6{} - size := unsafe.Sizeof(tmp) - for i := 0; i < n; i++ { - a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer( - uintptr(ptr) + size*uintptr(i))) - var zone string - ifi, err := net.InterfaceByIndex(int(a.Scope_id)) - if err == nil { - zone = ifi.Name - } - addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:], Zone: zone} - } - default: - return nil, fmt.Errorf("unknown address family: %d", family) - } - return addr, nil -} - -func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) { - - type getaddrs struct { - assocId int32 - addrNum uint32 - addrs [4096]byte - } - param := getaddrs{ - assocId: int32(id), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), int(param.addrNum)) -} - -func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) { - - type sctpGetSetPrim struct { - assocId int32 - addrs [128]byte - } - param := sctpGetSetPrim{ - assocId: int32(0), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), 1) -} - -func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) { - return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS) -} - -func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) { - return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS) -} - -func (c *SCTPConn) LocalAddr() net.Addr { - addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS) - if err != nil { - return nil - } - return addr -} - -func (c *SCTPConn) RemoteAddr() net.Addr { - addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS) - if err != nil { - return nil - } - return addr -} - -func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) { - type peeloffArg struct { - assocId int32 - sd int - } - param := peeloffArg{ - assocId: int32(id), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return &SCTPConn{_fd: int32(param.sd)}, nil -} - -func (c *SCTPConn) SetDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -func (c *SCTPConn) SetReadDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -func (c *SCTPConn) SetWriteDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -type SCTPListener struct { - fd int - m sync.Mutex -} - -func (ln *SCTPListener) Addr() net.Addr { - laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS) - if 
err != nil { - return nil - } - return laddr -} - -type SCTPSndRcvInfoWrappedConn struct { - conn *SCTPConn -} - -func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn { - conn.SubscribeEvents(SCTP_EVENT_DATA_IO) - return &SCTPSndRcvInfoWrappedConn{conn} -} - -func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) { - if len(b) < int(sndRcvInfoSize) { - return 0, syscall.EINVAL - } - info := (*SndRcvInfo)(unsafe.Pointer(&b[0])) - n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info) - return n + int(sndRcvInfoSize), err -} - -func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) { - if len(b) < int(sndRcvInfoSize) { - return 0, syscall.EINVAL - } - n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:]) - if err != nil { - return n, err - } - copy(b, toBuf(info)) - return n + int(sndRcvInfoSize), err -} - -func (c *SCTPSndRcvInfoWrappedConn) Close() error { - return c.conn.Close() -} - -func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetWriteBuffer(bytes int) error { - return c.conn.SetWriteBuffer(bytes) -} - -func (c *SCTPSndRcvInfoWrappedConn) GetWriteBuffer() (int, error) { - return c.conn.GetWriteBuffer() -} - -func (c *SCTPSndRcvInfoWrappedConn) SetReadBuffer(bytes int) error { - return c.conn.SetReadBuffer(bytes) -} - -func (c *SCTPSndRcvInfoWrappedConn) GetReadBuffer() (int, error) { - return c.conn.GetReadBuffer() -} - -// SocketConfig contains options for the SCTP socket. -type SocketConfig struct { - // If Control is not nil it is called after the socket is created but before - // it is bound or connected. - Control func(network, address string, c syscall.RawConn) error - - // InitMsg is the options to send in the initial SCTP message - InitMsg InitMsg -} - -func (cfg *SocketConfig) Listen(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return listenSCTPExtConfig(net, laddr, cfg.InitMsg, cfg.Control) -} - -func (cfg *SocketConfig) Dial(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return dialSCTPExtConfig(net, laddr, raddr, cfg.InitMsg, cfg.Control) -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go deleted file mode 100644 index d96d09e5ca92..000000000000 --- a/vendor/github.com/ishidawataru/sctp/sctp_linux.go +++ /dev/null @@ -1,305 +0,0 @@ -// +build linux,!386 -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
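// [Editor's note] Before its removal, a minimal consumer of this package
// looked roughly like the sketch below. This is a hedged illustration built
// from the exported API declared in sctp.go, not code that shipped with the
// library:
//
//	laddr, _ := sctp.ResolveSCTPAddr("sctp", "127.0.0.1:7000")
//	ln, _ := sctp.ListenSCTP("sctp", laddr)
//	go func() {
//		conn, _ := ln.AcceptSCTP() // blocks until a peer connects
//		buf := make([]byte, 256)
//		n, info, _ := conn.SCTPRead(buf) // info carries stream/PPID metadata
//		_, _ = n, info
//	}()
//	conn, _ := sctp.DialSCTP("sctp", nil, laddr) // local bind is optional when dialing
//	conn.SCTPWrite([]byte("hello"), &sctp.SndRcvInfo{Stream: 0})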
- -package sctp - -import ( - "io" - "net" - "sync/atomic" - "syscall" - "unsafe" -) - -func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386 - r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, - uintptr(fd), - SOL_SCTP, - optname, - optval, - optlen, - 0) - if errno != 0 { - return r0, r1, errno - } - return r0, r1, nil -} - -func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386 - r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, - uintptr(fd), - SOL_SCTP, - optname, - optval, - optlen, - 0) - if errno != 0 { - return r0, r1, errno - } - return r0, r1, nil -} - -type rawConn struct { - sockfd int -} - -func (r rawConn) Control(f func(fd uintptr)) error { - f(uintptr(r.sockfd)) - return nil -} - -func (r rawConn) Read(f func(fd uintptr) (done bool)) error { - panic("not implemented") -} - -func (r rawConn) Write(f func(fd uintptr) (done bool)) error { - panic("not implemented") -} - -func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { - var cbuf []byte - if info != nil { - cmsgBuf := toBuf(info) - hdr := &syscall.Cmsghdr{ - Level: syscall.IPPROTO_SCTP, - Type: SCTP_CMSG_SNDRCV, - } - - // bitwidth of hdr.Len is platform-specific, - // so we use hdr.SetLen() rather than directly setting hdr.Len - hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf))) - cbuf = append(toBuf(hdr), cmsgBuf...) - } - return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0) -} - -func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) { - msgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, err - } - for _, m := range msgs { - if m.Header.Level == syscall.IPPROTO_SCTP { - switch m.Header.Type { - case SCTP_CMSG_SNDRCV: - return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil - } - } - } - return nil, nil -} - -func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { - oob := make([]byte, 254) - for { - n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0) - if err != nil { - return n, nil, err - } - - if n == 0 && oobn == 0 { - return 0, nil, io.EOF - } - - if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil { - if err := c.notificationHandler(b[:n]); err != nil { - return 0, nil, err - } - } else { - var info *SndRcvInfo - if oobn > 0 { - info, err = parseSndRcvInfo(oob[:oobn]) - } - return n, info, err - } - } -} - -func (c *SCTPConn) Close() error { - if c != nil { - fd := atomic.SwapInt32(&c._fd, -1) - if fd > 0 { - info := &SndRcvInfo{ - Flags: SCTP_EOF, - } - c.SCTPWrite(nil, info) - syscall.Shutdown(int(fd), syscall.SHUT_RDWR) - return syscall.Close(int(fd)) - } - } - return syscall.EBADF -} - -func (c *SCTPConn) SetWriteBuffer(bytes int) error { - return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes) -} - -func (c *SCTPConn) GetWriteBuffer() (int, error) { - return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF) -} - -func (c *SCTPConn) SetReadBuffer(bytes int) error { - return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes) -} - -func (c *SCTPConn) GetReadBuffer() (int, error) { - return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF) -} - -// ListenSCTP - start listener on specified address/port -func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return ListenSCTPExt(net, laddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) -} - -// ListenSCTPExt - 
start listener on specified address/port with given SCTP options
-func ListenSCTPExt(network string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) {
-	return listenSCTPExtConfig(network, laddr, options, nil)
-}
-
-// listenSCTPExtConfig - start listener on specified address/port with given SCTP options and socket configuration
-func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) {
-	af, ipv6only := favoriteAddrFamily(network, laddr, nil, "listen")
-	sock, err := syscall.Socket(
-		af,
-		syscall.SOCK_STREAM,
-		syscall.IPPROTO_SCTP,
-	)
-	if err != nil {
-		return nil, err
-	}
-
-	// close socket on error
-	defer func() {
-		if err != nil {
-			syscall.Close(sock)
-		}
-	}()
-	if err = setDefaultSockopts(sock, af, ipv6only); err != nil {
-		return nil, err
-	}
-	if control != nil {
-		rc := rawConn{sockfd: sock}
-		if err = control(network, laddr.String(), rc); err != nil {
-			return nil, err
-		}
-	}
-	err = setInitOpts(sock, options)
-	if err != nil {
-		return nil, err
-	}
-
-	if laddr != nil {
-		// If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address
-		if len(laddr.IPAddrs) == 0 {
-			if af == syscall.AF_INET {
-				laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero})
-			} else if af == syscall.AF_INET6 {
-				laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero})
-			}
-		}
-		err = SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
-		if err != nil {
-			return nil, err
-		}
-	}
-	err = syscall.Listen(sock, syscall.SOMAXCONN)
-	if err != nil {
-		return nil, err
-	}
-	return &SCTPListener{
-		fd: sock,
-	}, nil
-}
-
-// AcceptSCTP waits for and returns the next SCTP connection to the listener.
-func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) {
-	fd, _, err := syscall.Accept4(ln.fd, 0)
-	return NewSCTPConn(fd, nil), err
-}
-
-// Accept waits for and returns the next connection to the listener.
-func (ln *SCTPListener) Accept() (net.Conn, error) { - return ln.AcceptSCTP() -} - -func (ln *SCTPListener) Close() error { - syscall.Shutdown(ln.fd, syscall.SHUT_RDWR) - return syscall.Close(ln.fd) -} - -// DialSCTP - bind socket to laddr (if given) and connect to raddr -func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return DialSCTPExt(net, laddr, raddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) -} - -// DialSCTPExt - same as DialSCTP but with given SCTP options -func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { - return dialSCTPExtConfig(network, laddr, raddr, options, nil) -} - -// dialSCTPExtConfig - same as DialSCTP but with given SCTP options and socket configuration -func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { - af, ipv6only := favoriteAddrFamily(network, laddr, raddr, "dial") - sock, err := syscall.Socket( - af, - syscall.SOCK_STREAM, - syscall.IPPROTO_SCTP, - ) - if err != nil { - return nil, err - } - - // close socket on error - defer func() { - if err != nil { - syscall.Close(sock) - } - }() - if err = setDefaultSockopts(sock, af, ipv6only); err != nil { - return nil, err - } - if control != nil { - rc := rawConn{sockfd: sock} - if err = control(network, laddr.String(), rc); err != nil { - return nil, err - } - } - err = setInitOpts(sock, options) - if err != nil { - return nil, err - } - if laddr != nil { - // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address - if len(laddr.IPAddrs) == 0 { - if af == syscall.AF_INET { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) - } else if af == syscall.AF_INET6 { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) - } - } - err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) - if err != nil { - return nil, err - } - } - _, err = SCTPConnect(sock, raddr) - if err != nil { - return nil, err - } - return NewSCTPConn(sock, nil), nil -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go deleted file mode 100644 index 118fe159e92d..000000000000 --- a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !linux linux,386 -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sctp - -import ( - "errors" - "net" - "runtime" - "syscall" -) - -var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH) - -func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - return 0, 0, ErrUnsupported -} - -func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - return 0, 0, ErrUnsupported -} - -func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { - return 0, ErrUnsupported -} - -func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { - return 0, nil, ErrUnsupported -} - -func (c *SCTPConn) Close() error { - return ErrUnsupported -} - -func (c *SCTPConn) SetWriteBuffer(bytes int) error { - return ErrUnsupported -} - -func (c *SCTPConn) GetWriteBuffer() (int, error) { - return 0, ErrUnsupported -} - -func (c *SCTPConn) SetReadBuffer(bytes int) error { - return ErrUnsupported -} - -func (c *SCTPConn) GetReadBuffer() (int, error) { - return 0, ErrUnsupported -} - -func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func ListenSCTPExt(net string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) Accept() (net.Conn, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) Close() error { - return ErrUnsupported -} - -func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000000..5091fb0736c8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000000..c56f37c0c943 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,28 @@ +language: go + +sudo: false + +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - tip + +allow_failures: + - go: tip + +script: make build + +matrix: + include: + - language: go + go: 1.15.x + script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000000..b03310a91fde --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 000000000000..fb38ec2760e1
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,51 @@
+
+CMD = jpgo
+
+SRC_PKGS=./ ./cmd/... ./fuzz/...
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test        to run all the tests"
+	@echo "  build       to build the library and jp executable"
+	@echo "  generate    to run codegen"
+
+
+generate:
+	go generate ${SRC_PKGS}
+
+build:
+	rm -f $(CMD)
+	go build ${SRC_PKGS}
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test: test-internal-testify
+	echo "making tests ${SRC_PKGS}"
+	go test -v ${SRC_PKGS}
+
+check:
+	go vet ${SRC_PKGS}
+	@echo "golint ${SRC_PKGS}"
+	@lint=`golint ${SRC_PKGS}`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out
+
+test-internal-testify:
+	cd internal/testify && go test ./...
+
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 000000000000..110ad799976d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,87 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+go-jmespath is a Go implementation of JMESPath,
+which is a query language for JSON. It will take a JSON
+document and transform it into another JSON document
+through a JMESPath expression.
+
+Using go-jmespath is really easy. There's a single function
+you use, `jmespath.Search`:
+
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the ``Search`` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `Search` function evaluated
+the expression against the input data to produce the result ``2``.
+
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+                              {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+                               {"age": 30}, {"age": 35},
+                               {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[?age > `30`]", data)
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+ > var jsondata = []byte(`{"foo": "bar"}`)
+ > var data interface{}
+ > err := json.Unmarshal(jsondata, &data)
+ > precompiled, err := Compile("foo")
+ > if err != nil {
+ >     // ... handle the error
+ > }
+ > result, err := precompiled.Search(data)
+ result = "bar"
+```
+
+## More Resources
+
+The examples above only show a small amount of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages including
+python, ruby, php, lua, etc. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000000..010efe9bfba3
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000000..1cd2d239c969 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000000..9b7cd89b4bcc --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + 
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
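	// [Editor's note] The ok value from toArrayStr can safely be discarded
	// here: resolveArgs/typeCheck has already guaranteed the argument is
	// either array[number] or array[string], and the numeric case returned
	// above.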
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
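+	// The jpArrayString type check has already verified that every
+	// element is a string, so the per-item assertion below cannot panic.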
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000000..13c74604c2c8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
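+// Callers typically obtain the ASTNode from Parser.Parse and pass the
+// unmarshalled JSON document as the value argument.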
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
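+		// value may be a typed Go slice such as []string rather than
+		// []interface{}, so fall back to indexing via reflection.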
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000000..817900c8f529 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
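+			// currentPos has already advanced past this rune, so subtract
+			// lastWidth to record the token's starting offset.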
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
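+	// The first digit (or a leading '-') was consumed by tokenize before
+	// this call, hence the lastWidth adjustment to the start offset.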
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000000..4abc303ab4a9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
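+// It tokenizes the expression, builds an AST with a top-down operator
+// precedence (Pratt) parser, and fails if unparsed input remains.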
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000000..dae79cbdf338 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000000..ddc1b7d7d460 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf28..d31b37815279 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 9ddf39f6f37e..9ec000ffaa9d 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,113 @@ This package provides various compression algorithms. # changelog +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593
+	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster when running asynchronously, since the goroutine allocation splits the workload more effectively. A typical stream will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
    + See changes to v1.14.x + * Feb 22, 2022 (v1.14.4) * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) @@ -42,7 +149,11 @@ This package provides various compression algorithms. * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
    +
    + See changes to v1.13.x + * Aug 30, 2021 (v1.13.5) * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) @@ -71,6 +182,8 @@ This package provides various compression algorithms. * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors (see the sketch after this changelog block). * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
    +
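The gzhttp wrapping referenced in the v1.13.x notes above can be sketched as follows (a minimal illustration, assuming only the public gzhttp.GzipHandler and gzhttp.Transport entry points):

```go
// Sketch: wrapping an HTTP server handler and an HTTP client transport
// with gzhttp, so bodies are compressed/decompressed transparently.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	// Server side: GzipHandler compresses responses for clients that
	// advertise Accept-Encoding: gzip.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "hello, compressed world")
	})
	srv := httptest.NewServer(gzhttp.GzipHandler(handler))
	defer srv.Close()

	// Client side: Transport requests compressed bodies and decompresses
	// them before they reach the caller.
	client := http.Client{Transport: gzhttp.Transport(http.DefaultTransport)}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```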
    See changes to v1.12.x diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 03562db16fb8..504a7be9dae3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -215,11 +215,6 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - func (b *bitReaderShifted) remaining() uint { return b.off*8 + uint(64-b.bitsRead) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d4ff..ec71f7a349a1 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea99c..4dcab8d23277 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index bc95ac623bd2..4d14542facf3 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 3ae7d46771fb..42a237eac4ab 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -11,7 +11,6 @@ import ( type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -19,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq [4]byte - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -35,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte { return &[4][256]byte{} } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. // The cap of the output buffer will be the maximum decompressed size. // The length of the supplied input must match the end of a block exactly. @@ -725,189 +615,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. 
// The *capacity* of the dst slice must match the destination size of @@ -1056,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { @@ -1178,7 +888,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. @@ -1291,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { + if len(out)-bufoff < dstEvery*3 { d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 } } if off > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 000000000000..ba7e8e6b0276 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisations of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of their main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_amd64 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. + +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. + +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using the Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress1x_main_loop_amd64 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. + +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress1x_main_loop_bmi2 is an x86 (BMI2) assembler implementation +// of Decompress1X when tablelog > 8. + +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000000..8d2187a2ce6a --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,846 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. + +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := 
br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 + + // b.value |= 
uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, 
DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ 
CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000000..908c17de63fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic 
implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000000..3954c51219b2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For a more versatile solution, check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call the returned function to restore the previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000000..e802579c4f96 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +//go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000000..4465fbe9e905 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2.
EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 511bba65db8f..298c4f8e97da 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= len(lit) && len(lit) <= 65536 func emitLiteral(dst, lit []byte) int { @@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= 65535 // 4 <= length && length <= 65535 @@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int { // src[i:i+k-j] and src[j:k] have the same contents. // // It assumes that: +// // 0 <= i && i < j && j <= len(src) func extendMatch(src []byte, i, j int) int { for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { @@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // The table element type is uint16, as s < sLimit and sLimit < len(src) diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c876c591acce..65b38abed805 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + ## Installation Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. 
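For orientation, a buffer-to-buffer round trip using the package's EncodeAll/DecodeAll APIs might look like this (a minimal sketch, not from the upstream README; see the package documentation for streaming use and tuning options):

```go
// Sketch: compress and decompress a byte slice with klauspost/compress/zstd.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer/reader is fine when only EncodeAll/DecodeAll are used.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	payload := []byte("some compressible payload, some compressible payload")
	compressed := enc.EncodeAll(payload, nil)

	restored, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes -> %q\n", len(payload), len(compressed), restored)
}
```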
@@ -153,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -165,94 +167,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66 silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. 
https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## 
Decompressor @@ -386,47 +388,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + 
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index d7cd15ba29d6..97299d499cf0 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 { return v } -func (b *bitReader) get16BitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - // fillFast() will make sure at least 32 bits are available. // There must be at least 4 bytes available. func (b *bitReader) fillFast() { diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index b36618285095..78b3c61be3ec 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 607b62ee37a9..da814715da0d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,13 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +42,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -97,7 +101,6 @@ type blockDec struct { // Block is RLE, this is the size. RLESize uint32 - tmp [4]byte Type blockType @@ -136,7 +139,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. 
cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType @@ -157,9 +160,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -167,6 +170,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { if debugDecoder { @@ -185,9 +193,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { @@ -224,7 +232,7 @@ func (b *blockDec) decodeBuf(hist *history) error { if b.lowMem { b.dst = make([]byte, b.RLESize) } else { - b.dst = make([]byte, maxBlockSize) + b.dst = make([]byte, maxCompressedBlockSize) } } b.dst = b.dst[:b.RLESize] @@ -355,14 +363,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -392,14 +395,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } var err error // Use our out buffer. - huff.MaxDecodedSize = maxCompressedBlockSize + huff.MaxDecodedSize = litRegenSize if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) } else { @@ -424,9 +427,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err // Ensure we have space to store it. 
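The buffer-sizing changes in this hunk (which continues just below) all follow one pattern: each allocation backing a compressed block or its literals now reserves `compressedBlockOverAlloc` spare bytes. A minimal sketch of the pattern, with a hypothetical helper name; the rationale (letting decoders read slightly past the valid bytes without reallocating) is an assumption, not stated in the diff:

```go
// Hypothetical helper mirroring the allocation pattern in the hunk above:
// every block/literal buffer gets a little slack past what is needed.
const compressedBlockOverAlloc = 16 // same constant as added in this diff

func allocBlockBuf(cSize, steadyState int, lowMem bool) []byte {
	if lowMem || cSize > steadyState {
		// Allocate only what this block needs, plus slack.
		return make([]byte, 0, cSize+compressedBlockOverAlloc)
	}
	// Otherwise allocate the reusable steady-state size, plus slack.
	return make([]byte, 0, steadyState+compressedBlockOverAlloc)
}
```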
if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } huff := hist.huffTree @@ -443,7 +446,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err return in, err } hist.huffTree = huff - huff.MaxDecodedSize = maxCompressedBlockSize + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -458,6 +461,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err if len(literals) != litRegenSize { return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } @@ -481,16 +486,24 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.dst = append(b.dst, hist.decoders.literals...) return nil } - err = hist.decoders.decodeSync(hist) + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) if err != nil { return err } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } b.dst = hist.decoders.out hist.recentOffsets = hist.decoders.prevOffset return nil } func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { @@ -499,8 +512,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { var nSeqs int seqHeader := in[0] switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -517,6 +528,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } var seqs = &hist.decoders seqs.nSeqs = nSeqs @@ -619,6 +637,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } + // Extract blocks... 
+ if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + return nil } @@ -635,7 +669,9 @@ func (b *blockDec) decodeSequences(hist *history) error { hist.decoders.seqSize = len(hist.decoders.literals) return nil } + hist.decoders.windowSize = hist.windowSize hist.decoders.prevOffset = hist.recentOffsets + err := hist.decoders.decode(b.sequence) hist.recentOffsets = hist.decoders.prevOffset return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index b80191e4b1e7..176788f25976 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -7,7 +7,6 @@ package zstd import ( "fmt" "io" - "io/ioutil" ) type byteBuffer interface { @@ -23,7 +22,7 @@ type byteBuffer interface { readByte() (byte, error) // Skip n bytes. - skipN(n int) error + skipN(n int64) error } // in-memory buffer @@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { @@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) { return r, nil } -func (b *byteBuf) skipN(n int) error { +func (b *byteBuf) skipN(n int64) error { bb := *b - if len(bb) < n { + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { return io.ErrUnexpectedEOF } *b = bb[n:] @@ -124,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) { return r.tmp[0], nil } -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { err = io.ErrUnexpectedEOF } return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17fa1d..0e59a242d8dc 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index a93dfaf100e4..74d645f7c380 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -35,6 +35,7 @@ type Decoder struct { br readerWrapper enabled bool inFrame bool + dstBuf []byte } frame *frameDec @@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error { } // If bytes buffer and < 5MB, do sync decoding anyway. 
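For context on the fast path whose condition changes just below: an input that exposes `Bytes()`/`Len()` (such as `*bytes.Buffer`) and falls under the new `decodeBufsBelow` threshold is decoded in one synchronous `DecodeAll` call rather than through the streaming goroutines. A hedged, self-contained usage sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build a small frame to decode.
	var compressed bytes.Buffer
	enc, err := zstd.NewWriter(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	enc.Write([]byte("hello zstd"))
	enc.Close()

	// *bytes.Buffer satisfies the byter interface, so (below the size
	// threshold) Reset takes the synchronous DecodeAll path shown here.
	dec, err := zstd.NewReader(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}
```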
- if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { bb2 := bb if debugDecoder { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } b := bb2.Bytes() var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] } - dst, err := d.DecodeAll(b, dst[:0]) + dst, err := d.DecodeAll(b, dst) if err == nil { err = io.EOF } + // Save output buffer + d.syncStream.dstBuf = dst d.current.b = dst d.current.err = err d.current.flushed = true @@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.current.err = nil d.current.flushed = false d.current.d = nil + d.syncStream.dstBuf = nil // Ensure no-one else is still running... d.streamWg.Wait() @@ -312,6 +316,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // Grab a block decoder and frame decoder. block := <-d.decoders frame := block.localFrame + initialSize := len(dst) defer func() { if debugDecoder { printf("re-adding decoder: %p", block) @@ -347,19 +352,33 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.history.setDict(&dict) } - - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate more than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } - if cap(dst) == 0 { + + if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. 
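The comment above (the fallback itself continues right below) is the motivation for reusing output buffers: when the frame header carries no content size, `DecodeAll` falls back to allocating `len(input) * 2`. Passing a `dst` with spare capacity sidesteps that. A sketch; the 1 MiB capacity hint is an arbitrary assumption:

```go
package main

import "github.com/klauspost/compress/zstd"

// decodeReuse decompresses into a caller-owned buffer, avoiding the
// len(input)*2 fallback allocation described above.
func decodeReuse(dec *zstd.Decoder, compressed []byte) ([]byte, error) {
	buf := make([]byte, 0, 1<<20) // assumed upper bound on decoded size
	return dec.DecodeAll(compressed, buf)
}
```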
size := len(input) * 2 @@ -377,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if err != nil { return dst, err } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } if len(frame.bBuf) == 0 { if debugDecoder { println("frame dbuf empty") @@ -437,7 +459,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } - if len(next.b) > 0 { + if !d.o.ignoreChecksum && len(next.b) > 0 { n, err := d.current.crc.Write(next.b) if err == nil { if n != len(next.b) { @@ -449,7 +471,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { got := d.current.crc.Sum64() var tmp [4]byte binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { if debugDecoder { println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") } @@ -514,7 +536,7 @@ func (d *Decoder) nextBlockSync() (ok bool) { // Check frame size (before CRC) d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize { + if d.syncStream.decodedFrame > d.frame.FrameContentSize { if debugDecoder { printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) } @@ -523,7 +545,7 @@ func (d *Decoder) nextBlockSync() (ok bool) { } // Check FCS - if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { if debugDecoder { printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) } @@ -533,9 +555,15 @@ func (d *Decoder) nextBlockSync() (ok bool) { // Update/Check CRC if d.frame.HasCheckSum { - d.frame.crc.Write(d.current.b) + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } if d.current.d.Last { - d.current.err = d.frame.checkCRC() + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } if d.current.err != nil { println("CRC error:", d.current.err) return false @@ -629,60 +657,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error { // Create Decoder: // ASYNC: -// Spawn 4 go routines. -// 0: Read frames and decode blocks. -// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. -// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. -// 3: Wait for stream history, execute sequences, send stream history. +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() br := readerWrapper{r: r} - var seqPrepare = make(chan *blockDec, d.o.concurrent) var seqDecode = make(chan *blockDec, d.o.concurrent) var seqExecute = make(chan *blockDec, d.o.concurrent) - // Async 1: Prepare blocks... 
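The three-stage layout described in the comment above replaces the four-goroutine version being removed below. A structural sketch of that shape, with heavily simplified types; the real code moves `*blockDec` values and shared history state between stages:

```go
// Hedged structural sketch of the pipeline described above.
type block struct{ data, out []byte }

func runPipeline(in <-chan *block, out chan<- *block) {
	seqDecode := make(chan *block, 4)
	seqExecute := make(chan *block, 4)

	// Stage 1: decode sequences.
	go func() {
		for b := range seqDecode {
			// ... decode FSE sequence codes for b ...
			seqExecute <- b
		}
		close(seqExecute)
	}()

	// Stage 2: execute sequences against the window, emit output.
	go func() {
		for b := range seqExecute {
			// ... expand matches/literals into b.out ...
			out <- b
		}
		close(out)
	}()

	// Stage 0 (caller's goroutine): read frames, decode literals.
	for b := range in {
		// ... huff0-decode literals for b ...
		seqDecode <- b
	}
	close(seqDecode)
}
```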
- go func() { - var hist history - var hasErr bool - for block := range seqPrepare { - if hasErr { - if block != nil { - seqDecode <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history") - } - hist.reset() - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - continue - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - close(seqDecode) - }() - - // Async 2: Decode sequences... + // Async 1: Decode sequences... go func() { var hist history var hasErr bool @@ -696,10 +682,12 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch } if block.async.newHist != nil { if debugDecoder { - println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) } + hist.reset() hist.decoders = block.async.newHist.decoders hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize if block.async.newHist.dict != nil { hist.setDict(block.async.newHist.dict) } @@ -728,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch seqExecute <- block } close(seqExecute) + hist.reset() }() var wg sync.WaitGroup @@ -749,8 +738,9 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch } if block.async.newHist != nil { if debugDecoder { - println("Async 3: new history") + println("Async 2: new history") } + hist.reset() hist.windowSize = block.async.newHist.windowSize hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer if block.async.newHist.dict != nil { @@ -780,7 +770,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch if block.lowMem { block.dst = make([]byte, block.RLESize) } else { - block.dst = make([]byte, maxBlockSize) + block.dst = make([]byte, maxCompressedBlockSize) } } block.dst = block.dst[:block.RLESize] @@ -811,11 +801,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch } if !hasErr { decodedFrame += uint64(len(do.b)) - if fcs > 0 && decodedFrame > fcs { + if decodedFrame > fcs { println("fcs exceeded", block.Last, fcs, decodedFrame) do.err = ErrFrameSizeExceeded hasErr = true - } else if block.Last && fcs > 0 && decodedFrame != fcs { + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { do.err = ErrFrameSizeMismatch hasErr = true } else { @@ -832,10 +822,38 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch if debugDecoder { println("decoder goroutines finished") } + hist.reset() }() + var hist history decodeStream: for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder 
{ + println("decodeLiterals error:", err) + } + seqDecode <- block + } frame := d.frame if debugDecoder { println("New frame...") @@ -855,6 +873,10 @@ decodeStream: } } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + err = ErrDecoderSizeExceeded } if err != nil { @@ -862,7 +884,7 @@ decodeStream: case <-ctx.Done(): case dec := <-d.decoders: dec.sendErr(err) - seqPrepare <- dec + decodeBlock(dec) } break decodeStream } @@ -882,6 +904,10 @@ decodeStream: if debugDecoder { println("Alloc History:", h.allocFrameBuffer) } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } dec.async.newHist = &h dec.async.fcs = frame.FrameContentSize historySent = true @@ -908,7 +934,7 @@ decodeStream: } err = dec.err last := dec.Last - seqPrepare <- dec + decodeBlock(dec) if err != nil { break decodeStream } @@ -917,7 +943,8 @@ decodeStream: } } } - close(seqPrepare) + close(seqDecode) wg.Wait() + hist.reset() d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index fd05c9bb0123..f42448e69c95 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -14,24 +14,28 @@ type DOption func(*decoderOptions) error // options retains accumulated state of multiple options. type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int } func (o *decoderOptions) setDefault() { *o = decoderOptions{ // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, } if o.concurrent > 4 { o.concurrent = 4 } - o.maxDecodedSize = 1 << 63 + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -66,7 +70,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -112,3 +116,34 @@ func WithDecoderMaxWindow(size uint64) DOption { return nil } } + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. 
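The new decoder options in this hunk (the last of them, `WithDecodeBuffersBelow`, has its body just below) compose naturally. A hedged usage sketch; all limit values shown are arbitrary examples, not recommendations:

```go
package main

import "github.com/klauspost/compress/zstd"

// newBoundedDecoder combines the options introduced above.
func newBoundedDecoder() (*zstd.Decoder, error) {
	return zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(1<<30),    // hard cap on decoded size (default is now 64GiB)
		zstd.WithDecodeAllCapLimit(true),    // DecodeAll writes at most cap(dst)-len(dst)
		zstd.WithDecodeBuffersBelow(64<<10), // sync-decode small buffer-backed readers
		zstd.IgnoreChecksum(true),           // skip CRC verification entirely
	)
}
```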
+func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 96028ecd8366..dbbb88d92b39 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,6 +32,7 @@ type match struct { length int32 rep int32 est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } const highScore = 25000 diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 602c05ee0c4c..d70e3fd3d3e9 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -156,8 +156,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -416,15 +416,23 @@ encodeLoop: // Try to find a better match by searching for a long match at the end of the current best match if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) + s2 := s + skipBeginning + cv := load3232(src, s2) candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("long match at end-of-match") @@ -434,12 +442,13 @@ encodeLoop: // Check prev long... if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 if matchedNext > matched { t = coffsetL + s = s2 matched = matchedNext if debugMatches { println("prev long match at end-of-match") @@ -518,8 +527,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. 
// No need to check backwards. We come straight from a match @@ -674,8 +683,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -1047,8 +1056,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d6b3104240b0..1f4a9a245563 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -127,8 +127,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -439,8 +439,8 @@ encodeLoop: var t int32 for { - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -785,8 +785,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -969,7 +969,7 @@ encodeLoop: te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -1002,8 +1002,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match @@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) for i := range e.longTableShardDirty { e.longTableShardDirty[i] = false } @@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + e.longTableShardDirty[i] = false } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f51ab529a0bc..202636db05ea 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -304,7 +304,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { minNonLiteralBlockSize = 1 + 1 + inputMargin ) if debugEncoder { - if len(src) > maxBlockSize { + if len(src) > maxCompressedBlockSize { panic("src too big") } } @@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { const shardCnt = tableShardCnt const shardSize = tableShardSize if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) for i := range e.tableShardDirty { e.tableShardDirty[i] = false } @@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { continue } - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) e.tableShardDirty[i] = false } e.allDirty = false diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index dcc987a7cb68..7aaaedb23e58 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -528,8 +528,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If a non-single block is needed the encoder will reset again. e.encoders <- enc }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { single = *e.o.single } @@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. 
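With the `EncodeAll` change above (the hunk continues below), single-segment frames are chosen when the payload is larger than `MinWindowSize` but no larger than the configured window size, instead of the old fixed 1MB cutoff. A hedged sketch; the 1 MiB window is an assumption:

```go
package main

import "github.com/klauspost/compress/zstd"

// encodeOneShot illustrates the new rule: MinWindowSize < len(src) <= window
// size yields a single-segment frame.
func encodeOneShot(src []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithWindowSize(1<<20))
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	return enc.EncodeAll(src, nil), nil
}
```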
if e.o.crc { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 44d8dbd199a6..a7c5e1aac432 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -283,7 +283,7 @@ func WithNoEntropyCompression(b bool) EOption { // a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. // For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. // This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. // This setting has no effect on streamed encodes. func WithSingleSegment(b bool) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 29c3176b0543..2c0affcd85c7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -106,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error { } n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) println("Skipping frame with", n, "bytes.") - err = br.skipN(int(n)) + err = br.skipN(int64(n)) if err != nil { if debugDecoder { println("Reading discarded frame", err) @@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { @@ -231,20 +231,27 @@ func (d *frameDec) reset(br byteBuffer) error { d.crc.Reset() } + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. d.WindowSize = d.FrameContentSize if d.WindowSize < MinWindowSize { d.WindowSize = MinWindowSize } - } - - if d.WindowSize > uint64(d.o.maxWindowSize) { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded } - return ErrWindowSizeExceeded } + // The minimum Window_Size is 1 KB. if d.WindowSize < MinWindowSize { if debugDecoder { @@ -253,11 +260,17 @@ func (d *frameDec) reset(br byteBuffer) error { return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. d.history.allocFrameBuffer = d.history.windowSize * 2 - // TODO: Maybe use FrameContent size } else { - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. 
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } } if debugDecoder { @@ -290,13 +303,6 @@ func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now want, err := d.rawInput.readSmall(4) @@ -305,7 +311,19 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) && !ignoreCRC { + if d.o.ignoreChecksum { + return nil + } + + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + if !bytes.Equal(tmp[:], want) { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -317,7 +335,20 @@ func (d *frameDec) checkCRC() error { return nil } -// runDecoder will create a sync decoder that will decode a block of data. +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + +// runDecoder will run the decoder for the remainder of the frame. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b @@ -326,6 +357,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -339,16 +394,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if err != nil { break } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) - err = ErrFrameSizeExceeded + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded break } - if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", 
d.FrameContentSize)
			err = ErrFrameSizeExceeded
			break
		}
		if dec.Last {
			break
		}
-		if debugDecoder && d.FrameContentSize > 0 {
+		if debugDecoder {
			println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
		}
	}
	dst = d.history.b
	if err == nil {
-		if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
			err = ErrFrameSizeMismatch
		} else if d.HasCheckSum {
-			var n int
-			n, err = d.crc.Write(dst[crcStart:])
-			if err == nil {
-				if n != len(dst)-crcStart {
-					err = io.ErrShortWrite
-				} else {
-					err = d.checkCRC()
+			if d.o.ignoreChecksum {
+				err = d.consumeCRC()
+			} else {
+				var n int
+				n, err = d.crc.Write(dst[crcStart:])
+				if err == nil {
+					if n != len(dst)-crcStart {
+						err = io.ErrShortWrite
+					} else {
+						err = d.checkCRC()
+					}
				}
			}
		}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index bb3d4fd6c312..2f8860a722b8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -5,8 +5,10 @@
package zstd

import (
+	"encoding/binary"
	"errors"
	"fmt"
+	"io"
)

const (
@@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
-	// println(s.norm[:s.symbolLen], s.symbolLen)
	return s.buildDtable()
}

+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// dt             [maxTablesize]decSymbol // Decompression table.
+	// symbolLen      uint16                  // Length of active part of the symbol table.
+	// actualTableLog uint8                   // Selected tablelog.
+	// maxBits        uint8                   // Maximum number of additional bits
+	// // used for table creation to avoid allocations.
+	// stateTable [256]uint16
+	// norm       [maxSymbolValue + 1]int16
+	// preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
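The `decSymbol` comment above describes a single packed 64-bit table entry; the setters in the hunk that follows imply the layout. A reference sketch (the helper name is hypothetical):

```go
// Layout of a decSymbol entry, as implied by the setters below:
//   bits  0..7  : nBits    — bits to read for the next state's low part
//   bits  8..15 : addBits  — extra bits carried by the symbol
//   bits 16..31 : newState — base of the next state
//   bits 32..63 : baseline — base value for the decoded symbol
func packDecSymbol(nBits, addBits uint8, newState uint16, baseline uint32) uint64 {
	return uint64(nBits) | uint64(addBits)<<8 | uint64(newState)<<16 | uint64(baseline)<<32
}
```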
@@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. 
-func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := br.get16BitsFast(s.state.nbBits()) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000000..d04a829b0a0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000000..bcde39869535 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
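In the generated assembly that follows, the "Calculate table step" block computes the standard FSE symbol-spreading step from the table size. In Go terms it amounts to the following; as far as I can tell this matches the package's existing `tableStep` helper, but treat the correspondence as an assumption:

```go
// Equivalent of the assembly's "Calculate table step" block below:
// step = (tableSize >> 1) + (tableSize >> 3) + 3.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}
```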
+ +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000000..332e51fe44fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index 5442061b18df..ab26326a8ff8 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { @@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.state = c.stateTable[lu] } -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go deleted file mode 100644 index fda8a7422824..000000000000 --- a/vendor/github.com/klauspost/compress/zstd/fuzz.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// ignoreCRC can be used for fuzz testing to ignore CRC values... -const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go deleted file mode 100644 index 0515b201ccbf..000000000000 --- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !gofuzz -// +build !gofuzz - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// ignoreCRC can be used for fuzz testing to ignore CRC values... -const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index cf33f29a1b48..5d73c21ebdd4 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 { return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index 28b40153cc2c..09164856d222 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -37,24 +37,21 @@ func (h *history) reset() { h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } + h.decoders.freeDecoders() h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) + h.huffTree = nil } } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) } func (h *history) setDict(dict *dict) { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 213736ad77e3..f833d1541f98 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -73,6 +73,7 @@ type sequenceDecs struct { seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. @@ -98,150 +99,28 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro return nil } -// decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. 
- llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - for i := range seqs { - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. 
- nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil } - return err } // execute will execute the decoded sequence with the provided history. // The sequence must be evaluated before being sent. func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + // Ensure we have enough output size... if len(s.out)+s.seqSize > cap(s.out) { addBytes := s.seqSize + len(s.out) @@ -324,6 +203,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { } } } + // Add final literals copy(out[t:], s.literals) if debugDecoder { @@ -338,15 +218,23 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { } // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decodeSync(history *history) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + br := s.br seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. 
 	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-	hist := history.b[history.ignoreBuffer:]
 	out := s.out
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
 
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
@@ -426,7 +314,7 @@
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size", size)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -456,13 +344,13 @@
 
 		if mo > len(out)+len(hist) || mo > s.windowSize {
 			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 			}
 
 			// we may be in dictionary.
 			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
 			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
 			}
 			end := dictO + ml
 			if end > len(s.dict) {
@@ -523,6 +411,7 @@
 			ofState = ofTable[ofState.newState()&maxTableMask]
 		} else {
 			bits := br.get32BitsFast(nBits)
+
 			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
 			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
 
@@ -535,21 +424,16 @@
 		}
 	}
 
+	// Check if space for literals
+	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+
 	// Add final literals
 	s.out = append(out, s.literals...)
 	return br.close()
 }
 
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
-	// Max 8 bits
-	s.litLengths.state.next(br)
-	// Max 9 bits
-	s.matchLengths.state.next(br)
-	// Max 8 bits
-	s.offsets.state.next(br)
-}
-
 var bitMask [16]uint16
 
 func init() {
@@ -558,87 +442,6 @@ func init() {
 	}
 }
 
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
-	// Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000000..191384adfd06 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. 
+// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than 
max block size (%d)", maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
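(Editorial aside: a self-contained toy of the four-way dispatch in the `decode` wrapper that follows. Plain stand-in functions replace the generated routines; the stated rationale for the 56-bit bound, namely that a single 64-bit refill leaves at least 56 usable bits, is our reading of the `_56` variants' single-refill structure, not something the source asserts.)

```go
package main

import "fmt"

// Stand-ins for the four generated decoders.
func decodeAMD64() int   { return 0 }
func decode56AMD64() int { return 0 }
func decodeBMI2() int    { return 0 }
func decode56BMI2() int  { return 0 }

// dispatch mirrors decode() below: prefer BMI2 when available, and the
// "_56" variants when one sequence (three FSE state reloads plus the
// maximum extra bits) always fits within 56 bits per refill.
func dispatch(hasBMI2 bool, maxBits, ofLog, mlLog, llLog uint8) int {
	lte56bits := int(maxBits)+int(ofLog)+int(mlLog)+int(llLog) <= 56
	switch {
	case hasBMI2 && lte56bits:
		return decode56BMI2()
	case hasBMI2:
		return decodeBMI2()
	case lte56bits:
		return decode56AMD64()
	default:
		return decodeAMD64()
	}
}

func main() {
	// Arbitrary example inputs: sums to exactly 56, so the _56 path is taken.
	fmt.Println(dispatch(true, 30, 8, 9, 9))
}
```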
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
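+		// Re-slice back to the original length below: the append above only
+		// serves to grow the backing array's capacity in a single allocation.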
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000000..52e5703c26c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4099 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
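+	// (Fast path: with at least 8 bytes of input left, refill the bit
+	// buffer with a single unaligned 8-byte load; otherwise fall back to
+	// the byte-by-byte loop below.)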
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset 
and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
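+	// (The block above implements the repeat-offset rule: when
+	// literal_length == 0 the recent offsets shift by one, so an offset
+	// value of 1 selects Repeated_Offset2, 2 selects Repeated_Offset3, and
+	// 3 means Repeated_Offset1 - 1, as in the inlined Go code removed
+	// earlier in this diff.)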
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
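+	// (A computed repeat offset of 0 is invalid; as in the reference Go
+	// code, it is treated as corrupted input and forced to 1.)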
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
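+	// (Copies of 8 to 16 bytes use two possibly overlapping 8-byte
+	// loads/stores: one anchored at the start, one at the last byte.)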
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + 
MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB 
copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
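+	// Fast path refills 8 bytes at once; the byte-by-byte loop below handles
+	// the last few bytes of the input buffer.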
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
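+	// Each iteration decodes one sequence and immediately executes it,
+	// writing the literals and the match straight into s.out.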
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
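+	// Safe variant: literals are copied with exact-length moves, so nothing
+	// is written past the end of the destination buffer.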
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
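+	// BMI2 variant: BEXTRQ/BZHIQ extract bit fields directly instead of the
+	// shift-and-mask sequences used by the plain amd64 version.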
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000000..ac2a80d29111 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
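+				// E.g. with ll == 0 a raw mo of 0 becomes 1 and resolves to
+				// prevOffset[1] (Repeated_Offset2) rather than prevOffset[0].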
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
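A brief aside while executeSimple is mid-listing: an LZ77 match may read bytes that the same sequence has only just written (offset smaller than match length), which is why the overlap branch a little further down copies one byte at a time instead of relying on memmove semantics. A minimal sketch with a hypothetical helper:

```go
package main

import "fmt"

// lz77Expand appends length bytes found offset bytes back from the end of out.
// When offset < length, each write feeds later reads, reproducing the run -
// the behavior the overlapping-copy branch below preserves.
func lz77Expand(out []byte, offset, length int) []byte {
	start := len(out) - offset
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	fmt.Println(string(lz77Expand(out, 1, 4))) // abbbbb
}
```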
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b3120e..29c15c8c4efe 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -18,36 +18,58 @@ const ZipMethodWinZip = 93 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 -var zipReaderPool sync.Pool +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,7 +79,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -111,12 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. 
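For context on the zip.go changes that follow: ZipCompressor and ZipDecompressor are designed to be registered with archive/zip. A read-side sketch (the archive name is hypothetical; with this changeset, extra options such as `zstd.WithDecoderMaxWindow(256<<20)` may also be passed to ZipDecompressor):

```go
package main

import (
	"archive/zip"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	r, err := zip.OpenReader("archive.zip")
	if err != nil {
		panic(err)
	}
	defer r.Close()

	// Teach the reader to decode entries compressed with method 93 (WinZip zstd).
	r.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	for _, f := range r.File {
		fmt.Println(f.Name, f.UncompressedSize64)
	}
}
```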
-func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0b0c2571ddc5..3eb3f1c82661 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -39,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -52,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -103,17 +110,6 @@ func printf(format string, a ...interface{}) { } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - // matchLen returns the maximum length. // a must be the shortest of the two. // The function also returns whether all bytes matched. diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 000000000000..200e596c6259 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. 
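For orientation while the body of DiffChunks continues below, this is what the package looks like from the call site (inputs are illustrative):

```go
package main

import (
	"fmt"

	"github.com/kylelemons/godebug/diff"
)

func main() {
	a := []string{"one", "two", "three"}
	b := []string{"one", "2", "three"}

	// Per the Chunk doc above: no chunk carries both added and deleted lines,
	// and equal lines always follow any added or deleted ones.
	for _, c := range diff.DiffChunks(a, b) {
		fmt.Printf("added=%q deleted=%q equal=%q\n", c.Added, c.Deleted, c.Equal)
	}
}
```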
+ for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/.gitignore b/vendor/github.com/kylelemons/godebug/pretty/.gitignore new file mode 100644 index 000000000000..fa9a735da3c1 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/.gitignore @@ -0,0 +1,5 @@ +*.test +*.bench +*.golden +*.txt +*.prof diff --git a/vendor/github.com/kylelemons/godebug/pretty/doc.go b/vendor/github.com/kylelemons/godebug/pretty/doc.go new file mode 100644 index 000000000000..03b5718a70db --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/doc.go @@ -0,0 +1,25 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pretty pretty-prints Go structures. +// +// This package uses reflection to examine a Go value and can +// print out in a nice, aligned fashion. It supports three +// modes (normal, compact, and extended) for advanced use. +// +// See the Reflect and Print examples for what the output looks like. 
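A quick taste of the three output modes the package comment above mentions, before the listing continues with doc.go's package clause (the struct and values are illustrative):

```go
package main

import "github.com/kylelemons/godebug/pretty"

type server struct {
	Name string
	Port int
}

func main() {
	s := server{Name: "api", Port: 8080}

	pretty.Print(s)                           // default: aligned, multi-line
	(&pretty.Config{Compact: true}).Print(s)  // {Name:"api",Port:8080}
	(&pretty.Config{Diffable: true}).Print(s) // one field per line, diff-friendly
}
```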
+package pretty + +// TODO: +// - Catch cycles diff --git a/vendor/github.com/kylelemons/godebug/pretty/public.go b/vendor/github.com/kylelemons/godebug/pretty/public.go new file mode 100644 index 000000000000..fbc5d7abbf87 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/public.go @@ -0,0 +1,188 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bytes" + "fmt" + "io" + "net" + "reflect" + "time" + + "github.com/kylelemons/godebug/diff" +) + +// A Config represents optional configuration parameters for formatting. +// +// Some options, notably ShortList, dramatically increase the overhead +// of pretty-printing a value. +type Config struct { + // Verbosity options + Compact bool // One-line output. Overrides Diffable. + Diffable bool // Adds extra newlines for more easily diffable output. + + // Field and value options + IncludeUnexported bool // Include unexported fields in output + PrintStringers bool // Call String on a fmt.Stringer + PrintTextMarshalers bool // Call MarshalText on an encoding.TextMarshaler + SkipZeroFields bool // Skip struct fields that have a zero value. + + // Output transforms + ShortList int // Maximum character length for short lists if nonzero. + + // Type-specific overrides + // + // Formatter maps a type to a function that will provide a one-line string + // representation of the input value. Conceptually: + // Formatter[reflect.TypeOf(v)](v) = "v as a string" + // + // Note that the first argument need not explicitly match the type, it must + // merely be callable with it. + // + // When processing an input value, if its type exists as a key in Formatter: + // 1) If the value is nil, no stringification is performed. + // This allows overriding of PrintStringers and PrintTextMarshalers. + // 2) The value will be called with the input as its only argument. + // The function must return a string as its first return value. + // + // In addition to func literals, two common values for this will be: + // fmt.Sprint (function) func Sprint(...interface{}) string + // Type.String (method) func (Type) String() string + // + // Note that neither of these work if the String method is a pointer + // method and the input will be provided as a value. In that case, + // use a function that calls .String on the formal value parameter. + Formatter map[reflect.Type]interface{} + + // If TrackCycles is enabled, pretty will detect and track + // self-referential structures. If a self-referential structure (aka a + // "recursive" value) is detected, numbered placeholders will be emitted. + // + // Pointer tracking is disabled by default for performance reasons. + TrackCycles bool +} + +// Default Config objects +var ( + // DefaultFormatter is the default set of overrides for stringification. 
+	DefaultFormatter = map[reflect.Type]interface{}{
+		reflect.TypeOf(time.Time{}):          fmt.Sprint,
+		reflect.TypeOf(net.IP{}):             fmt.Sprint,
+		reflect.TypeOf((*error)(nil)).Elem(): fmt.Sprint,
+	}
+
+	// CompareConfig is the default configuration used for Compare.
+	CompareConfig = &Config{
+		Diffable:          true,
+		IncludeUnexported: true,
+		Formatter:         DefaultFormatter,
+	}
+
+	// DefaultConfig is the default configuration used for all other top-level functions.
+	DefaultConfig = &Config{
+		Formatter: DefaultFormatter,
+	}
+
+	// CycleTracker is a convenience config for formatting and comparing recursive structures.
+	CycleTracker = &Config{
+		Diffable:    true,
+		Formatter:   DefaultFormatter,
+		TrackCycles: true,
+	}
+)
+
+func (cfg *Config) fprint(buf *bytes.Buffer, vals ...interface{}) {
+	ref := &reflector{
+		Config: cfg,
+	}
+	if cfg.TrackCycles {
+		ref.pointerTracker = new(pointerTracker)
+	}
+	for i, val := range vals {
+		if i > 0 {
+			buf.WriteByte('\n')
+		}
+		newFormatter(cfg, buf).write(ref.val2node(reflect.ValueOf(val)))
+	}
+}
+
+// Print writes the DefaultConfig representation of the given values to standard output.
+func Print(vals ...interface{}) {
+	DefaultConfig.Print(vals...)
+}
+
+// Print writes the configured presentation of the given values to standard output.
+func (cfg *Config) Print(vals ...interface{}) {
+	fmt.Println(cfg.Sprint(vals...))
+}
+
+// Sprint returns a string representation of the given value according to the DefaultConfig.
+func Sprint(vals ...interface{}) string {
+	return DefaultConfig.Sprint(vals...)
+}
+
+// Sprint returns a string representation of the given value according to cfg.
+func (cfg *Config) Sprint(vals ...interface{}) string {
+	buf := new(bytes.Buffer)
+	cfg.fprint(buf, vals...)
+	return buf.String()
+}
+
+// Fprint writes the representation of the given value to the writer according to the DefaultConfig.
+func Fprint(w io.Writer, vals ...interface{}) (n int64, err error) {
+	return DefaultConfig.Fprint(w, vals...)
+}
+
+// Fprint writes the representation of the given value to the writer according to the cfg.
+func (cfg *Config) Fprint(w io.Writer, vals ...interface{}) (n int64, err error) {
+	buf := new(bytes.Buffer)
+	cfg.fprint(buf, vals...)
+	return buf.WriteTo(w)
+}
+
+// Compare returns a string containing a line-by-line unified diff of the
+// values in a and b, using the CompareConfig.
+//
+// Each line in the output is prefixed with '+', '-', or ' ' to indicate which
+// side it's from. Lines from the a side are marked with '-', lines from the
+// b side are marked with '+' and lines that are the same on both sides are
+// marked with ' '.
+//
+// The comparison is based on the intentionally-untyped output of Print, and as
+// such this comparison is pretty forgiving. In particular, if the types of or
+// types within a and b are different but have the same representation,
+// Compare will not indicate any differences between them.
+func Compare(a, b interface{}) string {
+	return CompareConfig.Compare(a, b)
+}
+
+// Compare returns a string containing a line-by-line unified diff of the
+// values in got and want according to the cfg.
+//
+// Each line in the output is prefixed with '+', '-', or ' ' to indicate which
+// side it's from. Lines from the a side are marked with '-', lines from the
+// b side are marked with '+' and lines that are the same on both sides are
+// marked with ' '.
+//
+// The comparison is based on the intentionally-untyped output of Print, and as
+// such this comparison is pretty forgiving. In particular, if the types of or
+// types within a and b are different but have the same representation,
+// Compare will not indicate any differences between them.
+func (cfg *Config) Compare(a, b interface{}) string {
+	diffCfg := *cfg
+	diffCfg.Diffable = true
+	return diff.Diff(cfg.Sprint(a), cfg.Sprint(b))
+}
diff --git a/vendor/github.com/kylelemons/godebug/pretty/reflect.go b/vendor/github.com/kylelemons/godebug/pretty/reflect.go
new file mode 100644
index 000000000000..5cd30b7f0360
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/reflect.go
@@ -0,0 +1,241 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pretty
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+func isZeroVal(val reflect.Value) bool {
+	if !val.CanInterface() {
+		return false
+	}
+	z := reflect.Zero(val.Type()).Interface()
+	return reflect.DeepEqual(val.Interface(), z)
+}
+
+// pointerTracker is a helper for tracking pointer chasing to detect cycles.
+type pointerTracker struct {
+	addrs map[uintptr]int // addr[address] = seen count
+
+	lastID int
+	ids    map[uintptr]int // ids[address] = id
+}
+
+// track tracks following a reference (pointer, slice, map, etc). Every call to
+// track should be paired with a call to untrack.
+func (p *pointerTracker) track(ptr uintptr) {
+	if p.addrs == nil {
+		p.addrs = make(map[uintptr]int)
+	}
+	p.addrs[ptr]++
+}
+
+// untrack registers that we have backtracked over the reference to the pointer.
+func (p *pointerTracker) untrack(ptr uintptr) {
+	p.addrs[ptr]--
+	if p.addrs[ptr] == 0 {
+		delete(p.addrs, ptr)
+	}
+}
+
+// seen returns whether the pointer was previously seen along this path.
+func (p *pointerTracker) seen(ptr uintptr) bool {
+	_, ok := p.addrs[ptr]
+	return ok
+}
+
+// keep allocates an ID for the given address and returns it.
+func (p *pointerTracker) keep(ptr uintptr) int {
+	if p.ids == nil {
+		p.ids = make(map[uintptr]int)
+	}
+	if _, ok := p.ids[ptr]; !ok {
+		p.lastID++
+		p.ids[ptr] = p.lastID
+	}
+	return p.ids[ptr]
+}
+
+// id returns the ID for the given address.
+func (p *pointerTracker) id(ptr uintptr) (int, bool) {
+	if p.ids == nil {
+		p.ids = make(map[uintptr]int)
+	}
+	id, ok := p.ids[ptr]
+	return id, ok
+}
+
+// reflector adds local state to the recursive reflection logic.
+type reflector struct {
+	*Config
+	*pointerTracker
+}
+
+// follow handles following a possibly-recursive reference to the given value
+// from the given ptr address.
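Stepping out of the internals for a moment before the listing resumes with follow's implementation: the Compare API documented above is the package's most common entry point in tests. A small illustrative use:

```go
package main

import (
	"fmt"

	"github.com/kylelemons/godebug/pretty"
)

type endpoint struct {
	Host string
	Port int
}

func main() {
	got := endpoint{Host: "localhost", Port: 8080}
	want := endpoint{Host: "localhost", Port: 9090}

	// Compare uses CompareConfig (Diffable, IncludeUnexported); '-' lines come
	// from the first argument and '+' lines from the second.
	if d := pretty.Compare(got, want); d != "" {
		fmt.Printf("unexpected endpoint (-got +want):\n%s\n", d)
	}
}
```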
+func (r *reflector) follow(ptr uintptr, val reflect.Value) node { + if r.pointerTracker == nil { + // Tracking disabled + return r.val2node(val) + } + + // If a parent already followed this, emit a reference marker + if r.seen(ptr) { + id := r.keep(ptr) + return ref{id} + } + + // Track the pointer we're following while on this recursive branch + r.track(ptr) + defer r.untrack(ptr) + n := r.val2node(val) + + // If the recursion used this ptr, wrap it with a target marker + if id, ok := r.id(ptr); ok { + return target{id, n} + } + + // Otherwise, return the node unadulterated + return n +} + +func (r *reflector) val2node(val reflect.Value) node { + if !val.IsValid() { + return rawVal("nil") + } + + if val.CanInterface() { + v := val.Interface() + if formatter, ok := r.Formatter[val.Type()]; ok { + if formatter != nil { + res := reflect.ValueOf(formatter).Call([]reflect.Value{val}) + return rawVal(res[0].Interface().(string)) + } + } else { + if s, ok := v.(fmt.Stringer); ok && r.PrintStringers { + return stringVal(s.String()) + } + if t, ok := v.(encoding.TextMarshaler); ok && r.PrintTextMarshalers { + if raw, err := t.MarshalText(); err == nil { // if NOT an error + return stringVal(string(raw)) + } + } + } + } + + switch kind := val.Kind(); kind { + case reflect.Ptr: + if val.IsNil() { + return rawVal("nil") + } + return r.follow(val.Pointer(), val.Elem()) + case reflect.Interface: + if val.IsNil() { + return rawVal("nil") + } + return r.val2node(val.Elem()) + case reflect.String: + return stringVal(val.String()) + case reflect.Slice: + n := list{} + length := val.Len() + ptr := val.Pointer() + for i := 0; i < length; i++ { + n = append(n, r.follow(ptr, val.Index(i))) + } + return n + case reflect.Array: + n := list{} + length := val.Len() + for i := 0; i < length; i++ { + n = append(n, r.val2node(val.Index(i))) + } + return n + case reflect.Map: + // Extract the keys and sort them for stable iteration + keys := val.MapKeys() + pairs := make([]mapPair, 0, len(keys)) + for _, key := range keys { + pairs = append(pairs, mapPair{ + key: new(formatter).compactString(r.val2node(key)), // can't be cyclic + value: val.MapIndex(key), + }) + } + sort.Sort(byKey(pairs)) + + // Process the keys into the final representation + ptr, n := val.Pointer(), keyvals{} + for _, pair := range pairs { + n = append(n, keyval{ + key: pair.key, + val: r.follow(ptr, pair.value), + }) + } + return n + case reflect.Struct: + n := keyvals{} + typ := val.Type() + fields := typ.NumField() + for i := 0; i < fields; i++ { + sf := typ.Field(i) + if !r.IncludeUnexported && sf.PkgPath != "" { + continue + } + field := val.Field(i) + if r.SkipZeroFields && isZeroVal(field) { + continue + } + n = append(n, keyval{sf.Name, r.val2node(field)}) + } + return n + case reflect.Bool: + if val.Bool() { + return rawVal("true") + } + return rawVal("false") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rawVal(fmt.Sprintf("%d", val.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rawVal(fmt.Sprintf("%d", val.Uint())) + case reflect.Uintptr: + return rawVal(fmt.Sprintf("0x%X", val.Uint())) + case reflect.Float32, reflect.Float64: + return rawVal(fmt.Sprintf("%v", val.Float())) + case reflect.Complex64, reflect.Complex128: + return rawVal(fmt.Sprintf("%v", val.Complex())) + } + + // Fall back to the default %#v if we can + if val.CanInterface() { + return rawVal(fmt.Sprintf("%#v", val.Interface())) + } + + return rawVal(val.String()) +} + +type 
mapPair struct { + key string + value reflect.Value +} + +type byKey []mapPair + +func (v byKey) Len() int { return len(v) } +func (v byKey) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v byKey) Less(i, j int) bool { return v[i].key < v[j].key } diff --git a/vendor/github.com/kylelemons/godebug/pretty/structure.go b/vendor/github.com/kylelemons/godebug/pretty/structure.go new file mode 100644 index 000000000000..d876f60cad21 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/structure.go @@ -0,0 +1,223 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// a formatter stores stateful formatting information as well as being +// an io.Writer for simplicity. +type formatter struct { + *bufio.Writer + *Config + + // Self-referential structure tracking + tagNumbers map[int]int // tagNumbers[id] = <#n> +} + +// newFormatter creates a new buffered formatter. For the output to be written +// to the given writer, this must be accompanied by a call to write (or Flush). +func newFormatter(cfg *Config, w io.Writer) *formatter { + return &formatter{ + Writer: bufio.NewWriter(w), + Config: cfg, + tagNumbers: make(map[int]int), + } +} + +func (f *formatter) write(n node) { + defer f.Flush() + n.format(f, "") +} + +func (f *formatter) tagFor(id int) int { + if tag, ok := f.tagNumbers[id]; ok { + return tag + } + if f.tagNumbers == nil { + return 0 + } + tag := len(f.tagNumbers) + 1 + f.tagNumbers[id] = tag + return tag +} + +type node interface { + format(f *formatter, indent string) +} + +func (f *formatter) compactString(n node) string { + switch k := n.(type) { + case stringVal: + return string(k) + case rawVal: + return string(k) + } + + buf := new(bytes.Buffer) + f2 := newFormatter(&Config{Compact: true}, buf) + f2.tagNumbers = f.tagNumbers // reuse tagNumbers just in case + f2.write(n) + return buf.String() +} + +type stringVal string + +func (str stringVal) format(f *formatter, indent string) { + f.WriteString(strconv.Quote(string(str))) +} + +type rawVal string + +func (r rawVal) format(f *formatter, indent string) { + f.WriteString(string(r)) +} + +type keyval struct { + key string + val node +} + +type keyvals []keyval + +func (l keyvals) format(f *formatter, indent string) { + f.WriteByte('{') + + switch { + case f.Compact: + // All on one line: + for i, kv := range l { + if i > 0 { + f.WriteByte(',') + } + f.WriteString(kv.key) + f.WriteByte(':') + kv.val.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, kv := range l { + f.WriteString(inner) + f.WriteString(kv.key) + f.WriteString(": ") + kv.val.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + keyWidth := 0 + for _, kv := range l { + if kw := len(kv.key); kw > keyWidth { + keyWidth = kw + } + } + alignKey := indent + " " + alignValue := strings.Repeat(" ", 
keyWidth) + inner := alignKey + alignValue + " " + // First and last line shared with bracket: + for i, kv := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(alignKey) + } + f.WriteString(kv.key) + f.WriteString(": ") + f.WriteString(alignValue[len(kv.key):]) + kv.val.format(f, inner) + } + } + + f.WriteByte('}') +} + +type list []node + +func (l list) format(f *formatter, indent string) { + if max := f.ShortList; max > 0 { + short := f.compactString(l) + if len(short) <= max { + f.WriteString(short) + return + } + } + + f.WriteByte('[') + + switch { + case f.Compact: + // All on one line: + for i, v := range l { + if i > 0 { + f.WriteByte(',') + } + v.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, v := range l { + f.WriteString(inner) + v.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + inner := indent + " " + // First and last line shared with bracket: + for i, v := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(inner) + } + v.format(f, inner) + } + } + + f.WriteByte(']') +} + +type ref struct { + id int +} + +func (r ref) format(f *formatter, indent string) { + fmt.Fprintf(f, "", f.tagFor(r.id)) +} + +type target struct { + id int + value node +} + +func (t target) format(f *formatter, indent string) { + tag := fmt.Sprintf("<#%d> ", f.tagFor(t.id)) + switch { + case f.Diffable, f.Compact: + // no indent changes + default: + indent += strings.Repeat(" ", len(tag)) + } + f.WriteString(tag) + t.value.format(f, indent) +} diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE similarity index 100% rename from vendor/github.com/moby/term/LICENSE rename to vendor/github.com/moby/patternmatcher/LICENSE diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE new file mode 100644 index 000000000000..e5154640fe02 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go new file mode 100644 index 000000000000..37a1a59ac4e3 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/patternmatcher.go @@ -0,0 +1,474 @@ +package patternmatcher + +import ( + "errors" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + "unicode/utf8" +) + +// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. +var escapeBytes [8]byte + +// shouldEscape reports whether a rune should be escaped as part of the regex. +// +// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. +// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator +// on Windows. +// +// Adapted from regexp::QuoteMeta in go stdlib. 
+// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 +func shouldEscape(b rune) bool { + return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 +} + +func init() { + for _, b := range []byte(`.+()|{}$`) { + escapeBytes[b%8] |= 1 << (b / 8) + } +} + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// New creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func New(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +// +// Deprecated: This implementation is buggy (it only checks a single parent dir +// against the pattern) and will be removed soon. Use either +// MatchesOrParentMatches or MatchesUsingParentResults instead. +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesOrParentMatches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. 
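Before the implementation of MatchesOrParentMatches below, here is the matcher in typical .dockerignore-style use (patterns and paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/moby/patternmatcher"
)

func main() {
	// "docs" excludes the docs tree; the "!" pattern re-includes one file.
	pm, err := patternmatcher.New([]string{"docs", "!docs/README.md"})
	if err != nil {
		panic(err)
	}

	for _, p := range []string{"docs/guide.md", "docs/README.md", "main.go"} {
		excluded, err := pm.MatchesOrParentMatches(p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-16s excluded=%v\n", p, excluded) // true, false, false
	}
}
```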
+func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
+	matched := false
+	file = filepath.FromSlash(file)
+	parentPath := filepath.Dir(file)
+	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+	for _, pattern := range pm.patterns {
+		// Skip evaluation if this is an inclusion and the filename
+		// already matched the pattern, or it's an exclusion and it has
+		// not matched the pattern yet.
+		if pattern.exclusion != matched {
+			continue
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return false, err
+		}
+
+		if !match && parentPath != "." {
+			// Check to see if the pattern matches one of our parent dirs.
+			for i := range parentPathDirs {
+				match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
+				if match {
+					break
+				}
+			}
+		}
+
+		if match {
+			matched = !pattern.exclusion
+		}
+	}
+
+	return matched, nil
+}
+
+// MatchesUsingParentResult returns true if "file" matches any of the patterns
+// and isn't excluded by any of the subsequent patterns. The functionality is
+// the same as Matches, but as an optimization, the caller keeps track of
+// whether the parent directory matched.
+//
+// The "file" argument should be a slash-delimited path.
+//
+// MatchesUsingParentResult is not safe to call concurrently.
+//
+// Deprecated: this function does not behave correctly in some cases (see
+// https://github.com/docker/buildx/issues/850).
+//
+// Use MatchesUsingParentResults instead.
+func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) {
+	matched := parentMatched
+	file = filepath.FromSlash(file)
+
+	for _, pattern := range pm.patterns {
+		// Skip evaluation if this is an inclusion and the filename
+		// already matched the pattern, or it's an exclusion and it has
+		// not matched the pattern yet.
+		if pattern.exclusion != matched {
+			continue
+		}
+
+		match, err := pattern.match(file)
+		if err != nil {
+			return false, err
+		}
+
+		if match {
+			matched = !pattern.exclusion
+		}
+	}
+	return matched, nil
+}
+
+// MatchInfo tracks information about parent dir matches while traversing a
+// filesystem.
+type MatchInfo struct {
+	parentMatched []bool
+}
+
+// MatchesUsingParentResults returns true if "file" matches any of the patterns
+// and isn't excluded by any of the subsequent patterns. The functionality is
+// the same as Matches, but as an optimization, the caller passes in
+// intermediate results from matching the parent directory.
+//
+// The "file" argument should be a slash-delimited path.
+//
+// MatchesUsingParentResults is not safe to call concurrently.
+func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) {
+	parentMatched := parentMatchInfo.parentMatched
+	if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) {
+		return false, MatchInfo{}, errors.New("wrong number of values in parentMatched")
+	}
+
+	file = filepath.FromSlash(file)
+	matched := false
+
+	matchInfo := MatchInfo{
+		parentMatched: make([]bool, len(pm.patterns)),
+	}
+	for i, pattern := range pm.patterns {
+		match := false
+		// If the parent matched this pattern, we don't need to recheck.
+		if len(parentMatched) != 0 {
+			match = parentMatched[i]
+		}
+
+		if !match {
+			// Skip evaluation if this is an inclusion and the filename
+			// already matched the pattern, or it's an exclusion and it has
+			// not matched the pattern yet.
+ if pattern.exclusion != matched { + continue + } + + var err error + match, err = pattern.match(file) + if err != nil { + return false, matchInfo, err + } + + // If the zero value of MatchInfo was passed in, we don't have + // any information about the parent dir's match results, and we + // apply the same logic as MatchesOrParentMatches. + if !match && len(parentMatched) == 0 { + if parentPath := filepath.Dir(file); parentPath != "." { + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + } + } + matchInfo.parentMatched[i] = match + + if match { + matched = !pattern.exclusion + } + } + return matched, matchInfo, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. +type Pattern struct { + matchType matchType + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +type matchType int + +const ( + unknownMatch matchType = iota + exactMatch + prefixMatch + suffixMatch + regexpMatch +) + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + if p.matchType == unknownMatch { + if err := p.compile(string(os.PathSeparator)); err != nil { + return false, filepath.ErrBadPattern + } + } + + switch p.matchType { + case exactMatch: + return path == p.cleanedPattern, nil + case prefixMatch: + // strip trailing ** + return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil + case suffixMatch: + // strip leading ** + suffix := p.cleanedPattern[2:] + if strings.HasSuffix(path, suffix) { + return true, nil + } + // **/foo matches "foo" + return suffix[0] == os.PathSeparator && path == suffix[1:], nil + case regexpMatch: + return p.regexp.MatchString(path), nil + } + + return false, nil +} + +func (p *Pattern) compile(sl string) error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + escSL := sl + if sl == `\` { + escSL += `\` + } + + p.matchType = exactMatch + for i := 0; scan.Peek() != scanner.EOF; i++ { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + if p.matchType == exactMatch { + p.matchType = prefixMatch + } else { + regStr += ".*" + p.matchType = regexpMatch + } + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" 
+ p.matchType = regexpMatch + } + + if i == 0 { + p.matchType = suffixMatch + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + p.matchType = regexpMatch + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + p.matchType = regexpMatch + } else if shouldEscape(ch) { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + p.matchType = regexpMatch + } else { + regStr += `\` + } + } else if ch == '[' || ch == ']' { + regStr += string(ch) + p.matchType = regexpMatch + } else { + regStr += string(ch) + } + } + + if p.matchType != regexpMatch { + return nil + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + p.matchType = regexpMatch + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// This implementation is buggy (it only checks a single parent dir against the +// pattern) and will be removed soon. Use MatchesOrParentMatches instead. +func Matches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// MatchesOrParentMatches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func MatchesOrParentMatches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. 
+ return false, nil + } + + return pm.MatchesOrParentMatches(file) +} diff --git a/vendor/github.com/moby/sys/mount/mount_errors.go b/vendor/github.com/moby/sys/mount/mount_errors.go index 2bd1ef7f157a..b0d8582e844f 100644 --- a/vendor/github.com/moby/sys/mount/mount_errors.go +++ b/vendor/github.com/moby/sys/mount/mount_errors.go @@ -1,5 +1,5 @@ -//go:build !windows -// +build !windows +//go:build !darwin && !windows +// +build !darwin,!windows package mount diff --git a/vendor/github.com/moby/sys/mount/mounter_unsupported.go b/vendor/github.com/moby/sys/mount/mounter_unsupported.go index 31fb7235b0d2..b69d62bd65b4 100644 --- a/vendor/github.com/moby/sys/mount/mounter_unsupported.go +++ b/vendor/github.com/moby/sys/mount/mounter_unsupported.go @@ -1,5 +1,5 @@ -//go:build (!linux && !freebsd && !openbsd && !windows) || (freebsd && !cgo) || (openbsd && !cgo) -// +build !linux,!freebsd,!openbsd,!windows freebsd,!cgo openbsd,!cgo +//go:build (!linux && !freebsd && !openbsd && !windows && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) +// +build !linux,!freebsd,!openbsd,!windows,!darwin freebsd,!cgo openbsd,!cgo package mount diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go index bf221e687f17..e78e726196e1 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -15,7 +15,7 @@ import ( // // If a non-existent path is specified, an appropriate error is returned. // In case the caller is not interested in this particular error, it should -// be handled separately using e.g. errors.Is(err, os.ErrNotExist). +// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). // // This function is only available on Linux. When available (since kernel // v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise, diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index 45ddad236f34..c7b7678f9a06 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -1,10 +1,9 @@ -//go:build linux || (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo) -// +build linux freebsd,cgo openbsd,cgo darwin,cgo +//go:build linux || freebsd || openbsd || darwin +// +build linux freebsd openbsd darwin package mountinfo import ( - "fmt" "os" "path/filepath" @@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) { func normalizePath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + return "", err } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + return "", err } if _, err := os.Stat(realPath); err != nil { - return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + return "", err } return realPath, nil } diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go index c7e5cb42aca3..574aeb8767a7 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go @@ -15,7 +15,7 @@ func GetMounts(f FilterFunc) ([]*Info, error) { // // If a non-existent path is specified, an appropriate error is returned. 
// In case the caller is not interested in this particular error, it should -// be handled separately using e.g. errors.Is(err, os.ErrNotExist). +// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). func Mounted(path string) (bool, error) { // root is always mounted if path == string(os.PathSeparator) { diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go index d5513a26d2fa..8420f58c7a97 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go @@ -1,53 +1,37 @@ -//go:build (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo) -// +build freebsd,cgo openbsd,cgo darwin,cgo +//go:build freebsd || openbsd || darwin +// +build freebsd openbsd darwin package mountinfo -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) +import "golang.org/x/sys/unix" // parseMountTable returns information about mounted filesystems func parseMountTable(filter FilterFunc) ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("failed to call getmntinfo") + count, err := unix.Getfsstat(nil, unix.MNT_WAIT) + if err != nil { + return nil, err } - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) + entries := make([]unix.Statfs_t, count) + _, err = unix.Getfsstat(entries, unix.MNT_WAIT) + if err != nil { + return nil, err + } var out []*Info for _, entry := range entries { - var mountinfo Info var skip, stop bool - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.FSType = C.GoString(&entry.f_fstypename[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo := getMountinfo(&entry) if filter != nil { // filter out entries we're not interested in - skip, stop = filter(&mountinfo) + skip, stop = filter(mountinfo) if skip { continue } } - out = append(out, &mountinfo) + out = append(out, mountinfo) if stop { break } diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go new file mode 100644 index 000000000000..ecaaa7a9c11f --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go @@ -0,0 +1,14 @@ +//go:build freebsd || darwin +// +build freebsd darwin + +package mountinfo + +import "golang.org/x/sys/unix" + +func getMountinfo(entry *unix.Statfs_t) *Info { + return &Info{ + Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]), + FSType: unix.ByteSliceToString(entry.Fstypename[:]), + Source: unix.ByteSliceToString(entry.Mntfromname[:]), + } +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go new file mode 100644 index 000000000000..f682c2d3b594 --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go @@ -0,0 +1,11 @@ +package mountinfo + +import "golang.org/x/sys/unix" + +func getMountinfo(entry *unix.Statfs_t) *Info { + return &Info{ + Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]), + FSType: unix.ByteSliceToString(entry.F_fstypename[:]), + Source: unix.ByteSliceToString(entry.F_mntfromname[:]), + } +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go 
index 95769a76dadb..c2e64bc81c76 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go @@ -1,5 +1,5 @@ -//go:build (!windows && !linux && !freebsd && !openbsd && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) || (darwin && !cgo) -// +build !windows,!linux,!freebsd,!openbsd,!darwin freebsd,!cgo openbsd,!cgo darwin,!cgo +//go:build !windows && !linux && !freebsd && !openbsd && !darwin +// +build !windows,!linux,!freebsd,!openbsd,!darwin package mountinfo diff --git a/vendor/github.com/moby/sys/sequential/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go
new file mode 100644
index 000000000000..af2817504b7a
--- /dev/null
+++ b/vendor/github.com/moby/sys/sequential/doc.go
@@ -0,0 +1,15 @@
+// Package sequential provides a set of functions for managing sequential
+// files on Windows.
+//
+// These functions originate in the Go os and windows packages, slightly
+// modified to cope only with files, not directories, due to the specific
+// use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+//
+// For non-Windows platforms, the package provides wrappers for the equivalents
+// in the os package. They are passthroughs on Unix platforms, and only relevant
+// on Windows.
+package sequential
diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go
new file mode 100644
index 000000000000..a3c7340e3acb
--- /dev/null
+++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go
@@ -0,0 +1,45 @@
+//go:build !windows
+// +build !windows
+
+package sequential
+
+import "os"
+
+// Create creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func Create(name string) (*os.File, error) {
+	return os.Create(name)
+}
+
+// Open opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func Open(name string) (*os.File, error) {
+	return os.Open(name)
+}
+
+// OpenFile is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with the specified flag
+// (O_RDONLY etc.) and perm (0666 etc.), if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// CreateTemp creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + return os.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go new file mode 100644 index 000000000000..3f7f0d83e008 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -0,0 +1,161 @@ +package sequential + +import ( + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
+func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, err := openFileSequential(name, flag, 0) + if err == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: err} +} + +func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for CreateTemp +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// CreateTemp is a copy of os.CreateTemp, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/moby/sys/signal/signal_windows.go b/vendor/github.com/moby/sys/signal/signal_windows.go index cb459d17c251..5f7d5871eb94 100644 --- a/vendor/github.com/moby/sys/signal/signal_windows.go +++ b/vendor/github.com/moby/sys/signal/signal_windows.go @@ -31,4 +31,27 @@ var SignalMap = map[string]syscall.Signal{ "SEGV": syscall.Signal(windows.SIGSEGV), "TERM": syscall.Signal(windows.SIGTERM), "TRAP": syscall.Signal(windows.SIGTRAP), + + // additional linux signals supported for LCOW + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), } diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore deleted file mode 100644 index b0747ff010a7..000000000000 --- a/vendor/github.com/moby/term/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -profile.out -# support running go modules in vendor mode for local development -vendor/ diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md deleted file mode 100644 index 0ce92cc33980..000000000000 --- a/vendor/github.com/moby/term/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# term - utilities for dealing with terminals - -![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) - -term provides structures and helper functions to work with terminal (state, sizes). - -#### Using term - -```go -package main - -import ( - "log" - "os" - - "github.com/moby/term" -) - -func main() { - fd := os.Stdin.Fd() - if term.IsTerminal(fd) { - ws, err := term.GetWinsize(fd) - if err != nil { - log.Fatalf("term.GetWinsize: %s", err) - } - log.Printf("%d:%d\n", ws.Height, ws.Width) - } -} -``` - -## Contributing - -Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. - -## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
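
Reviewer note: the moby/sys/sequential package vendored in above is a thin wrapper layer, so a short usage sketch may help. This is illustrative caller code under stated assumptions, not part of the diff; the file name `layer.tar` is a made-up example.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/moby/sys/sequential"
)

func main() {
	// On Windows, sequential.Open opens the file with
	// FILE_FLAG_SEQUENTIAL_SCAN so streaming a large blob (e.g. a layer
	// tarball) does not flood the standby list; on other platforms it is a
	// plain os.Open passthrough.
	f, err := sequential.Open("layer.tar") // hypothetical input file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	n, err := io.Copy(io.Discard, f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("streamed %d bytes sequentially\n", n)
}
```
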
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go deleted file mode 100644 index 55873c0556c9..000000000000 --- a/vendor/github.com/moby/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go deleted file mode 100644 index c47756b89a9c..000000000000 --- a/vendor/github.com/moby/term/proxy.go +++ /dev/null @@ -1,88 +0,0 @@ -package term - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader - buf []byte -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. -func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (n int, err error) { - if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { - return 0, EscapeError{} - } - - if len(r.buf) > 0 { - n = copy(buf, r.buf) - r.buf = r.buf[n:] - } - - nr, err := r.r.Read(buf[n:]) - n += nr - if len(r.escapeKeys) == 0 { - return n, err - } - - for i := 0; i < n; i++ { - if buf[i] == r.escapeKeys[r.escapeKeyPos] { - r.escapeKeyPos++ - - // Check if the full escape sequence is matched. - if r.escapeKeyPos == len(r.escapeKeys) { - n = i + 1 - r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, EscapeError{} - } - continue - } - - // If we need to prepend a partial escape sequence from the previous - // read, make sure the new buffer size doesn't exceed len(buf). - // Otherwise, preserve any extra data in a buffer for the next read. - if i < r.escapeKeyPos { - preserve := make([]byte, 0, r.escapeKeyPos+n) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf[:n]...) - n = copy(buf, preserve) - i += r.escapeKeyPos - r.buf = append(r.buf, preserve[n:]...) 
- } - r.escapeKeyPos = 0 - } - - // If we're in the middle of reading an escape sequence, make sure we don't - // let the caller read it. If later on we find that this is not the escape - // sequence, we'll prepend it back to buf. - n -= r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, err -} diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go deleted file mode 100644 index 65556027a6d3..000000000000 --- a/vendor/github.com/moby/term/tc.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr) (*Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go deleted file mode 100644 index 29c6acf1c7ef..000000000000 --- a/vendor/github.com/moby/term/term.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - return tcset(fd, &state.termios) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != nil { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. 
-func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go deleted file mode 100644 index ba82960d4a6d..000000000000 --- a/vendor/github.com/moby/term/term_windows.go +++ /dev/null @@ -1,231 +0,0 @@ -package term - -import ( - "io" - "os" - "os/signal" - - windowsconsole "github.com/moby/term/windows" - "golang.org/x/sys/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var ( - emulateStdin, emulateStdout, emulateStderr bool - - mode uint32 - ) - - fd := windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - _ = windows.SetConsoleMode(fd, mode) - } - - fd = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and - // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as - // go-ansiterm hasn't switch to x/sys/windows. 
- // TODO: switch back to x/sys/windows once go-ansiterm has switched - if emulateStdin { - h := uint32(windows.STD_INPUT_HANDLE) - stdIn = windowsconsole.NewAnsiReader(int(h)) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - h := uint32(windows.STD_OUTPUT_HANDLE) - stdOut = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - h := uint32(windows.STD_ERROR_HANDLE) - stdErr = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var mode uint32 - - if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { - return nil, err - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT - err := windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. 
- _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - _ = RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios.go deleted file mode 100644 index 0f028e2273ac..000000000000 --- a/vendor/github.com/moby/term/termios.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// Termios is the Unix API for terminal I/O. -type Termios = unix.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - - oldState := State{termios: *termios} - - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := tcset(fd, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go deleted file mode 100644 index 922dd4baab04..000000000000 --- a/vendor/github.com/moby/term/termios_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build darwin freebsd openbsd netbsd - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go deleted file mode 100644 index 038fd61ba1eb..000000000000 --- a/vendor/github.com/moby/term/termios_nonbsd.go +++ /dev/null @@ -1,12 +0,0 @@ -//+build !darwin,!freebsd,!netbsd,!openbsd,!windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go deleted file mode 100644 index 155251521b09..000000000000 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ /dev/null @@ -1,252 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. - recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. 
-func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. -func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go deleted file mode 100644 index ccb5ef07757f..000000000000 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. 
-type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - - return &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go deleted file mode 100644 index 993694ddcd99..000000000000 --- a/vendor/github.com/moby/term/windows/console.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = isConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() -var IsConsole = isConsole - -func isConsole(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go deleted file mode 100644 index 54265fffaffd..000000000000 --- a/vendor/github.com/moby/term/windows/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go deleted file mode 100644 index 1ef98d59961e..000000000000 --- a/vendor/github.com/moby/term/winsize.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. 
-func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 6e442a0853f4..94f19be62850 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -35,6 +35,11 @@ type Descriptor struct { // Annotations contains arbitrary metadata relating to the targeted content. Annotations map[string]string `json:"annotations,omitempty"` + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + // Platform describes the platform which the image in the manifest runs on. // // This should only be used when referring to a manifest. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index 82da6c6a8989..ed4a56e59e85 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -21,7 +21,7 @@ import "github.com/opencontainers/image-spec/specs-go" type Index struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` // Manifests references platform specific manifests. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index d72d15ce4bb8..8212d520c06f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -20,7 +20,7 @@ import "github.com/opencontainers/image-spec/specs-go" type Manifest struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` // Config references a configuration object for a container, by digest. 
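The `Data` field added to `Descriptor` above is the substantive change in this image-spec bump: small blobs can now travel inline with the descriptor (encoding/json renders the byte slice as base64), letting a client skip a separate content fetch. Below is a minimal consumer-side sketch, not part of this PR: `useDescriptorData` is a hypothetical helper, and the only APIs assumed are the vendored `v1.Descriptor` itself plus `digest.FromBytes` and `v1.MediaTypeImageConfig`, both of which exist in the referenced modules. The key point is that embedded bytes must still be verified against the descriptor's digest and size before use.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// useDescriptorData returns desc's embedded content when it is present and
// consistent with the descriptor; otherwise the caller falls back to fetching
// the blob by digest as before.
func useDescriptorData(desc v1.Descriptor) ([]byte, error) {
	if desc.Data == nil {
		return nil, fmt.Errorf("no inline data, fetch blob %s", desc.Digest)
	}
	// Never trust the embedded bytes blindly: the digest stays authoritative.
	if int64(len(desc.Data)) != desc.Size || digest.FromBytes(desc.Data) != desc.Digest {
		return nil, fmt.Errorf("inline data does not match descriptor %s", desc.Digest)
	}
	return desc.Data, nil
}

func main() {
	blob := []byte(`{"architecture":"amd64","os":"linux"}`)
	desc := v1.Descriptor{
		MediaType: v1.MediaTypeImageConfig,
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
		Data:      blob, // marshalled to JSON as a base64 string
	}
	data, err := useDescriptorData(desc)
	fmt.Println(string(data), err)
}
```

Keeping the digest authoritative means an intermediary cannot smuggle different content through the inline field.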
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
index 0ac7d819e6d3..57a15c9a11ec 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go
@@ -9,6 +9,5 @@ Usage:
 	if selinux.EnforceMode() != selinux.Enforcing {
 		selinux.SetEnforceMode(selinux.Enforcing)
 	}
-
 */
 package selinux
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
index 12de0ae5d65e..f61a560158b2 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
@@ -3,8 +3,6 @@ package label
 import (
 	"errors"
 	"fmt"
-	"os"
-	"os/user"
 	"strings"
 
 	"github.com/opencontainers/selinux/go-selinux"
@@ -113,50 +111,6 @@ func Relabel(path string, fileLabel string, shared bool) error {
 		return nil
 	}
 
-	exclude_paths := map[string]bool{
-		"/":           true,
-		"/bin":        true,
-		"/boot":       true,
-		"/dev":        true,
-		"/etc":        true,
-		"/etc/passwd": true,
-		"/etc/pki":    true,
-		"/etc/shadow": true,
-		"/home":       true,
-		"/lib":        true,
-		"/lib64":      true,
-		"/media":      true,
-		"/opt":        true,
-		"/proc":       true,
-		"/root":       true,
-		"/run":        true,
-		"/sbin":       true,
-		"/srv":        true,
-		"/sys":        true,
-		"/tmp":        true,
-		"/usr":        true,
-		"/var":        true,
-		"/var/lib":    true,
-		"/var/log":    true,
-	}
-
-	if home := os.Getenv("HOME"); home != "" {
-		exclude_paths[home] = true
-	}
-
-	if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
-		if usr, err := user.Lookup(sudoUser); err == nil {
-			exclude_paths[usr.HomeDir] = true
-		}
-	}
-
-	if path != "/" {
-		path = strings.TrimSuffix(path, "/")
-	}
-	if exclude_paths[path] {
-		return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
-	}
-
 	if shared {
 		c, err := selinux.NewContext(fileLabel)
 		if err != nil {
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
index 02d206239c7e..f21c80c5ab03 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package label
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
index 897ecbac41ce..8bff29355798 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go
@@ -1,3 +1,4 @@
+//go:build linux && go1.16
 // +build linux,go1.16
 
 package selinux
@@ -11,8 +12,19 @@ import (
 )
 
 func rchcon(fpath, label string) error {
+	fastMode := false
+	// If the current label matches the new label, assume
+	// other labels are correct.
+	if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label {
+		fastMode = true
+	}
 	return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
-		e := setFileLabel(p, label)
+		if fastMode {
+			if cLabel, err := lFileLabel(p); err == nil && cLabel == label {
+				return nil
+			}
+		}
+		e := lSetFileLabel(p, label)
 		// Walk a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) { return nil diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go index 2c8b033ce058..303cb1890ad4 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go @@ -1,3 +1,4 @@ +//go:build linux && !go1.16 // +build linux,!go1.16 package selinux @@ -11,7 +12,7 @@ import ( func rchcon(fpath, label string) error { return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error { - e := setFileLabel(p, label) + e := lSetFileLabel(p, label) // Walk a file tree can race with removal, so ignore ENOENT. if errors.Is(e, os.ErrNotExist) { return nil diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index ee602ab96ddc..4582cc9e0a2e 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "math/big" "os" + "os/user" "path" "path/filepath" "strconv" @@ -1072,21 +1073,6 @@ func copyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// Prevent users from relabeling system files -func badPrefix(fpath string) error { - if fpath == "" { - return ErrEmptyPath - } - - badPrefixes := []string{"/usr"} - for _, prefix := range badPrefixes { - if strings.HasPrefix(fpath, prefix) { - return fmt.Errorf("relabeling content in %s is not allowed", prefix) - } - } - return nil -} - // chcon changes the fpath file object to the SELinux label label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. 
@@ -1097,12 +1083,70 @@ func chcon(fpath string, label string, recurse bool) error { if label == "" { return nil } - if err := badPrefix(fpath); err != nil { - return err + + exclude_paths := map[string]bool{ + "/": true, + "/bin": true, + "/boot": true, + "/dev": true, + "/etc": true, + "/etc/passwd": true, + "/etc/pki": true, + "/etc/shadow": true, + "/home": true, + "/lib": true, + "/lib64": true, + "/media": true, + "/opt": true, + "/proc": true, + "/root": true, + "/run": true, + "/sbin": true, + "/srv": true, + "/sys": true, + "/tmp": true, + "/usr": true, + "/var": true, + "/var/lib": true, + "/var/log": true, + } + + if home := os.Getenv("HOME"); home != "" { + exclude_paths[home] = true + } + + if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { + if usr, err := user.Lookup(sudoUser); err == nil { + exclude_paths[usr.HomeDir] = true + } + } + + if fpath != "/" { + fpath = strings.TrimSuffix(fpath, "/") + } + if exclude_paths[fpath] { + return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) } if !recurse { - return setFileLabel(fpath, label) + err := lSetFileLabel(fpath, label) + if err != nil { + // Check if file doesn't exist, must have been removed + if errors.Is(err, os.ErrNotExist) { + return nil + } + // Check if current label is correct on disk + flabel, nerr := lFileLabel(fpath) + if nerr == nil && flabel == label { + return nil + } + // Check if file doesn't exist, must have been removed + if errors.Is(nerr, os.ErrNotExist) { + return nil + } + return err + } + return nil } return rchcon(fpath, label) diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 78743b020c92..20d888031218 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package selinux diff --git a/vendor/github.com/package-url/packageurl-go/.gitignore b/vendor/github.com/package-url/packageurl-go/.gitignore new file mode 100644 index 000000000000..a1338d68517e --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/package-url/packageurl-go/.golangci.yaml b/vendor/github.com/package-url/packageurl-go/.golangci.yaml new file mode 100644 index 000000000000..73a5741c9270 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/.golangci.yaml @@ -0,0 +1,17 @@ +# individual linter configs go here +linters-settings: + +# default linters are enabled `golangci-lint help linters` +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck \ No newline at end of file diff --git a/vendor/github.com/package-url/packageurl-go/LICENSE b/vendor/github.com/package-url/packageurl-go/LICENSE new file mode 100644 index 000000000000..0b5633b5de5b --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) the purl authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation 
files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/package-url/packageurl-go/Makefile b/vendor/github.com/package-url/packageurl-go/Makefile new file mode 100644 index 000000000000..f6e71425f759 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/Makefile @@ -0,0 +1,12 @@ +.PHONY: test clean lint + +test: + curl -L https://raw.githubusercontent.com/package-url/purl-spec/master/test-suite-data.json -o testdata/test-suite-data.json + go test -v -cover ./... + +clean: + find . -name "test-suite-data.json" | xargs rm -f + +lint: + go get -u golang.org/x/lint/golint + golint -set_exit_status diff --git a/vendor/github.com/package-url/packageurl-go/README.md b/vendor/github.com/package-url/packageurl-go/README.md new file mode 100644 index 000000000000..783985498b0b --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/README.md @@ -0,0 +1,74 @@ +# packageurl-go + +[![build](https://github.com/package-url/packageurl-go/workflows/test/badge.svg)](https://github.com/package-url/packageurl-go/actions?query=workflow%3Atest) [![Coverage Status](https://coveralls.io/repos/github/package-url/packageurl-go/badge.svg)](https://coveralls.io/github/package-url/packageurl-go) [![PkgGoDev](https://pkg.go.dev/badge/github.com/package-url/packageurl-go)](https://pkg.go.dev/github.com/package-url/packageurl-go) [![Go Report Card](https://goreportcard.com/badge/github.com/package-url/packageurl-go)](https://goreportcard.com/report/github.com/package-url/packageurl-go) + +Go implementation of the package url spec. + + +## Install +``` +go get -u github.com/package-url/packageurl-go +``` + +## Versioning + +The versions will follow the spec. So if the spec is released at ``1.0``. Then all versions in the ``1.x.y`` will follow the ``1.x`` spec. + + +## Usage + +### Create from parts +```go +package main + +import ( + "fmt" + + "github.com/package-url/packageurl-go" +) + +func main() { + instance := packageurl.NewPackageURL("test", "ok", "name", "version", nil, "") + fmt.Printf("%s", instance.ToString()) +} +``` + +### Parse from string +```go +package main + +import ( + "fmt" + + "github.com/package-url/packageurl-go" +) + +func main() { + instance, err := packageurl.FromString("test:ok/name@version") + if err != nil { + panic(err) + } + fmt.Printf("%#v", instance) +} + +``` + + +## Test +Testing using the normal ``go test`` command. Using ``make test`` will pull the test fixtures shared between all package-url projects and then execute the tests. 
+ +``` +$ make test +curl -L https://raw.githubusercontent.com/package-url/purl-test-suite/master/test-suite-data.json -o testdata/test-suite-data.json + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 7181 100 7181 0 0 1202 0 0:00:05 0:00:05 --:--:-- 1611 +go test -v -cover ./... +=== RUN TestFromStringExamples +--- PASS: TestFromStringExamples (0.00s) +=== RUN TestToStringExamples +--- PASS: TestToStringExamples (0.00s) +PASS +coverage: 94.7% of statements +ok github.com/package-url/packageurl-go 0.002s +``` diff --git a/vendor/github.com/package-url/packageurl-go/VERSION b/vendor/github.com/package-url/packageurl-go/VERSION new file mode 100644 index 000000000000..77d6f4ca2371 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/VERSION @@ -0,0 +1 @@ +0.0.0 diff --git a/vendor/github.com/package-url/packageurl-go/packageurl.go b/vendor/github.com/package-url/packageurl-go/packageurl.go new file mode 100644 index 000000000000..3cba7095d5f1 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/packageurl.go @@ -0,0 +1,402 @@ +/* +Copyright (c) the purl authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package packageurl implements the package-url spec +package packageurl + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "sort" + "strings" +) + +var ( + // QualifierKeyPattern describes a valid qualifier key: + // + // - The key must be composed only of ASCII letters and numbers, '.', + // '-' and '_' (period, dash and underscore). + // - A key cannot start with a number. + QualifierKeyPattern = regexp.MustCompile(`^[A-Za-z\.\-_][0-9A-Za-z\.\-_]*$`) +) + +// These are the known purl types as defined in the spec. Some of these require +// special treatment during parsing. +// https://github.com/package-url/purl-spec#known-purl-types +var ( + // TypeBitbucket is a pkg:bitbucket purl. + TypeBitbucket = "bitbucket" + // TypeCocoapods is a pkg:cocoapods purl. + TypeCocoapods = "cocoapods" + // TypeCargo is a pkg:cargo purl. + TypeCargo = "cargo" + // TypeComposer is a pkg:composer purl. + TypeComposer = "composer" + // TypeConan is a pkg:conan purl. + TypeConan = "conan" + // TypeConda is a pkg:conda purl. + TypeConda = "conda" + // TypeCran is a pkg:cran purl. + TypeCran = "cran" + // TypeDebian is a pkg:deb purl. + TypeDebian = "deb" + // TypeDocker is a pkg:docker purl. + TypeDocker = "docker" + // TypeGem is a pkg:gem purl. + TypeGem = "gem" + // TypeGeneric is a pkg:generic purl. 
+ TypeGeneric = "generic" + // TypeGithub is a pkg:github purl. + TypeGithub = "github" + // TypeGolang is a pkg:golang purl. + TypeGolang = "golang" + // TypeHackage is a pkg:hackage purl. + TypeHackage = "hackage" + // TypeHex is a pkg:hex purl. + TypeHex = "hex" + // TypeMaven is a pkg:maven purl. + TypeMaven = "maven" + // TypeNPM is a pkg:npm purl. + TypeNPM = "npm" + // TypeNuget is a pkg:nuget purl. + TypeNuget = "nuget" + // TypeOCI is a pkg:oci purl + TypeOCI = "oci" + // TypePyPi is a pkg:pypi purl. + TypePyPi = "pypi" + // TypeRPM is a pkg:rpm purl. + TypeRPM = "rpm" + // TypeSwift is pkg:swift purl + TypeSwift = "swift" +) + +// Qualifier represents a single key=value qualifier in the package url +type Qualifier struct { + Key string + Value string +} + +func (q Qualifier) String() string { + // A value must be a percent-encoded string + return fmt.Sprintf("%s=%s", q.Key, url.PathEscape(q.Value)) +} + +// Qualifiers is a slice of key=value pairs, with order preserved as it appears +// in the package URL. +type Qualifiers []Qualifier + +// QualifiersFromMap constructs a Qualifiers slice from a string map. To get a +// deterministic qualifier order (despite maps not providing any iteration order +// guarantees) the returned Qualifiers are sorted in increasing order of key. +func QualifiersFromMap(mm map[string]string) Qualifiers { + q := Qualifiers{} + + for k, v := range mm { + q = append(q, Qualifier{Key: k, Value: v}) + } + + // sort for deterministic qualifier order + sort.Slice(q, func(i int, j int) bool { return q[i].Key < q[j].Key }) + + return q +} + +// Map converts a Qualifiers struct to a string map. +func (qq Qualifiers) Map() map[string]string { + m := make(map[string]string) + + for i := 0; i < len(qq); i++ { + k := qq[i].Key + v := qq[i].Value + m[k] = v + } + + return m +} + +func (qq Qualifiers) String() string { + var kvPairs []string + for _, q := range qq { + kvPairs = append(kvPairs, q.String()) + } + return strings.Join(kvPairs, "&") +} + +// PackageURL is the struct representation of the parts that make a package url +type PackageURL struct { + Type string + Namespace string + Name string + Version string + Qualifiers Qualifiers + Subpath string +} + +// NewPackageURL creates a new PackageURL struct instance based on input +func NewPackageURL(purlType, namespace, name, version string, + qualifiers Qualifiers, subpath string) *PackageURL { + + return &PackageURL{ + Type: purlType, + Namespace: namespace, + Name: name, + Version: version, + Qualifiers: qualifiers, + Subpath: subpath, + } +} + +// ToString returns the human-readable instance of the PackageURL structure. +// This is the literal purl as defined by the spec. 
+func (p *PackageURL) ToString() string { + // Start with the type and a colon + purl := fmt.Sprintf("pkg:%s/", p.Type) + // Add namespaces if provided + if p.Namespace != "" { + var ns []string + for _, item := range strings.Split(p.Namespace, "/") { + ns = append(ns, url.QueryEscape(item)) + } + purl = purl + strings.Join(ns, "/") + "/" + } + // The name is always required and must be a percent-encoded string + // Use url.QueryEscape instead of PathEscape, as it handles @ signs + purl = purl + url.QueryEscape(p.Name) + // If a version is provided, add it after the at symbol + if p.Version != "" { + // A name must be a percent-encoded string + purl = purl + "@" + url.PathEscape(p.Version) + } + + // Iterate over qualifiers and make groups of key=value + var qualifiers []string + for _, q := range p.Qualifiers { + qualifiers = append(qualifiers, q.String()) + } + // If there are one or more key=value pairs, append on the package url + if len(qualifiers) != 0 { + purl = purl + "?" + strings.Join(qualifiers, "&") + } + // Add a subpath if available + if p.Subpath != "" { + purl = purl + "#" + p.Subpath + } + return purl +} + +func (p PackageURL) String() string { + return p.ToString() +} + +// FromString parses a valid package url string into a PackageURL structure +func FromString(purl string) (PackageURL, error) { + initialIndex := strings.Index(purl, "#") + // Start with purl being stored in the remainder + remainder := purl + substring := "" + if initialIndex != -1 { + initialSplit := strings.SplitN(purl, "#", 2) + remainder = initialSplit[0] + rightSide := initialSplit[1] + rightSide = strings.TrimLeft(rightSide, "/") + rightSide = strings.TrimRight(rightSide, "/") + var rightSides []string + + for _, item := range strings.Split(rightSide, "/") { + item = strings.Replace(item, ".", "", -1) + item = strings.Replace(item, "..", "", -1) + if item != "" { + i, err := url.PathUnescape(item) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape path: %s", err) + } + rightSides = append(rightSides, i) + } + } + substring = strings.Join(rightSides, "/") + } + qualifiers := Qualifiers{} + index := strings.LastIndex(remainder, "?") + // If we don't have anything to split then return an empty result + if index != -1 { + qualifier := remainder[index+1:] + for _, item := range strings.Split(qualifier, "&") { + kv := strings.Split(item, "=") + key := strings.ToLower(kv[0]) + key, err := url.PathUnescape(key) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape qualifier key: %s", err) + } + if !validQualifierKey(key) { + return PackageURL{}, fmt.Errorf("invalid qualifier key: '%s'", key) + } + // TODO + // - If the `key` is `checksums`, split the `value` on ',' to create + // a list of `checksums` + if kv[1] == "" { + continue + } + value, err := url.PathUnescape(kv[1]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape qualifier value: %s", err) + } + qualifiers = append(qualifiers, Qualifier{key, value}) + } + remainder = remainder[:index] + } + + nextSplit := strings.SplitN(remainder, ":", 2) + if len(nextSplit) != 2 || nextSplit[0] != "pkg" { + return PackageURL{}, errors.New("scheme is missing") + } + // leading slashes after pkg: are to be ignored (pkg://maven is + // equivalent to pkg:maven) + remainder = strings.TrimLeft(nextSplit[1], "/") + + nextSplit = strings.SplitN(remainder, "/", 2) + if len(nextSplit) != 2 { + return PackageURL{}, errors.New("type is missing") + } + // purl type is case-insensitive, canonical form is lower-case 
+ purlType := strings.ToLower(nextSplit[0]) + remainder = nextSplit[1] + + index = strings.LastIndex(remainder, "/") + name := typeAdjustName(purlType, remainder[index+1:]) + version := "" + + atIndex := strings.Index(name, "@") + if atIndex != -1 { + v, err := url.PathUnescape(name[atIndex+1:]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape purl version: %s", err) + } + version = v + + unecapeName, err := url.PathUnescape(name[:atIndex]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape purl name: %s", err) + } + name = unecapeName + } + var namespaces []string + + if index != -1 { + remainder = remainder[:index] + + for _, item := range strings.Split(remainder, "/") { + if item != "" { + unescaped, err := url.PathUnescape(item) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape path: %s", err) + } + namespaces = append(namespaces, unescaped) + } + } + } + namespace := strings.Join(namespaces, "/") + namespace = typeAdjustNamespace(purlType, namespace) + + // Fail if name is empty at this point + if name == "" { + return PackageURL{}, errors.New("name is required") + } + + err := validCustomRules(purlType, name, namespace, version, qualifiers) + if err != nil { + return PackageURL{}, err + } + + return PackageURL{ + Type: purlType, + Namespace: namespace, + Name: name, + Version: version, + Qualifiers: qualifiers, + Subpath: substring, + }, nil +} + +// Make any purl type-specific adjustments to the parsed namespace. +// See https://github.com/package-url/purl-spec#known-purl-types +func typeAdjustNamespace(purlType, ns string) string { + switch purlType { + case TypeBitbucket, TypeDebian, TypeGithub, TypeGolang, TypeNPM, TypeRPM: + return strings.ToLower(ns) + } + return ns +} + +// Make any purl type-specific adjustments to the parsed name. +// See https://github.com/package-url/purl-spec#known-purl-types +func typeAdjustName(purlType, name string) string { + switch purlType { + case TypeBitbucket, TypeDebian, TypeGithub, TypeGolang, TypeNPM: + return strings.ToLower(name) + case TypePyPi: + return strings.ToLower(strings.ReplaceAll(name, "_", "-")) + } + return name +} + +// validQualifierKey validates a qualifierKey against our QualifierKeyPattern. +func validQualifierKey(key string) bool { + return QualifierKeyPattern.MatchString(key) +} + +// validCustomRules evaluates additional rules for each package url type, as specified in the package-url specification. +// On success, it returns nil. On failure, a descriptive error will be returned. 
+func validCustomRules(purlType, name, ns, version string, qualifiers Qualifiers) error { + q := qualifiers.Map() + switch purlType { + case TypeConan: + if ns != "" { + if val, ok := q["channel"]; ok { + if val == "" { + return errors.New("the qualifier channel must be not empty if namespace is present") + } + } else { + return errors.New("channel qualifier does not exist") + } + } else { + if val, ok := q["channel"]; ok { + if val != "" { + return errors.New("namespace is required if channel is non empty") + } + } + } + case TypeSwift: + if ns == "" { + return errors.New("namespace is required") + } + if version == "" { + return errors.New("version is required") + } + case TypeCran: + if version == "" { + return errors.New("version is required") + } + } + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index 6c061712bb10..7399e04bf654 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -25,9 +25,9 @@ and [much faster][v2-bench]. If you only need reading and writing TOML documents (majority of cases), those features are implemented and the API unlikely to change. -The remaining features (Document structure editing and tooling) will be added -shortly. While pull-requests are welcome on v1, no active development is -expected on it. When v2.0.0 is released, v1 will be deprecated. +The remaining features will be added shortly. While pull-requests are welcome on +v1, no active development is expected on it. When v2.0.0 is released, v1 will be +deprecated. 👉 [go-toml v2][v2] diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md new file mode 100644 index 000000000000..b2f21cfc92c9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! 
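Returning to the packageurl-go code vendored above: beyond the README examples, a short sketch may help show the per-type checks that `FromString` applies via `validCustomRules`. This is illustrative only and not part of the vendored files; the purl strings are made up, and the only API used is the vendored `packageurl.FromString`.

```go
package main

import (
	"fmt"

	"github.com/package-url/packageurl-go"
)

func main() {
	// Swift purls satisfy the extra rules: both namespace and version present.
	p, err := packageurl.FromString("pkg:swift/github.com/apple/swift-numerics@1.0.2")
	fmt.Println(p.Type, p.Namespace, p.Name, p.Version, err)
	// swift github.com/apple swift-numerics 1.0.2 <nil>

	// CRAN purls must carry a version, so validCustomRules rejects this one.
	if _, err := packageurl.FromString("pkg:cran/ggplot2"); err != nil {
		fmt.Println(err) // version is required
	}
}
```

Note that parsing also lower-cases the type and applies per-type name adjustments (`typeAdjustName`), so for example a `pkg:pypi` name comes back lower-cased with underscores turned into dashes.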
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index 3443c35452ad..571273049848 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -1113,7 +1113,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index f5e1a44fb4d5..b3726d0dd8cc 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -293,42 +293,41 @@ func (p *tomlParser) parseRvalue() interface{} { return math.NaN() case tokenInteger: cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 + base := 10 + s := cleanedVal + checkInvalidUnderscore := numberContainsInvalidUnderscore if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { switch cleanedVal[1] { case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + checkInvalidUnderscore = hexNumberContainsInvalidUnderscore + base = 16 case 'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + base = 8 case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + base = 2 default: panic("invalid base") // the lexer should catch this first } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) + s = cleanedVal[2:] } + + err := checkInvalidUnderscore(tok.val) if err != nil { p.raiseError(tok, "%s", err) } - return val + + var val interface{} + val, err = strconv.ParseInt(s, base, 64) + if err == nil { + return val + } + + if s[0] != '-' { + if val, err = strconv.ParseUint(s, base, 64); err == nil { + return val + } + } + p.raiseError(tok, "%s", err) case tokenFloat: err := numberContainsInvalidUnderscore(tok.val) if err != nil { diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index 6d82587c4882..5541b941f8b8 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -471,7 +471,7 @@ func LoadBytes(b []byte) (tree *Tree, err error) { if _, ok := r.(runtime.Error); ok { panic(r) } - err = errors.New(r.(string)) + err = fmt.Errorf("%s", r) } }() diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE new file mode 100644 index 000000000000..65f78fb62910 --- /dev/null +++ b/vendor/github.com/pkg/browser/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md new file mode 100644 index 000000000000..72b1976e3035 --- /dev/null +++ b/vendor/github.com/pkg/browser/README.md @@ -0,0 +1,55 @@ + +# browser + import "github.com/pkg/browser" + +Package browser provides helpers to open files, readers, and urls in a browser window. + +The choice of which browser is started is entirely client dependant. + + + + + +## Variables +``` go +var Stderr io.Writer = os.Stderr +``` +Stderr is the io.Writer to which executed commands write standard error. + +``` go +var Stdout io.Writer = os.Stdout +``` +Stdout is the io.Writer to which executed commands write standard output. + + +## func OpenFile +``` go +func OpenFile(path string) error +``` +OpenFile opens new browser window for the file path. + + +## func OpenReader +``` go +func OpenReader(r io.Reader) error +``` +OpenReader consumes the contents of r and presents the +results in a new browser window. + + +## func OpenURL +``` go +func OpenURL(url string) error +``` +OpenURL opens a new browser window pointing to url. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go new file mode 100644 index 000000000000..3e5969064258 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser.go @@ -0,0 +1,63 @@ +// Package browser provides helpers to open files, readers, and urls in a browser window. +// +// The choice of which browser is started is entirely client dependant. +package browser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +// Stdout is the io.Writer to which executed commands write standard output. +var Stdout io.Writer = os.Stdout + +// Stderr is the io.Writer to which executed commands write standard error. +var Stderr io.Writer = os.Stderr + +// OpenFile opens new browser window for the file path. +func OpenFile(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return OpenURL("file://" + path) +} + +// OpenReader consumes the contents of r and presents the +// results in a new browser window. 
+func OpenReader(r io.Reader) error { + f, err := ioutil.TempFile("", "browser") + if err != nil { + return fmt.Errorf("browser: could not create temporary file: %v", err) + } + if _, err := io.Copy(f, r); err != nil { + f.Close() + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + oldname := f.Name() + newname := oldname + ".html" + if err := os.Rename(oldname, newname); err != nil { + return fmt.Errorf("browser: renaming temporary file failed: %v", err) + } + return OpenFile(newname) +} + +// OpenURL opens a new browser window pointing to url. +func OpenURL(url string) error { + return openBrowser(url) +} + +func runCmd(prog string, args ...string) error { + cmd := exec.Command(prog, args...) + cmd.Stdout = Stdout + cmd.Stderr = Stderr + setFlags(cmd) + return cmd.Run() +} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go new file mode 100644 index 000000000000..6dff0403c711 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -0,0 +1,9 @@ +package browser + +import "os/exec" + +func openBrowser(url string) error { + return runCmd("open", url) +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go new file mode 100644 index 000000000000..8cc0a7f539b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -0,0 +1,16 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go new file mode 100644 index 000000000000..ab9b4f6bd04e --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -0,0 +1,23 @@ +package browser + +import ( + "os/exec" + "strings" +) + +func openBrowser(url string) error { + providers := []string{"xdg-open", "x-www-browser", "www-browser"} + + // There are multiple possible providers to open a browser on linux + // One of them is xdg-open, another is x-www-browser, then there's www-browser, etc. 
+ // Look for one that exists and run it + for _, provider := range providers { + if _, err := exec.LookPath(provider); err == nil { + return runCmd(provider, url) + } + } + + return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go new file mode 100644 index 000000000000..8cc0a7f539b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -0,0 +1,16 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go new file mode 100644 index 000000000000..5eb17b013a98 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!darwin,!openbsd,!freebsd + +package browser + +import ( + "fmt" + "os/exec" + "runtime" +) + +func openBrowser(url string) error { + return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) +} + +func setFlags(cmd *exec.Cmd) {} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go new file mode 100644 index 000000000000..a2b30d39bffc --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -0,0 +1,13 @@ +//go:generate mkwinsyscall -output zbrowser_windows.go browser_windows.go +//sys ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) = shell32.ShellExecuteW +package browser + +import "os/exec" +const SW_SHOWNORMAL = 1 + +func openBrowser(url string) error { + return ShellExecute(0, "", url, "", "", SW_SHOWNORMAL) +} + +func setFlags(cmd *exec.Cmd) { +} diff --git a/vendor/github.com/pkg/browser/zbrowser_windows.go b/vendor/github.com/pkg/browser/zbrowser_windows.go new file mode 100644 index 000000000000..cbb25ba639a7 --- /dev/null +++ b/vendor/github.com/pkg/browser/zbrowser_windows.go @@ -0,0 +1,76 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package browser + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modshell32 = windows.NewLazySystemDLL("shell32.dll") + + procShellExecuteW = modshell32.NewProc("ShellExecuteW") +) + +func ShellExecute(hwnd int, verb string, file string, args string, cwd string, showCmd int) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(verb) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(file) + if err != nil { + return + } + var _p2 *uint16 + _p2, err = syscall.UTF16PtrFromString(args) + if err != nil { + return + } + var _p3 *uint16 + _p3, err = syscall.UTF16PtrFromString(cwd) + if err != nil { + return + } + return _ShellExecute(hwnd, _p0, _p1, _p2, _p3, showCmd) +} + +func _ShellExecute(hwnd int, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index ac1ca3cf5ffb..cf05079fb822 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -69,9 +69,9 @@ type Collector interface { // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 00d70f09b689..a912b75a05b7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -51,7 +51,7 @@ type Counter interface { // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. +// than 128 runes in total. type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } @@ -140,12 +140,13 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } + val := c.get() return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } @@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4bb816ab75ac..8bc5e44e2fc4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,6 +20,9 @@ import ( "strings" "github.com/cespare/xxhash/v2" + + "github.com/prometheus/client_golang/prometheus/internal" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -154,7 +157,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d6a3..811072cbd54f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. 
-// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. 
// -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a7d6..21271a5bb462 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go new file mode 100644 index 000000000000..614fd61be95a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "os" + +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 000000000000..eaf8059ee15d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+	return func() (int, error) {
+		return 1, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 08195b410218..ad9a71a5e0d4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -19,6 +19,10 @@ import (
 	"time"
 )
 
+// goRuntimeMemStats provides the metrics originally exposed via
+// runtime.ReadMemStats. From Go 1.17, similar (and better) statistics are
+// provided by runtime/metrics, so while each eval closure still operates on a
+// runtime.MemStats struct, on Go 1.17+ that struct is populated from
+// runtime/metrics.
 func goRuntimeMemStats() memStatsMetrics {
 	return memStatsMetrics{
 		{
@@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics {
 			),
 			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
 			valType: GaugeValue,
-		}, {
-			desc: NewDesc(
-				memstatNamespace("gc_cpu_fraction"),
-				"The fraction of this program's available CPU time used by the GC since the program started.",
-				nil, nil,
-			),
-			eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
-			valType: GaugeValue,
 		},
 	}
 }
@@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector {
 			"A summary of the pause duration of garbage collection cycles.",
 			nil, nil),
 		gcLastTimeDesc: NewDesc(
-			memstatNamespace("last_gc_time_seconds"),
+			"go_memstats_last_gc_time_seconds",
 			"Number of seconds since 1970 of last garbage collection.",
 			nil, nil),
 		goInfoDesc: NewDesc(
@@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) {
 // Collect returns the current state of all metrics of the collector.
 func (c *baseGoCollector) Collect(ch chan<- Metric) {
 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-	n, _ := runtime.ThreadCreateProfile(nil)
-	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+	n := getRuntimeNumThreads()
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
 
 	var stats debug.GCStats
 	stats.PauseQuantiles = make([]time.Duration, 5)
@@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
 	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
 	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
-
 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
index 24526131e732..897a6e906b3a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -40,13 +40,28 @@ type goCollector struct {
 //
 // Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go deleted file mode 100644 index d43bdcddabc0..000000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package prometheus - -import ( - "math" - "runtime" - "runtime/metrics" - "strings" - "sync" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/prometheus/internal" - dto "github.com/prometheus/client_model/go" -) - -type goCollector struct { - base baseGoCollector - - // mu protects updates to all fields ensuring a consistent - // snapshot is always produced by Collect. - mu sync.Mutex - - // rm... fields all pertain to the runtime/metrics package. - rmSampleBuf []metrics.Sample - rmSampleMap map[string]*metrics.Sample - rmMetrics []collectorMetric - - // With Go 1.17, the runtime/metrics package was introduced. - // From that point on, metric names produced by the runtime/metrics - // package could be generated from runtime/metrics names. However, - // these differ from the old names for the same values. - // - // This field exist to export the same values under the old names - // as well. - msMetrics memStatsMetrics -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - descriptions := metrics.All() - - // Collect all histogram samples so that we can get their buckets. - // The API guarantees that the buckets are always fixed for the lifetime - // of the process. 
- var histograms []metrics.Sample - for _, d := range descriptions { - if d.Kind == metrics.KindFloat64Histogram { - histograms = append(histograms, metrics.Sample{Name: d.Name}) - } - } - metrics.Read(histograms) - bucketsMap := make(map[string][]float64) - for i := range histograms { - bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets - } - - // Generate a Desc and ValueType for each runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(descriptions)) - sampleBuf := make([]metrics.Sample, 0, len(descriptions)) - sampleMap := make(map[string]*metrics.Sample, len(descriptions)) - for i := range descriptions { - d := &descriptions[i] - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d) - if !ok { - // Just ignore this metric; we can't do anything with it here. - // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. - continue - } - - // Set up sample buffer for reading, and a map - // for quick lookup of sample values. - sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) - sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] - - var m collectorMetric - if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := rmExactSumMap[d.Name] - unit := d.Name[strings.IndexRune(d.Name, ':')+1:] - m = newBatchHistogram( - NewDesc( - BuildFQName(namespace, subsystem, name), - d.Description, - nil, - nil, - ), - internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit), - hasSum, - ) - } else if d.Cumulative { - m = NewCounter(CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: d.Description, - }) - } else { - m = NewGauge(GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: d.Description, - }) - } - metricSet = append(metricSet, m) - } - return &goCollector{ - base: newBaseGoCollector(), - rmSampleBuf: sampleBuf, - rmSampleMap: sampleMap, - rmMetrics: metricSet, - msMetrics: goRuntimeMemStats(), - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } - for _, m := range c.rmMetrics { - ch <- m.Desc() - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - // Collect base non-memory metrics. - c.base.Collect(ch) - - // Collect must be thread-safe, so prevent concurrent use of - // rmSampleBuf. Just read into rmSampleBuf but write all the data - // we get into our Metrics or MemStats. - // - // This lock also ensures that the Metrics we send out are all from - // the same updates, ensuring their mutual consistency insofar as - // is guaranteed by the runtime/metrics package. - // - // N.B. This locking is heavy-handed, but Collect is expected to be called - // relatively infrequently. Also the core operation here, metrics.Read, - // is fast (O(tens of microseconds)) so contention should certainly be - // low, though channel operations and any allocations may add to that. - c.mu.Lock() - defer c.mu.Unlock() - - // Populate runtime/metrics sample buffer. - metrics.Read(c.rmSampleBuf) - - // Update all our metrics from rmSampleBuf. - for i, sample := range c.rmSampleBuf { - // N.B. switch on concrete type because it's significantly more efficient - // than checking for the Counter and Gauge interface implementations. In - // this case, we control all the types here. 
- switch m := c.rmMetrics[i].(type) { - case *counter: - // Guard against decreases. This should never happen, but a failure - // to do so will result in a panic, which is a harsh consequence for - // a metrics collection bug. - v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) - if v1 > v0 { - m.Add(unwrapScalarRMValue(sample.Value) - m.get()) - } - m.Collect(ch) - case *gauge: - m.Set(unwrapScalarRMValue(sample.Value)) - m.Collect(ch) - case *batchHistogram: - m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) - m.Collect(ch) - default: - panic("unexpected metric type") - } - } - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it. - var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) - } -} - -// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed -// to be scalar and returns the equivalent float64 value. Panics if the -// value is not scalar. -func unwrapScalarRMValue(v metrics.Value) float64 { - switch v.Kind() { - case metrics.KindUint64: - return float64(v.Uint64()) - case metrics.KindFloat64: - return v.Float64() - case metrics.KindBad: - // Unsupported metric. - // - // This should never happen because we always populate our metric - // set from the runtime/metrics package. - panic("unexpected unsupported metric") - default: - // Unsupported metric kind. - // - // This should never happen because we check for this during initialization - // and flag and filter metrics whose kinds we don't understand. - panic("unexpected unsupported metric kind") - } -} - -var rmExactSumMap = map[string]string{ - "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes", - "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes", -} - -// exactSumFor takes a runtime/metrics metric name (that is assumed to -// be of kind KindFloat64Histogram) and returns its exact sum and whether -// its exact sum exists. -// -// The runtime/metrics API for histograms doesn't currently expose exact -// sums, but some of the other metrics are in fact exact sums of histograms. -func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := rmExactSumMap[rmName] - if !ok { - return 0 - } - s, ok := c.rmSampleMap[sumName] - if !ok { - return 0 - } - return unwrapScalarRMValue(s.Value) -} - -func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { - lookupOrZero := func(name string) uint64 { - if s, ok := rm[name]; ok { - return s.Value.Uint64() - } - return 0 - } - - // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees. - // The reason for this is because MemStats couldn't be extended at the time - // but there was a desire to have Mallocs at least be a little more representative, - // while having Mallocs - Frees still represent a live object count. - // Unfortunately, MemStats doesn't actually export a large allocation count, - // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs - - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") - ms.Lookups = 0 // Already always zero. 
- ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") - ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") - ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 - - // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, - // and often misleading due to the fact that it's an average over the lifetime - // of the process. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.GCCPUFraction = 0 -} - -// batchHistogram is a mutable histogram that is updated -// in batches. -type batchHistogram struct { - selfCollector - - // Static fields updated only once. - desc *Desc - hasSum bool - - // Because this histogram operates in batches, it just uses a - // single mutex for everything. updates are always serialized - // but Write calls may operate concurrently with updates. - // Contention between these two sources should be rare. - mu sync.Mutex - buckets []float64 // Inclusive lower bounds, like runtime/metrics. - counts []uint64 - sum float64 // Used if hasSum is true. -} - -// newBatchHistogram creates a new batch histogram value with the given -// Desc, buckets, and whether or not it has an exact sum available. -// -// buckets must always be from the runtime/metrics package, following -// the same conventions. -func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { - h := &batchHistogram{ - desc: desc, - buckets: buckets, - // Because buckets follows runtime/metrics conventions, there's - // 1 more value in the buckets list than there are buckets represented, - // because in runtime/metrics, the bucket values represent *boundaries*, - // and non-Inf boundaries are inclusive lower bounds for that bucket. - counts: make([]uint64, len(buckets)-1), - hasSum: hasSum, - } - h.init(h) - return h -} - -// update updates the batchHistogram from a runtime/metrics histogram. -// -// sum must be provided if the batchHistogram was created to have an exact sum. -// h.buckets must be a strict subset of his.Buckets. -func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { - counts, buckets := his.Counts, his.Buckets - - h.mu.Lock() - defer h.mu.Unlock() - - // Clear buckets. - for i := range h.counts { - h.counts[i] = 0 - } - // Copy and reduce buckets. 
- var j int - for i, count := range counts { - h.counts[j] += count - if buckets[i+1] == h.buckets[j+1] { - j++ - } - } - if h.hasSum { - h.sum = sum - } -} - -func (h *batchHistogram) Desc() *Desc { - return h.desc -} - -func (h *batchHistogram) Write(out *dto.Metric) error { - h.mu.Lock() - defer h.mu.Unlock() - - sum := float64(0) - if h.hasSum { - sum = h.sum - } - dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) - totalCount := uint64(0) - for i, count := range h.counts { - totalCount += count - if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) - } - - // Skip the +Inf bucket, but only for the bucket list. - // It must still count for sum and totalCount. - if math.IsInf(h.buckets[i+1], 1) { - break - } - // Float64Histogram's upper bound is exclusive, so make it inclusive - // by obtaining the next float64 value down, in order. - upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) - dtoBuckets = append(dtoBuckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(totalCount), - UpperBound: proto.Float64(upperBound), - }) - } - out.Histogram = &dto.Histogram{ - Bucket: dtoBuckets, - SampleCount: proto.Uint64(totalCount), - SampleSum: proto.Float64(sum), - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go new file mode 100644 index 000000000000..3a2d55e84b1e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -0,0 +1,568 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.17 +// +build go1.17 + +package prometheus + +import ( + "math" + "runtime" + "runtime/metrics" + "strings" + "sync" + + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // constants for strings referenced more than once. 
+	goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
+	goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
+	goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
+	goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
+	goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
+	goGCHeapObjects                         = "/gc/heap/objects:objects"
+	goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
+	goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
+	goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
+	goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
+	goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
+	goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
+	goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
+	goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
+	goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
+	goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
+	goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+	goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
+	goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
+	goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
+	goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics represents the runtime/metrics names required to
+// populate the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+	goGCHeapTinyAllocsObjects,
+	goGCHeapAllocsObjects,
+	goGCHeapFreesObjects,
+	goGCHeapAllocsBytes,
+	goGCHeapObjects,
+	goGCHeapGoalBytes,
+	goMemoryClassesTotalBytes,
+	goMemoryClassesHeapObjectsBytes,
+	goMemoryClassesHeapUnusedBytes,
+	goMemoryClassesHeapReleasedBytes,
+	goMemoryClassesHeapFreeBytes,
+	goMemoryClassesHeapStacksBytes,
+	goMemoryClassesOSStacksBytes,
+	goMemoryClassesMetadataMSpanInuseBytes,
+	goMemoryClassesMetadataMSPanFreeBytes,
+	goMemoryClassesMetadataMCacheInuseBytes,
+	goMemoryClassesMetadataMCacheFreeBytes,
+	goMemoryClassesProfilingBucketsBytes,
+	goMemoryClassesMetadataOtherBytes,
+	goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+	ret := make([]metrics.Description, 0, len(lookup))
+	for _, rm := range metrics.All() {
+		for _, m := range lookup {
+			if m == rm.Name {
+				ret = append(ret, rm)
+			}
+		}
+	}
+	return ret
+}
+
+type goCollector struct {
+	base baseGoCollector
+
+	// mu protects updates to all fields ensuring a consistent
+	// snapshot is always produced by Collect.
+	mu sync.Mutex
+
+	// sampleBuf contains all samples that have to be retrieved from
+	// runtime/metrics (not all of them will be exposed).
+	sampleBuf []metrics.Sample
+	// sampleMap allows lookup for MemStats metrics and runtime/metrics
+	// histograms for exact sums.
+	sampleMap map[string]*metrics.Sample
+
+	// rmExposedMetrics represents all runtime/metrics package metrics
+	// that were configured to be exposed.
+	rmExposedMetrics     []collectorMetric
+	rmExactSumMapForHist map[string]string
+
+	// With Go 1.17, the runtime/metrics package was introduced.
+	// From that point on, the metric names exposed by this collector could
+	// be generated from runtime/metrics names. However, these differ from
+	// the old names for the same values.
+	//
+	// This field exists to export the same values under the old names
+	// as well.
+	msMetrics        memStatsMetrics
+	msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+	metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+	var descs []rmMetricDesc
+	for _, d := range metrics.All() {
+		var (
+			deny = true
+			desc rmMetricDesc
+		)
+
+		for _, r := range rules {
+			if !r.Matcher.MatchString(d.Name) {
+				continue
+			}
+			deny = r.Deny
+		}
+		if deny {
+			continue
+		}
+
+		desc.Description = d
+		descs = append(descs, desc)
+	}
+	return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+	return internal.GoCollectorOptions{
+		RuntimeMetricSumForHist: map[string]string{
+			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
+		},
+		RuntimeMetricRules: []internal.GoCollectorRule{
+			//{Matcher: regexp.MustCompile("")},
+		},
+	}
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+	opt := defaultGoCollectorOptions()
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+	// Collect all histogram samples so that we can get their buckets.
+	// The API guarantees that the buckets are always fixed for the lifetime
+	// of the process.
+	var histograms []metrics.Sample
+	for _, d := range exposedDescriptions {
+		if d.Kind == metrics.KindFloat64Histogram {
+			histograms = append(histograms, metrics.Sample{Name: d.Name})
+		}
+	}
+
+	if len(histograms) > 0 {
+		metrics.Read(histograms)
+	}
+
+	bucketsMap := make(map[string][]float64)
+	for i := range histograms {
+		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+	}
+
+	// Generate a collector for each exposed runtime/metrics metric.
+	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+	// sampleBuf is used for reading from runtime/metrics. It is allocated
+	// with worst-case capacity up front so that appends never reallocate
+	// it, which keeps the pointers stored in sampleMap stable.
+	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+	for _, d := range exposedDescriptions {
+		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+		if !ok {
+			// Just ignore this metric; we can't do anything with it here.
+			// If a user decides to use the latest version of Go, we don't want
+			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+			continue
+		}
+
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+		var m collectorMetric
+		if d.Kind == metrics.KindFloat64Histogram {
+			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+			m = newBatchHistogram(
+				NewDesc(
+					BuildFQName(namespace, subsystem, name),
+					d.Description.Description,
+					nil,
+					nil,
+				),
+				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+				hasSum,
+			)
+		} else if d.Cumulative {
+			m = NewCounter(CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      d.Description.Description,
+			},
+			)
+		} else {
+			m = NewGauge(GaugeOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      d.Description.Description,
+			})
+		}
+		metricSet = append(metricSet, m)
+	}
+
+	// Add exact sum metrics to sampleBuf if not added before.
+	for _, h := range histograms {
+		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+		if !ok {
+			continue
+		}
+
+		if _, ok := sampleMap[sumMetric]; ok {
+			continue
+		}
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+	}
+
+	var (
+		msMetrics      memStatsMetrics
+		msDescriptions []metrics.Description
+	)
+
+	if !opt.DisableMemStatsLikeMetrics {
+		msMetrics = goRuntimeMemStats()
+		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+		// If a metric is not in sampleBuf already, add it.
+		for _, mdDesc := range msDescriptions {
+			if _, ok := sampleMap[mdDesc.Name]; ok {
+				continue
+			}
+			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+		}
+	}
+
+	return &goCollector{
+		base:                 newBaseGoCollector(),
+		sampleBuf:            sampleBuf,
+		sampleMap:            sampleMap,
+		rmExposedMetrics:     metricSet,
+		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+		msMetrics:            msMetrics,
+		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+	for _, m := range c.rmExposedMetrics {
+		ch <- m.Desc()
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	if len(c.sampleBuf) == 0 {
+		return
+	}
+
+	// Collect must be thread-safe, so prevent concurrent use of
+	// sampleBuf elements. Just read into sampleBuf but write all the data
+	// we get into our Metrics or MemStats.
+	//
+	// This lock also ensures that the Metrics we send out are all from
+	// the same updates, ensuring their mutual consistency insofar as
+	// is guaranteed by the runtime/metrics package.
+	//
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
+	// relatively infrequently. Also the core operation here, metrics.Read,
+	// is fast (O(tens of microseconds)) so contention should certainly be
+	// low, though channel operations and any allocations may add to that.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Populate runtime/metrics sample buffer.
+	metrics.Read(c.sampleBuf)
+
+	// Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+	for i, metric := range c.rmExposedMetrics {
+		// We created samples for exposed metrics first in order, so indexes match.
+		sample := c.sampleBuf[i]
+
+		// N.B.
switch on concrete type because it's significantly more efficient
+		// than checking for the Counter and Gauge interface implementations. In
+		// this case, we control all the types here.
+		switch m := metric.(type) {
+		case *counter:
+			// Guard against decreases. This should never happen, but a failure
+			// to do so will result in a panic, which is a harsh consequence for
+			// a metrics collection bug.
+			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+			if v1 > v0 {
+				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+			}
+			m.Collect(ch)
+		case *gauge:
+			m.Set(unwrapScalarRMValue(sample.Value))
+			m.Collect(ch)
+		case *batchHistogram:
+			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+			m.Collect(ch)
+		default:
+			panic("unexpected metric type")
+		}
+	}
+
+	if c.msMetricsEnabled {
+		// ms is a dummy MemStats that we populate ourselves so that we can
+		// populate the old metrics from it when the MemStats-style metrics
+		// are enabled.
+		var ms runtime.MemStats
+		memStatsFromRM(&ms, c.sampleMap)
+		for _, i := range c.msMetrics {
+			ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+		}
+	}
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+	switch v.Kind() {
+	case metrics.KindUint64:
+		return float64(v.Uint64())
+	case metrics.KindFloat64:
+		return v.Float64()
+	case metrics.KindBad:
+		// Unsupported metric.
+		//
+		// This should never happen because we always populate our metric
+		// set from the runtime/metrics package.
+		panic("unexpected unsupported metric")
+	default:
+		// Unsupported metric kind.
+		//
+		// This should never happen because we check for this during initialization
+		// and flag and filter metrics whose kinds we don't understand.
+		panic("unexpected unsupported metric kind")
+	}
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum and whether
+// its exact sum exists.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+	sumName, ok := c.rmExactSumMapForHist[rmName]
+	if !ok {
+		return 0
+	}
+	s, ok := c.sampleMap[sumName]
+	if !ok {
+		return 0
+	}
+	return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+	lookupOrZero := func(name string) uint64 {
+		if s, ok := rm[name]; ok {
+			return s.Value.Uint64()
+		}
+		return 0
+	}
+
+	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+	// The reason is that MemStats couldn't be extended at the time
+	// but there was a desire to have Mallocs at least be a little more representative,
+	// while having Mallocs - Frees still represent a live object count.
+	// Unfortunately, MemStats doesn't actually export a large allocation count,
+	// so it's impossible to pull this number out directly.
+	tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+	ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+	ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+	ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+	ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+	ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) + ms.Alloc = ms.HeapAlloc + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) + ms.HeapSys = ms.HeapInuse + ms.HeapIdle + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) + + // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, + // and often misleading due to the fact that it's an average over the lifetime + // of the process. + // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + // for more details. + ms.GCCPUFraction = 0 +} + +// batchHistogram is a mutable histogram that is updated +// in batches. +type batchHistogram struct { + selfCollector + + // Static fields updated only once. + desc *Desc + hasSum bool + + // Because this histogram operates in batches, it just uses a + // single mutex for everything. updates are always serialized + // but Write calls may operate concurrently with updates. + // Contention between these two sources should be rare. + mu sync.Mutex + buckets []float64 // Inclusive lower bounds, like runtime/metrics. + counts []uint64 + sum float64 // Used if hasSum is true. +} + +// newBatchHistogram creates a new batch histogram value with the given +// Desc, buckets, and whether or not it has an exact sum available. +// +// buckets must always be from the runtime/metrics package, following +// the same conventions. +func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } + h := &batchHistogram{ + desc: desc, + buckets: buckets, + // Because buckets follows runtime/metrics conventions, there's + // 1 more value in the buckets list than there are buckets represented, + // because in runtime/metrics, the bucket values represent *boundaries*, + // and non-Inf boundaries are inclusive lower bounds for that bucket. + counts: make([]uint64, len(buckets)-1), + hasSum: hasSum, + } + h.init(h) + return h +} + +// update updates the batchHistogram from a runtime/metrics histogram. +// +// sum must be provided if the batchHistogram was created to have an exact sum. +// h.buckets must be a strict subset of his.Buckets. +func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { + counts, buckets := his.Counts, his.Buckets + + h.mu.Lock() + defer h.mu.Unlock() + + // Clear buckets. + for i := range h.counts { + h.counts[i] = 0 + } + // Copy and reduce buckets. 
+ var j int + for i, count := range counts { + h.counts[j] += count + if buckets[i+1] == h.buckets[j+1] { + j++ + } + } + if h.hasSum { + h.sum = sum + } +} + +func (h *batchHistogram) Desc() *Desc { + return h.desc +} + +func (h *batchHistogram) Write(out *dto.Metric) error { + h.mu.Lock() + defer h.mu.Unlock() + + sum := float64(0) + if h.hasSum { + sum = h.sum + } + dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) + totalCount := uint64(0) + for i, count := range h.counts { + totalCount += count + if !h.hasSum { + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } + } + + // Skip the +Inf bucket, but only for the bucket list. + // It must still count for sum and totalCount. + if math.IsInf(h.buckets[i+1], 1) { + break + } + // Float64Histogram's upper bound is exclusive, so make it inclusive + // by obtaining the next float64 value down, in order. + upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) + dtoBuckets = append(dtoBuckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(totalCount), + UpperBound: proto.Float64(upperBound), + }) + } + out.Histogram = &dto.Histogram{ + Bucket: dtoBuckets, + SampleCount: proto.Uint64(totalCount), + SampleSum: proto.Float64(sum), + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 893802fd6b4c..4c873a01c3d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -28,19 +28,216 @@ import ( dto "github.com/prometheus/client_model/go" ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. 
See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 
0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 
0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 
0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. +// +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +247,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +262,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. 
Most likely, however, you will be required to define buckets
 // customized to your use case.
-var (
-	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
 
-	errBucketLabelNotAllowed = fmt.Errorf(
-		"%q is not allowed as label name in histograms", bucketLabel,
-	)
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
 )
 
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is zero or negative.
 func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
 	return buckets
 }
 
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
 // or if 'factor' is less than or equal to 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
 	// element in the slice is the upper inclusive bound of a bucket. The
 	// values must be sorted in strictly increasing order. There is no need
 	// to add a highest bucket with +Inf bound, it will be added
-	// implicitly. The default value is DefBuckets.
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^n), where n is an integer number between (and including) -8 and
+	// 4. n is chosen so that the resulting factor is the largest that is
+	// still smaller than or equal to NativeHistogramBucketFactor. Note that
+	// the smallest possible factor is therefore approx. 1.00271 (i.e.
+	// 2^(2^-8)). If NativeHistogramBucketFactor is greater than 1 but
+	// smaller than 2^(2^-8), then the actually used factor is still 2^(2^-8)
+	// even though it is larger than the provided
+	// NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Consequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less than or equal to
+	// NativeHistogramZeroThreshold are accumulated into a “zero”
+	// bucket. For best results, this should be close to a bucket
+	// boundary. This is usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
+	// a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The remaining fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.)
Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // NativeHistogramMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). + NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. + } else { + switch { + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. + h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, + } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. 
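Taken together, the new `HistogramOpts` fields in this hunk mean that a caller opting into native histograms must now spell out any regular buckets explicitly, since `DefBuckets` is suppressed once a bucket factor is set. A minimal sketch of how an application might wire this up, assuming the vendored client_golang here is a v1.14.x release where these fields ship (the metric name and observed value are illustrative, not from this diff):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds", // hypothetical metric
		Help: "Request duration in seconds.",
		// Regular buckets must be listed explicitly here; with a
		// bucket factor > 1 they would otherwise default to none.
		Buckets: prometheus.ExponentialBuckets(0.001, 2, 10),
		// Native histogram: each sparse bucket is at most ~10% wider
		// than the previous one (factor 1.1 maps to schema 3).
		NativeHistogramBucketFactor: 1.1,
		// Cap the sparse bucket count, and allow a full reset at most
		// once per hour when the cap is exceeded.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: 1 * time.Hour,
	})
	h.Observe(0.042)
}
```

Observations then land in both representations; a Prometheus server without the native-histograms feature enabled simply keeps scraping the classic bucket view.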
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 }
 
 type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	// observations.
 	sumBits uint64
 	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less or equal
+	// the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
 	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + } else { + key = exp + if frac == 0.5 { + key-- + } + div := 1 << -schema + key = (key + div - 1) / div + } + if isInf { + key++ + } + switch { + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) + default: + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) } type histogram struct { @@ -276,7 +667,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -284,8 +675,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -293,9 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. 
-	}
+	waitForCooldown(count, coldCounts)
 
 	his := &dto.Histogram{
 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
 		SampleCount: proto.Uint64(count),
 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
 	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error {
 		}
 		his.Bucket = append(his.Bucket, b)
 	}
-
-	out.Histogram = his
-	out.Label = h.labelPairs
-
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
-	for i := range h.upperBounds {
-		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
-		atomic.StoreUint64(&coldCounts.buckets[i], 0)
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
 	}
+	addAndResetCounts(hotCounts, coldCounts)
 	return nil
 }
 
@@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int {
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 	// We increment h.countAndHotIdx so that the counter in the lower
 	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
 	n := atomic.AddUint64(&h.countAndHotIdx, 1)
 	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
 
-	if bucket < len(h.upperBounds) {
-		atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
 	}
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
+// has passed. It returns true if the histogram has been reset. The caller
+// must have locked h.mtx.
+func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+ } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. 
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -581,11 +1169,11 @@ func (h *constHistogram) Desc() *Desc { func (h *constHistogram) Write(out *dto.Metric) error { his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), @@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. 
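The clarified `NewConstHistogram` doc comment above (the +Inf bucket being implicit and carrying the total count) is easiest to see in a short usage sketch against the public client_golang API; the metric name and numbers here are illustrative only:

```go
desc := prometheus.NewDesc(
	"backup_duration_seconds", // hypothetical metric
	"Duration of the last backup runs.",
	nil, nil,
)
// buckets maps upper bounds to cumulative counts. The +Inf bucket is
// implicit and reports the overall count (42), so two observations
// here exceeded the 10s bound.
h := prometheus.MustNewConstHistogram(desc, 42, 153.5,
	map[float64]uint64{1: 10, 5: 30, 10: 40},
)
```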
@@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) {
 func (s buckSort) Less(i, j int) bool {
 	return s[i].GetUpperBound() < s[j].GetUpperBound()
 }
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less or equal the provided bucketFactor.
+//
+// Special cases:
+// - bucketFactor <= 1: panics.
+// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return 8
+	case floor >= 4:
+		return -4
+	default:
+		return -int32(floor)
+	}
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
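To make the schema arithmetic in `pickSchema` above concrete, here is a standalone sketch (the vendored function is unexported, so this copy exists purely for illustration) along with the factors a few inputs map to:

```go
package main

import (
	"fmt"
	"math"
)

// pickSchemaSketch mirrors the pickSchema logic above, for illustration only.
func pickSchemaSketch(bucketFactor float64) int32 {
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8
	case floor >= 4:
		return -4
	default:
		return -int32(floor)
	}
}

func main() {
	// factor 1.1   -> schema 3:  buckets grow by 2^(2^-3) ≈ 1.0905 per step
	// factor 2     -> schema 0:  buckets grow by a factor of 2
	// factor 65536 -> schema -4: lowest resolution, factor 2^(2^4)
	for _, f := range []float64{1.1, 2, 65536} {
		fmt.Printf("factor %g -> schema %d\n", f, pickSchemaSketch(f))
	}
}
```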
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+ } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 000000000000..1ed5abe74c16 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. 
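Before the implementation that follows, a quick illustration of how this relative-error comparison behaves for a typical epsilon. The helper lives in this vendored `internal` package and is not importable by library users, so this is only a behavioral sketch (the expected results are my own working, treat them as assumptions):

```go
const eps = 1e-9

AlmostEqualFloat64(0.1+0.2, 0.3, eps)  // true: relative error is ~1e-16, well below eps
AlmostEqualFloat64(1.0, 1.0+1e-6, eps) // false: relative error is ~5e-7, above eps
AlmostEqualFloat64(1.0, 1.0, eps)      // true: exact equality short-circuits immediately
```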
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 000000000000..fd0750f2cf50
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,654 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
    " lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
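A small sketch of what `GetMatchingBlocks` (defined next) returns for two short sequences. Since this code lives in an internal package, the snippet assumes access to it, or to the original go-difflib it was copied from; the expected output is my own trace of the algorithm:

```go
m := NewMatcher(
	[]string{"a", "b", "c", "d"},
	[]string{"a", "x", "c", "d"},
)
for _, blk := range m.GetMatchingBlocks() {
	fmt.Printf("a[%d:%d] == b[%d:%d]\n", blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
}
// a[0:1] == b[0:1]   ("a")
// a[2:4] == b[2:4]   ("c", "d")
// a[4:4] == b[4:4]   (terminating dummy with Size 0)
```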
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{
+				c.Tag, i1, min(i2, i1+n),
+				j1, min(j2, j1+n),
+			})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s]++
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches++
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
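The three ratio methods form a chain of progressively cheaper upper bounds: RealQuickRatio (lengths only) >= QuickRatio (line multisets) >= Ratio (actual matching blocks). A sketch, again assuming access to this internal package, with expected values worked out by hand:

```go
m := NewMatcher(
	[]string{"a", "b", "c"},
	[]string{"a", "c", "d"},
)
fmt.Println(m.Ratio())          // 0.666…: two of three lines match exactly
fmt.Println(m.QuickRatio())     // 0.666…: the multiset bound happens to agree
fmt.Println(m.RealQuickRatio()) // 1.0: equal lengths give the loosest bound
```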
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
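Typical use goes through `GetUnifiedDiffString` (defined a little further below), which wraps `WriteUnifiedDiff` around a buffer. A usage sketch with illustrative file names, showing the one-line change it emits:

```go
text, err := GetUnifiedDiffString(UnifiedDiff{
	A:        SplitLines("one\ntwo\nthree\n"),
	B:        SplitLines("one\nTWO\nthree\n"),
	FromFile: "before.txt", // hypothetical names
	ToFile:   "after.txt",
	Context:  1,
})
if err != nil {
	panic(err)
}
fmt.Print(text)
// --- before.txt
// +++ after.txt
// @@ -1,3 +1,3 @@
//  one
// -two
// +TWO
//  three
```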
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 000000000000..723b45d64444
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+	Matcher *regexp.Regexp
+	Deny    bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the
+// `collectors` package. Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the
+// `collector.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+	DisableMemStatsLikeMetrics bool
+	RuntimeMetricSumForHist    map[string]string
+	RuntimeMetricRules         []GoCollectorRule
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index fe0a52180e72..97d17d6cb60b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 	// name has - replaced with _ and is concatenated with the unit and
 	// other data.
 	name = strings.ReplaceAll(name, "-", "_")
-	name = name + "_" + unit
-	if d.Cumulative {
-		name = name + "_total"
+	name += "_" + unit
+	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+		name += "_total"
 	}
 
 	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
@@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
 	switch unit {
 	case "bytes":
-		// Rebucket as powers of 2.
-		return rebucketExp(buckets, 2)
+		// Re-bucket as powers of 2.
+		return reBucketExp(buckets, 2)
 	case "seconds":
-		// Rebucket as powers of 10 and then merge all buckets greater
+		// Re-bucket as powers of 10 and then merge all buckets greater
 		// than 1 second into the +Inf bucket.
-		b := rebucketExp(buckets, 10)
+		b := reBucketExp(buckets, 10)
 		for i := range b {
 			if b[i] <= 1 {
 				continue
@@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
 	return buckets
 }
 
-// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
 // downsamples the buckets to those a multiple of base apart. The end result
 // is a roughly exponential (in many cases, perfectly exponential) bucketing
 // scheme.
-func rebucketExp(buckets []float64, base float64) []float64 {
+func reBucketExp(buckets []float64, base float64) []float64 {
 	bucket := buckets[0]
 	var newBuckets []float64
 	// We may see a -Inf here, in which case, add it and skip it
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
index 351c26e1aedb..6515c114804f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -19,18 +19,34 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
 
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
 	return len(s)
 }
 
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
 
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as @@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool { // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) + sort.Sort(MetricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2744443ac228..c1b8fad36aeb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,7 +25,8 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. @@ -39,7 +40,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", + "%w: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, @@ -49,7 +50,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) @@ -67,7 +68,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index dc121910a520..b5119c50410e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -14,6 +14,9 @@ package prometheus import ( + "errors" + "math" + "sort" "strings" "time" @@ -115,22 +118,6 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. 
-type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - type invalidMetric struct { desc *Desc err error @@ -174,3 +161,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error { func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? + return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. +type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. +// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts == (time.Time{}) { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) 
+ if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 000000000000..7c12b210870a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 000000000000..7348df01dfbc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 44128016fd1d..03773b21f759 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -58,7 +58,7 @@ type ObserverVec interface { // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. +// invalid or if the provided labels contain more than 128 runes in total. 
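For context, the raised budget (64 to 128 runes) covers the total of all exemplar label names and values together. A minimal sketch of the `ExemplarObserver` pattern this comment describes — the metric name and `trace_id` label are illustrative, not part of the vendored change:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Histograms returned by NewHistogram also implement ExemplarObserver,
	// so the type assertion below is the documented way to attach exemplars.
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds", // hypothetical metric name
		Help: "Request latency in seconds.",
	})
	hist.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.42,
		prometheus.Labels{"trace_id": "abc123"}, // must stay within 128 runes total
	)
}
```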
type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 5bfe0ff5bbc9..8548dd18ed5e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,7 +16,6 @@ package prometheus import ( "errors" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } + c.pidFn = getPIDFn() } else { c.pidFn = opts.PidFn } @@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) // It is meant to be used for the PidFn field in ProcessCollectorOpts. func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) + content, err := os.ReadFile(pidFilePath) if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) } return pid, nil diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go new file mode 100644 index 000000000000..b1e363d6cf69 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js +// +build js + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 2dc3660da0a3..c0152cdb613a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
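As an aside on the process-collector refactor above: `NewPidFileFn` composes with `ProcessCollectorOpts.PidFn` as in this minimal sketch (the pid-file path is a hypothetical placeholder):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	// NewPidFileFn returns a PidFn that re-reads the pid file on every
	// collection, matching the ProcessCollectorOpts.PidFn contract.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		PidFn: prometheus.NewPidFileFn("/var/run/myapp.pid"), // hypothetical path
	}))
}
```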
-//go:build !windows -// +build !windows +//go:build !windows && !js +// +build !windows,!js package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index e7c0d05464fa..9819917b83b3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) func (d closeNotifierDelegator) CloseNotify() <-chan bool { //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } + func (d flusherDelegator) Flush() { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() { } d.ResponseWriter.(http.Flusher).Flush() } + func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } + func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } + func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } @@ -261,7 +267,7 @@ func init() { http.Flusher }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 return struct { *responseWriterDelegator http.Pusher diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index d86d0cf4b0e9..a4cc9810b072 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -33,6 +33,7 @@ package promhttp import ( "compress/gzip" + "errors" "fmt" "io" "net/http" @@ -84,6 +85,13 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. 
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) +} + +// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which +// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after +// call to `done` of that `Gather`. +func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { var ( inFlightSem chan struct{} errCnt = prometheus.NewCounterVec( @@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { errCnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { return } } - mfs, err := reg.Gather() + mfs, done, err := reg.Gather() + defer done() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) @@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht cnt.WithLabelValues("500") cnt.WithLabelValues("503") if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { cnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht Help: "Current number of scrapes being served.", }) if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { gge = are.ExistingCollector.(prometheus.Gauge) } else { panic(err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 861b4d21cac6..21086781621f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { gauge.Inc() defer gauge.Dec() return next.RoundTrip(r) - }) + } } // InstrumentRoundTripperCounter is a middleware that wraps the provided @@ -59,22 +59,28 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // If the wrapped RoundTripper panics or returns a non-nil error, the Counter // is not incremented. // +// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. +// // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
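The doc comment above points at `WithExemplarFromContext` as the way to attach exemplars to the request counter. A minimal sketch of that wiring, assuming a hypothetical `traceIDFromContext` helper (real code would pull the ID from its tracing library):

```go
package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// ctxKey and traceIDFromContext are hypothetical stand-ins for however a
// caller propagates a trace ID; they are not part of promhttp.
type ctxKey struct{}

func traceIDFromContext(ctx context.Context) string {
	id, _ := ctx.Value(ctxKey{}).(string)
	return id
}

func main() {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "client_requests_total", Help: "Outgoing requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(counter)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(
			counter,
			http.DefaultTransport,
			// Returning nil labels records the count without an exemplar.
			promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
				if id := traceIDFromContext(ctx); id != "" {
					return prometheus.Labels{"trace_id": id}
				}
				return nil
			}),
		),
	}
	_, _ = client.Get("http://example.com") // hypothetical request
}
```

Returning nil labels (the default `getExemplarFn`) keeps the old behavior, so existing callers see no change unless they opt in.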
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(counter) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() + addWithExemplar( + counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + 1, + rtOpts.getExemplarFn(r.Context()), + ) } return resp, err - }) + } } // InstrumentRoundTripperDuration is a middleware that wraps the provided @@ -94,24 +100,30 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // If the wrapped RoundTripper panics or returns a non-nil error, no values are // reported. // +// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. +// // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(obs) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) + observeWithExemplar( + obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + time.Since(start).Seconds(), + rtOpts.getExemplarFn(r.Context()), + ) } return resp, err - }) + } } // InstrumentTrace is used to offer flexibility in instrumenting the available @@ -149,7 +161,7 @@ type InstrumentTrace struct { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() trace := &httptrace.ClientTrace{ @@ -231,5 +243,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) return next.RoundTrip(r) - }) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index a23f0edc6f81..cca67a78a90d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,6 +28,26 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. 
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. +func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + // InstrumentHandlerInFlight is a middleware that wraps the provided // http.Handler. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.Handler. @@ -48,7 +68,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // names are "code" and "method". The function panics otherwise. For the "method" // label a predefined default label value set is used to filter given values. // Values besides predefined values will count as `unknown` method. -//`WithExtraMethods` can be used to add more methods to the set. The Observe +// `WithExtraMethods` can be used to add more methods to the set. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -62,28 +82,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler @@ -104,25 +133,34 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // // See the example for InstrumentHandlerDuration for example usage. 
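Since the server-side instrumenters gain the same options, here is a sketch of the usage the "See the example for InstrumentHandlerDuration" pointers refer to, with chained middleware; all metric names are illustrative assumptions:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // hypothetical metric names throughout
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests served."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(duration, requests)

	ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	// The instrumenters nest, so a single handler can feed several metrics.
	http.Handle("/", promhttp.InstrumentHandlerDuration(
		duration,
		promhttp.InstrumentHandlerCounter(requests, ok),
	))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```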
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(counter) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() - }) + + addWithExemplar( + counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() - }) + addWithExemplar( + counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided @@ -148,20 +186,24 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + observeWithExemplar( + obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) }) next.ServeHTTP(d, r) - }) + } } // InstrumentHandlerRequestSize is a middleware that wraps the provided @@ -184,27 +226,34 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // // See the example for InstrumentHandlerDuration for example usage. 
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) - }) + observeWithExemplar( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerResponseSize is a middleware that wraps the provided @@ -227,9 +276,9 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) @@ -237,7 +286,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) + observeWithExemplar( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(d.Written()), + hOpts.getExemplarFn(r.Context()), + ) }) } @@ -246,7 +299,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler // Collector does not have a Desc or has more than one Desc or its Desc is // invalid. It also panics if the Collector has any non-const, non-curried // labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code bool, method bool) { +func checkLabels(c prometheus.Collector) (code, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. var ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index 35e41bd1e6bc..c590d912c947 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -13,19 +13,46 @@ package promhttp -// Option are used to configure a middleware or round tripper.. -type Option func(*option) +import ( + "context" -type option struct { - extraMethods []string + "github.com/prometheus/client_golang/prometheus" +) + +// Option are used to configure both handler (middleware) or round tripper. 
+type Option interface { + apply(*options) +} + +// options store options for both a handler or round tripper. +type options struct { + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels +} + +func defaultOptions() *options { + return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} } +type optionApplyFunc func(*options) + +func (o optionApplyFunc) apply(opt *options) { o(opt) } + // WithExtraMethods adds additional HTTP methods to the list of allowed methods. // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. // // See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. func WithExtraMethods(methods ...string) Option { - return func(o *option) { + return optionApplyFunc(func(o *options) { o.extraMethods = methods - } + }) +} + +// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. +// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric +// will get instrumented without exemplar. +func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { + return optionApplyFunc(func(o *options) { + o.getExemplarFn = getExemplarFn + }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 383a7f5941a5..09e34d307c97 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,8 +15,8 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -289,7 +292,7 @@ func (r *Registry) Register(c Collector) error { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) } // Is the descID unique? @@ -407,6 +410,14 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. 
+ r.mtx.RUnlock() + return nil, nil + } + var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) @@ -416,7 +427,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) - r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) @@ -549,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. +func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. @@ -556,7 +591,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // This is intended for use with the textfile collector of the node exporter. // Note that the node exporter expects the filename to be suffixed with ".prom". func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } @@ -575,7 +610,7 @@ func WriteToTextfile(filename string, g Gatherer) error { return err } - if err := os.Chmod(tmp.Name(), 0644); err != nil { + if err := os.Chmod(tmp.Name(), 0o644); err != nil { return err } return os.Rename(tmp.Name(), filename) @@ -596,7 +631,7 @@ func processMetric( } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) + return fmt.Errorf("error collecting metric %v: %w", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. @@ -718,12 +753,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { for i, g := range gs { mfs, err := g.Gather() if err != nil { - if multiErr, ok := err.(MultiError); ok { + multiErr := MultiError{} + if errors.As(err, &multiErr) { for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } for _, mf := range mfs { @@ -884,11 +920,11 @@ func checkMetricConsistency( h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. 
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) + sort.Sort(internal.LabelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { @@ -935,7 +971,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || @@ -948,3 +984,89 @@ func checkDescConsistency( } return nil } + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. + sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. 
+ Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. +func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c71a..7bc448a89394 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index 8d5f10523375..f28a76f3a62a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -25,11 +25,12 @@ type Timer struct { // NewTimer creates a new Timer. The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } +// +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. 
+// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index b4e0ae11cb4b..2d3abc1cbd68 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -23,6 +23,8 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/client_golang/prometheus/internal" + dto "github.com/prometheus/client_model/go" ) @@ -38,6 +40,23 @@ const ( UntypedValue ) +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -91,11 +110,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + return nil, err + } + return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), + desc: desc, + metric: metric, }, nil } @@ -110,10 +133,8 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal } type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair + desc *Desc + metric *dto.Metric } func (m *constMetric) Desc() *Desc { @@ -121,7 +142,11 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil } func populateMetric( @@ -170,12 +195,12 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { }) } labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + sort.Sort(internal.LabelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 +const ExemplarMaxRunes = 128 // newExemplar creates a new dto.Exemplar from the provided values. 
An error is // returned if any of the label names or values are invalid or if the total diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 4ababe6c9812..7ae322590c86 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -99,6 +99,16 @@ func (m *MetricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. +// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + return m.metricMap.deleteByLabels(labels, m.curry) +} + // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. @@ -381,6 +391,82 @@ func (m *metricMap) deleteByHashWithLabels( return true } +// deleteByLabels deletes a metric if the given labels are present in the metric. +func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { + m.mtx.Lock() + defer m.mtx.Unlock() + + var numDeleted int + + for h, metrics := range m.metrics { + i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + // Didn't find matching labels in this metric slice. + continue + } + delete(m.metrics, h) + numDeleted++ + } + + return numDeleted +} + +// findMetricWithPartialLabel returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithPartialLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchPartialLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +// indexOf searches the given slice of strings for the target string and returns +// the index or len(items) as well as a boolean whether the search succeeded. +func indexOf(target string, items []string) (int, bool) { + for i, l := range items { + if l == target { + return i, true + } + } + return len(items), false +} + +// valueMatchesVariableOrCurriedValue determines if a value was previously curried, +// and returns whether it matches either the "base" value or the curried value accordingly. +// It also indicates whether the match is against a curried or uncurried value. +func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. + return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. 
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // @@ -485,7 +571,7 @@ func findMetricWithLabels( return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 74ee93280fed..1498ee144cb0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -21,6 +21,8 @@ import ( "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -182,7 +184,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error { Value: proto.String(lv), }) } - sort.Sort(labelPairSorter(out.Label)) + sort.Sort(internal.LabelPairSorter(out.Label)) return nil } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9dd34..35904ea19861 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// source: io/prometheus/client/metrics.proto package io_prometheus_client @@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 ) var MetricType_name = map[int32]string{ @@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{ 2: "SUMMARY", 3: "UNTYPED", 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", } var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, } func (x MetricType) Enum() *MetricType { @@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { } func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } type LabelPair struct { @@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { @@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + return fileDescriptor_d1e5ddb18987a258, []int{1} } func (m *Gauge) XXX_Unmarshal(b []byte) error { @@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + return fileDescriptor_d1e5ddb18987a258, []int{2} } func (m *Counter) XXX_Unmarshal(b []byte) error { @@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} + return fileDescriptor_d1e5ddb18987a258, []int{3} } func (m *Quantile) XXX_Unmarshal(b []byte) error { @@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} + return fileDescriptor_d1e5ddb18987a258, []int{4} } func (m *Summary) XXX_Unmarshal(b []byte) error { @@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} + return fileDescriptor_d1e5ddb18987a258, []int{5} } func (m *Untyped) XXX_Unmarshal(b []byte) error { @@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 
`protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
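The schema and delta comments in this struct fully determine how a native histogram is decoded: boundaries grow by a factor of 2^(2^-n), and the `*Delta` fields store bucket counts as differences rather than absolutes. A worked sketch of both rules (the schema value and counts are made up for illustration):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Boundary growth: each boundary is the previous one times 2^(2^-n).
	// For schema n=3 the factor is 2^(1/8) ≈ 1.0905.
	schema := 3
	factor := math.Pow(2, math.Pow(2, -float64(schema)))
	bound := 1.0 // 1 is a bucket boundary for every schema
	for i := 1; i <= 4; i++ {
		bound *= factor
		fmt.Printf("boundary %d: %.4f\n", i, bound)
	}

	// Delta encoding: the first value is the first bucket's count,
	// each later value is the difference to the previous bucket.
	deltas := []int64{3, -1, 2} // decodes to absolute counts 3, 2, 4
	var count int64
	for i, d := range deltas {
		count += d
		fmt.Printf("bucket %d count: %d\n", i, count)
	}
}
```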
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return fileDescriptor_d1e5ddb18987a258, []int{6} } func (m *Histogram) XXX_Unmarshal(b []byte) error { @@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 { return 0 } +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil && m.SampleCountFloat != nil { + return *m.SampleCountFloat + } + return 0 +} + func (m *Histogram) GetSampleSum() float64 { if m != nil && m.SampleSum != nil { return *m.SampleSum @@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket { return nil } +func (m *Histogram) GetSchema() int32 { + if m != nil && m.Schema != nil { + return *m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil && m.ZeroThreshold != nil { + return *m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil && m.ZeroCount != nil { + return *m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil && m.ZeroCountFloat != nil { + return *m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. 
type Bucket struct { CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} + return fileDescriptor_d1e5ddb18987a258, []int{7} } func (m *Bucket) XXX_Unmarshal(b []byte) error { @@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 { return 0 } +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil && m.CumulativeCountFloat != nil { + return *m.CumulativeCountFloat + } + return 0 +} + func (m *Bucket) GetUpperBound() float64 { if m != nil && m.UpperBound != nil { return *m.UpperBound @@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar { return nil } +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} + +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSpan.Unmarshal(m, b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return xxx_messageInfo_BucketSpan.Size(m) +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + type Exemplar struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} } func (m *Exemplar) String() string { return proto.CompactTextString(m) } func (*Exemplar) ProtoMessage() {} func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} + return 
fileDescriptor_d1e5ddb18987a258, []int{9} } func (m *Exemplar) XXX_Unmarshal(b []byte) error { @@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + return fileDescriptor_d1e5ddb18987a258, []int{10} } func (m *Metric) XXX_Unmarshal(b []byte) error { @@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + return fileDescriptor_d1e5ddb18987a258, []int{11} } func (m *MetricFamily) XXX_Unmarshal(b []byte) error { @@ -669,55 +843,72 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 
0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, + 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, + 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, + 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, + 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, + 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, + 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, + 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, + 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, + 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, + 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, + 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 
0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, + 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, + 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, + 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, + 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, + 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, + 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, + 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, + 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, + 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, + 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, + 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, + 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, + 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, + 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, + 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, + 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, + 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, + 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, + 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, + 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, + 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, + 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, + 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, + 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, + 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, + 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, + 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, + 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, + 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, + 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, + 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, + 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, + 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, + 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, + 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, + 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 
0xee, 0x27, + 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, + 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, + 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, + 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, + 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, + 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, + 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dc2eedeefcac..f819e4f8b549 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -12,6 +12,7 @@ // limitations under the License. // Build only when actually fuzzing +//go:build gofuzz // +build gofuzz package expfmt diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8a9313a3bee9..9d94ae9effe3 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" @@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - ts, err := ptypes.Timestamp((*e).Timestamp) + err = (*e).Timestamp.CheckValid() if err != nil { return written, err } + ts := (*e).Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7f67b16e4295..c909b8aa8c50 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -193,7 +193,7 @@ func ParseDuration(durationStr string) (Duration, error) { // Allow 0 without a unit. 
return 0, nil case "": - return 0, fmt.Errorf("empty duration string") + return 0, errors.New("empty duration string") } matches := durationRE.FindStringSubmatch(durationStr) if matches == nil { diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore index 25e3659ab25a..7cc33ae4a704 100644 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -1 +1,2 @@ -/fixtures/ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edacb38..a197699a1ee9 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,12 @@ --- linters: enable: - - golint + - godot + - revive + +linters-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md index 9a1aff412704..d325872bdfad 100644 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -## Prometheus Community Code of Conduct +# Prometheus Community Code of Conduct -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 943de7615eec..853eb9d49b8b 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of the file. Note that parsing the file's contents can still be performed one line at a time. This is done by first reading @@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t ``` The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does not bother to check the size of the file before reading.
``` data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index fa2bd5b5288f..7edfe4d09325 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,18 +14,18 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" + @echo ">> extracting fixtures $*" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -fixtures: fixtures/.unpacked +fixtures: testdata/fixtures/.unpacked update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index a1b1ca40f4b0..6c8e3e219797 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... @@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.13.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.39.0 +GOLANGCI_LINT_VERSION ?= v1.45.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. + ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -144,32 +127,25 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... 
-endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ @@ -177,25 +153,21 @@ $(GOTEST_DIR): .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint @@ -212,28 +184,15 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @@ -289,12 +248,6 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - .PHONY: precheck precheck:: diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md index 67741f015af1..fed02d85c79e 100644 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -3,4 +3,4 @@ The Prometheus security policy, including how to report vulnerabilities, can be found here: -https://prometheus.io/docs/operating/security/ +<https://prometheus.io/docs/operating/security/> diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 4e47e6172096..68f36e888f91 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -15,11 +15,28 @@ package procfs import ( "fmt" - "io/ioutil" "net" + "os" + "strconv" "strings" ) +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer these addresses. + ATFDontPublish = 0x40 +) + // ARPEntry contains a single row of the columnar data represented in // /proc/net/arp. type ARPEntry struct { @@ -29,12 +46,14 @@ type ARPEntry struct { HWAddr net.HardwareAddr // Name of the device Device string + // Flags + Flags byte } // GatherARPEntries retrieves all the ARP entries, parses the relevant columns, // and then returns a slice of ARPEntry values. func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) } @@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err } return entry, nil } + +// IsComplete returns true if the ARP entry is marked with the complete flag.
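Putting the new flags field and accessor together, a minimal usage sketch (it assumes a standard /proc mount; `procfs.NewFS` is the package's regular constructor, and the filtering itself is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	// Keep only entries the kernel has fully resolved (ATF_COM set).
	for _, e := range entries {
		if e.IsComplete() {
			fmt.Printf("%s -> %s on %s\n", e.IPAddr, e.HWAddr, e.Device)
		}
	}
}
```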
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 5623b24a161f..ff6b927da159 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs @@ -27,7 +28,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. type CPUInfo struct { Processor uint VendorID string @@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode } // firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line +// and returns the contents of that line. func firstNonEmptyLine(scanner *bufio.Scanner) string { for scanner.Scan() { line := scanner.Text() diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 44b590ed38fa..64cfd534c1f9 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 91e272573a51..c11207f3ab61 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index 95b5b4ec44a5..ea41bf2ca1e2 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 6068bd571c24..003bc2ad4a33 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
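The cpuinfo files in this hunk all gain the Go 1.17 `//go:build` form next to the legacy `// +build` lines, and the two encodings must agree: `//go:build` spells out `&&` and `||` explicitly, while the legacy form ORs space-separated terms within one line and ANDs separate lines. For the file being patched here, the equivalent prologue looks like this (a sketch, not the full file):

```go
//go:build linux && (ppc64 || ppc64le)
// +build linux
// +build ppc64 ppc64le

package procfs
```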
+//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index e83c2e207c18..1c9b7313b6cb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (riscv || riscv64) // +build linux // +build riscv riscv64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index 26814eebaaf3..fa3686bc0048 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index d5bedf97f31c..a0ef55562ebb 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 5e7eeef4a531..000000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,7673 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/environ -Lines: 1 -PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 
-SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 18446744073708503040 18446744073708503040 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: 
flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac 
sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 1000 1000 1000 0 -Gid: 1001 1001 1001 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: 
../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - ____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cmdline -Lines: 1 -BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 
sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 
-initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 
spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 972 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp -async : yes - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif -driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c 
-driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : 
cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel -priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 
0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 
-digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 
sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 
185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/stat -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/arp_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c -00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/ndisc_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb -00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 
-version 15 -timestamp 15819019232 -cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 -domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 -cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 -domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/slabinfo -Lines: 302 -slabinfo - version: 2.1 -# name : tunables : slabdata -pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 -pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 -nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 -kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 -pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 -x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 -iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 -bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 -bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 -fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 -fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 -squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 -xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ifork 0 0 376 21 2 : 
[Elided: extraction-flattened remainder of a deleted ttar-packed test-fixture archive from a vendored dependency. The removed hunks contain only mechanically generated fixture data: /proc fixtures (slabinfo cache entries, stat, swaps, symlinktargets, sys/kernel/random, sys/vm tunables, zoneinfo) and /sys fixtures (block/dm-0 and block/sda queue attributes and stat, class/drm/card0 amdgpu device attributes, class/fc_host/host0 statistics, class/infiniband hfi1_0 and mlx4_0 port counters).]
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/device -SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme/nvme0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/firmware_rev -Lines: 1 -1B2QEXP7 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/model -Lines: 1 -Samsung SSD 970 PRO 512GB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/serial -Lines: 1 -S680HF8N190894I -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/state -Lines: 1 -live -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0 -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/class/powercap/intel-rapl:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/name -Lines: 1 -package-0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw -Lines: 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us -Lines: 1 -976 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj -Lines: 1 -118821284256 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/name -Lines: 1 -core -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/name -Lines: 1 -package-10 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0a -SymlinkTo: 
../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0a -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/cur_state -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/max_state -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/type -Lines: 1 -Processor -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/cur_state -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/max_state -Lines: 1 -27 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/type -Lines: 1 -intel_powerclamp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type 
-Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 --44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device -SymlinkTo: ../../../ACPI0003:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup -Lines: 1 -enabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms -Lines: 1 -10598 -Mode: 444 -# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm -Lines: 1 -2369000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device -SymlinkTo: ../../../PNP0C0A:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11750000 -POWER_SUPPLY_POWER_NOW=5064000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=47390000 -POWER_SUPPLY_ENERGY_NOW=40730000 -POWER_SUPPLY_CAPACITY=85 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
[vendored fixture data removed: this span of the diff deletes the remainder of a ttar-format fixture archive, one Path/Lines/Mode record per entry separated by `# ttar - - -` markers. The deleted entries cover, in order: scsi_tape stats (in_flight, io_ns, other_cnt, read_byte_cnt, read_cnt, read_ns, resid_cnt, write_byte_cnt, write_cnt, write_ns) for fixtures/sys/devices/pci0000:00/0000:00:00.0/.../scsi_tape/{nst0m,st0,st0a,st0l,st0m}; bcache dirty_data and stats_{day,five_minute,hour,total} counters plus io_errors, metadata_written, priority_stats, and written for the block devices sdb and sdc under fixtures/sys/devices/pci0000:00/0000:00:0d.0/; PCI device attributes for fixtures/sys/devices/pci0000:00/0000:00:1f.6 (ari_enabled, class, device, irq, local_cpulist, modalias, numa_node, resource, revision, subsystem ids, uevent, vendor, and related files); rbd device name/pool fixtures for fixtures/sys/devices/rbd/{0,1}; system fixtures for clocksource0 (available/current clocksource), cpu0 and cpu1 (topology, thermal_throttle, cpufreq), cpufreq policy0/policy1, and node1/node2 vmstat; the bcache cache set fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 (average_key_size, bdev0 and cache0 stats, writeback_rate_debug, btree_cache_size, cache_available_percent, congested, internal counters, root_usage_percent, stats_* trees, tree_depth); the btrfs filesystem fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d (allocation data/metadata/system including raid0/raid1 subtrees, clone_alignment, devices loop25/loop26, features, label, metadata_uuid, nodesize, quota_override, sectorsize); and the opening allocation/data counters of a second btrfs filesystem, fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b, whose records continue past this span.]
- - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 
1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 -SymlinkTo: ../../../../devices/virtual/block/loop22 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 -SymlinkTo: ../../../../devices/virtual/block/loop23 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 -SymlinkTo: ../../../../devices/virtual/block/loop24 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 -SymlinkTo: ../../../../devices/virtual/block/loop25 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid -Lines: 1 -7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path -Lines: 1 -/home/iscsi/file_back_1G -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - 
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 0040753b1c18..3c18c7610ef5 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -26,7 +26,7 @@ const (
 	// DefaultSysMountPoint is the common mount point of the sys filesystem.
 	DefaultSysMountPoint = "/sys"
 
-	// DefaultConfigfsMountPoint is the common mount point of the configfs
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
 	DefaultConfigfsMountPoint = "/sys/kernel/config"
 )
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 22cb07a6bbb5..b030951faf98 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,7 +14,7 @@
 package util
 
 import (
-	"io/ioutil"
+	"os"
 	"strconv"
 	"strings"
 )
@@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
 
 // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
 func ReadUintFromFile(path string) (uint64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
@@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) {
 
 // ReadIntFromFile reads a file and attempts to parse a int64 from it.
 func ReadIntFromFile(path string) (int64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
index 8051161b2aa4..71b7a70ebd68 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -15,17 +15,16 @@ package util
 
 import (
 	"io"
-	"io/ioutil"
 	"os"
 )
 
-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
 // many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
+// Reads a max file size of 1024kB. For files larger than this, a scanner
 // should be used.
 func ReadFileNoStat(filename string) ([]byte, error) {
-	const maxBufferSize = 1024 * 512
+	const maxBufferSize = 1024 * 1024
 
 	f, err := os.Open(filename)
 	if err != nil {
@@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) {
 	defer f.Close()
 
 	reader := io.LimitReader(f, maxBufferSize)
-	return ioutil.ReadAll(reader)
+	return io.ReadAll(reader)
 }
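The updated `ReadFileNoStat` above is the pattern procfs uses for kernel files whose `Stat()` size is unreliable: open the file, cap the read with a limit reader (now 1 MiB), and read to EOF. A minimal standalone sketch of the same pattern, an illustration rather than the vendored code itself:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readProcFile reads a /proc or /sys file without trusting Stat()'s size,
// since such files often report a size of 0 or 4096.
func readProcFile(name string) ([]byte, error) {
	const maxBufferSize = 1024 * 1024 // same 1 MiB cap as the diff above

	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// LimitReader ensures a misbehaving kernel file cannot make us
	// allocate unbounded memory.
	return io.ReadAll(io.LimitReader(f, maxBufferSize))
}

func main() {
	b, err := readProcFile("/proc/loadavg")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print(string(b))
}
```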
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index c07de0b6c9c6..1ab875ceec6a 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -11,7 +11,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
 
 package util
 
@@ -21,7 +23,7 @@ import (
 	"syscall"
 )
 
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
 // https://github.com/prometheus/node_exporter/pull/728/files
 //
 // Note that this function will not read files larger than 128 bytes.
@@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) {
 	defer f.Close()
 
 	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
-	// Go's ioutil.ReadFile implementation to poll forever.
+	// Go's os.ReadFile implementation to poll forever.
 	//
 	// Since we either want to read data or bail immediately, do the simplest
 	// possible read using syscall directly.
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
index bd55b45377db..1d86f5e63f3c 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -11,7 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
 
 package util
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 89e447746cfe..391c07957e90 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -20,7 +20,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"strconv"
@@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) {
 		stats IPVSStats
 	)
 
-	statContent, err := ioutil.ReadAll(r)
+	statContent, err := io.ReadAll(r)
 	if err != nil {
 		return IPVSStats{}, err
 	}
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
index da3a941d60b9..db88566bdf0a 100644
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
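`SysReadFile` above deliberately bypasses Go's poller because broken hwmon drivers can return EAGAIN indefinitely, which would make `os.ReadFile` retry forever. A self-contained sketch of the same one-shot `syscall.Read` pattern, assuming a Linux host; the sysfs path is only an example:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

// sysReadFile performs exactly one read syscall: if the driver returns
// EAGAIN we fail immediately instead of letting the runtime poll and retry.
func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// 128 bytes is plenty for typical single-value sysfs attributes.
	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	v, err := sysReadFile("/sys/class/thermal/thermal_zone0/temp")
	fmt.Println(v, err)
}
```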
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0cce190ec22b..0096cafbdf86 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -21,7 +21,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
 type LoadAvg struct {
 	Load1 float64
 	Load5 float64
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index f0b9e5f75a9e..a95c889cb9e2 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -15,7 +15,7 @@ package procfs
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"
@@ -64,7 +64,7 @@ type MDStat struct {
 // structs containing the relevant info. More information available here:
 // https://raid.wiki.kernel.org/index.php/Mdstat
 func (fs FS) MDStat() ([]MDStat, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+	data, err := os.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
 		return nil, err
 	}
@@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 }
 
 func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+	statusFields := strings.Fields(statusLine)
+	if len(statusFields) < 1 {
+		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+	}
 
-	sizeStr := strings.Fields(statusLine)[0]
+	sizeStr := statusFields[0]
 	size, err = strconv.ParseInt(sizeStr, 10, 64)
 	if err != nil {
 		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 9964a3600b4b..8300daca0545 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -25,7 +25,7 @@ import (
 )
 
 // A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
+// and contains netfilter conntrack statistics at one CPU core.
 type ConntrackStatEntry struct {
 	Entries uint64
 	Found   uint64
@@ -38,12 +38,12 @@ type ConntrackStatEntry struct {
 	SearchRestart uint64
 }
 
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
 func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
 	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
 }
 
-// Parses a slice of ConntrackStatEntries from the given filepath
+// Parses a slice of ConntrackStatEntries from the given filepath.
 func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	// This file is small and can be read with one syscall.
 	b, err := util.ReadFileNoStat(path)
@@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	return stat, nil
 }
 
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
 func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 	var entries []ConntrackStatEntry
 
@@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 	return entries, nil
 }
 
-// Parses a ConntrackStatEntry from given array of fields
+// Parses a ConntrackStatEntry from given array of fields.
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 	if len(fields) != 17 {
 		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
@@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 	return entry, nil
 }
 
-// Parses a uint64 from given hex in string
+// Parses a uint64 from given hex in string.
 func parseConntrackStatField(field string) (uint64, error) {
 	val, err := strconv.ParseUint(field, 16, 64)
 	if err != nil {
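The new guard in `evalStatusLine` exists because `strings.Fields` on a blank status line returns an empty slice, and indexing element 0 of it panics; checking the field count first turns the panic into an ordinary error. A standalone illustration of the pattern, with a made-up mdstat status line:

```go
package main

import (
	"fmt"
	"strings"
)

// firstField returns the first whitespace-separated field, or an error
// instead of panicking when the line is empty.
func firstField(statusLine string) (string, error) {
	fields := strings.Fields(statusLine)
	if len(fields) < 1 {
		return "", fmt.Errorf("unexpected statusLine %q", statusLine)
	}
	return fields[0], nil
}

func main() {
	for _, line := range []string{"5245952 blocks super 1.2 [2/2] [UU]", ""} {
		f, err := firstField(line)
		fmt.Printf("%q -> %q, err=%v\n", line, f, err)
	}
}
```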
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
index 47a710befb93..e66208aa05fe 100644
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) {
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
 func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
-	parts := strings.SplitN(rawLine, ":", 2)
-	if len(parts) != 2 {
+	idx := strings.LastIndex(rawLine, ":")
+	if idx == -1 {
 		return nil, errors.New("invalid net/dev line, missing colon")
 	}
-	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
 
 	var err error
 	line := &NetDevLine{}
 
 	// Interface Name
-	line.Name = strings.TrimSpace(parts[0])
+	line.Name = strings.TrimSpace(rawLine[:idx])
 	if line.Name == "" {
 		return nil, errors.New("invalid net/dev line, empty interface name")
 	}
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 8c9ee3de8786..7fd57d7f463b 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -34,7 +34,7 @@ const (
 	readLimit = 4294967296 // Byte -> 4 GiB
 )
 
-// this contains generic data structures for both udp and tcp sockets
+// This contains generic data structures for both udp and tcp sockets.
 type (
 	// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
 	NetIPSocket []*netIPSocketLine
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 8c6de3791baf..374b6f73f821 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -23,7 +23,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// NetProtocolStats stores the contents from /proc/net/protocols
+// NetProtocolStats stores the contents from /proc/net/protocols.
 type NetProtocolStats map[string]NetProtocolStatLine
 
 // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
@@ -41,7 +41,7 @@ type NetProtocolStatLine struct {
 	Capabilities NetProtocolCapabilities
 }
 
-// NetProtocolCapabilities contains a list of capabilities for each protocol
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
 type NetProtocolCapabilities struct {
 	Close   bool // 8
 	Connect bool // 9
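The `parseLine` change above swaps `strings.SplitN` for `strings.LastIndex` so that a colon inside the interface-name portion no longer truncates the name; since the counter fields after the separator are plain decimal numbers, the last colon on the line is always the real separator. A standalone demonstration with a made-up line:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical /proc/net/dev line whose interface name contains a colon.
	rawLine := " veth:sandbox: 1024 8 0 0 0 0 0 0 2048 16 0 0 0 0 0 0"

	// Old approach: split at the first colon, losing part of the name.
	parts := strings.SplitN(rawLine, ":", 2)
	fmt.Printf("SplitN name:    %q\n", strings.TrimSpace(parts[0])) // "veth"

	// New approach: split at the last colon, keeping the whole name.
	idx := strings.LastIndex(rawLine, ":")
	fmt.Printf("LastIndex name: %q\n", strings.TrimSpace(rawLine[:idx])) // "veth:sandbox"
}
```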
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index 46f12c61d3e9..a94f86dc4ae6 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -30,13 +30,13 @@ import (
 // * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
 
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
 type SoftnetStat struct {
-	// Number of processed packets
+	// Number of processed packets.
 	Processed uint32
-	// Number of dropped packets
+	// Number of dropped packets.
 	Dropped uint32
-	// Number of times processing packets ran out of quota
+	// Number of times processing packets ran out of quota.
 	TimeSqueezed uint32
 }
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
new file mode 100644
index 000000000000..f9d9d243db38
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -0,0 +1,189 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by other
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transportation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discads
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy Error
+	XfrmOutPolError int
+	// Forward routing of a packet is not allowed
+	XfrmFwdHdrError int
+	// State is invalid, perhaps expired
+	XfrmOutStateInvalid int
+	// State hasn’t been fully acquired before use
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+
+	}
+
+	return x, s.Err()
+}
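The new net_xfrm.go above exposes the kernel's IPsec transform-layer counters. A brief usage sketch, assuming the package is imported by its canonical path github.com/prometheus/procfs rather than through the vendor tree; error handling is kept minimal:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewXfrmStat opens /proc/net/xfrm_stat via the default mount point.
	x, err := procfs.NewXfrmStat()
	if err != nil {
		fmt.Println("xfrm_stat unavailable:", err)
		return
	}

	// A non-zero XfrmInNoStates usually means inbound IPsec packets matched
	// no SA (wrong SPI, address, or protocol).
	fmt.Println("XfrmInError:    ", x.XfrmInError)
	fmt.Println("XfrmInNoStates: ", x.XfrmInNoStates)
}
```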
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index 94d892f11348..dcea9c5a671f 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -21,13 +21,13 @@ import (
 	"strings"
 )
 
-// NetStat contains statistics for all the counters from one file
+// NetStat contains statistics for all the counters from one file.
 type NetStat struct {
-	Filename string
 	Stats    map[string][]uint64
+	Filename string
 }
 
-// NetStat retrieves stats from /proc/net/stat/
+// NetStat retrieves stats from `/proc/net/stat/`.
 func (fs FS) NetStat() ([]NetStat, error) {
 	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
 	if err != nil {
@@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
 		// Other strings represent per-CPU counters
 		for scanner.Scan() {
 			for num, counter := range strings.Fields(scanner.Text()) {
-				value, err := strconv.ParseUint(counter, 16, 32)
+				value, err := strconv.ParseUint(counter, 16, 64)
 				if err != nil {
 					return nil, err
 				}
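The netstat.go change widens the parse from 32 to 64 bits because the per-CPU counters in `/proc/net/stat/` are 64-bit hexadecimal values; a 32-bit parse starts failing as soon as a counter passes 0xffffffff. A standalone illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A counter wider than 32 bits, plausible on a long-lived machine.
	counter := "1a2b3c4d5e"

	_, err32 := strconv.ParseUint(counter, 16, 32)
	fmt.Println("bitSize 32:", err32) // value out of range

	v, err64 := strconv.ParseUint(counter, 16, 64)
	fmt.Println("bitSize 64:", v, err64) // parses fine
}
```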
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 28f696803f6f..c30223af72ad 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -16,7 +16,7 @@ package procfs
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"strconv"
 	"strings"
@@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) {
 
 // NewProc returns a process for the given pid.
 //
-// Deprecated: use fs.Proc() instead
+// Deprecated: Use fs.Proc() instead.
 func (fs FS) NewProc(pid int) (Proc, error) {
 	return fs.Proc(pid)
 }
@@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) {
 	}
 	defer f.Close()
 
-	data, err := ioutil.ReadAll(f)
+	data, err := io.ReadAll(f)
 	if err != nil {
 		return "", err
 	}
@@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) {
 	return wd, err
 }
 
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
 func (p Proc) RootDir() (string, error) {
 	rdir, err := os.Readlink(p.path("root"))
 	if os.IsNotExist(err) {
@@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
 
 // Schedstat returns task scheduling information for the process.
 func (p Proc) Schedstat() (ProcSchedstat, error) {
-	contents, err := ioutil.ReadFile(p.path("schedstat"))
+	contents, err := os.ReadFile(p.path("schedstat"))
 	if err != nil {
 		return ProcSchedstat{}, err
 	}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index be45b798733b..cca03327c3fe 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -45,7 +45,7 @@ type Cgroup struct {
 }
 
 // parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
 func parseCgroupString(cgroupStr string) (*Cgroup, error) {
 	var err error
 
@@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
 	return cgroup, nil
 }
 
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
 func parseCgroups(data []byte) ([]Cgroup, error) {
 	var cgroups []Cgroup
 	scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
 
 // Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
 // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
 func (p Proc) Cgroups() ([]Cgroup, error) {
 	data, err := util.ReadFileNoStat(p.path("cgroup"))
 	if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 000000000000..24d4dce9cfc7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+	// The name of the controller. controller is also known as subsystem.
+	SubsysName string
+	// The unique ID of the cgroup hierarchy on which this controller is mounted.
+	Hierarchy int
+	// The number of control groups in this hierarchy using this controller.
+	Cgroups int
+	// This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
+	Enabled int
+}
+
+// parseCgroupSummary parses each line of the /proc/cgroup file
+// Line format is `subsys_name hierarchy num_cgroups enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+	var err error
+
+	fields := strings.Fields(CgroupSummaryStr)
+	// require at least 4 fields
+	if len(fields) < 4 {
+		return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
+	}
+
+	CgroupSummary := &CgroupSummary{
+		SubsysName: fields[0],
+	}
+	CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse hierarchy ID")
+	}
+	CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse Cgroup Num")
+	}
+	CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse Enabled")
+	}
+	return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroup file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+	var CgroupSummarys []CgroupSummary
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		CgroupSummaryString := scanner.Text()
+		// ignore comment lines
+		if strings.HasPrefix(CgroupSummaryString, "#") {
+			continue
+		}
+		CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+		if err != nil {
+			return nil, err
+		}
+		CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+	}
+
+	err := scanner.Err()
+	return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroupSummary(data)
+}
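A usage sketch for the new CgroupSummarys API above, again assuming the canonical import path; the mount point and output format here are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// One CgroupSummary per controller compiled into the kernel.
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range summaries {
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}
```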
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 6134b3580c45..57a89895d66a 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Environ reads process environments from /proc/<pid>/environ +// Environ reads process environments from `/proc/<pid>/environ`. func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index cf63227f064f..1bbdd4a8e998 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Regexp variables var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) @@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) } func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } -// InotifyWatchLen returns the total number of inotify watches +// InotifyWatchLen returns the total number of inotify watches. func (p ProcFDInfos) InotifyWatchLen() (int, error) { length := 0 for _, f := range p { diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index dd20f198a308..7a1388185a97 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -79,7 +79,7 @@ var ( // NewLimits returns the current soft limits of the process. // -// Deprecated: use p.Limits() instead +// Deprecated: Use p.Limits() instead. func (p Proc) NewLimits() (ProcLimits, error) { return p.Limits() } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 1d7772d516a4..f1bcbf32bb3d 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js package procfs @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
type ProcMapPermissions struct { // mapping has the [R]ead flag set Read bool @@ -39,8 +41,8 @@ type ProcMapPermissions struct { Private bool } -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. type ProcMap struct { // The start address of current mapping. StartAddr uintptr @@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) { return unix.Mkdev(uint32(major), uint32(minor)), nil } -// parseAddress just converts a hex-string to a uintptr +// parseAddress converts a hex-string to a uintptr. func parseAddress(s string) (uintptr, error) { a, err := strconv.ParseUint(s, 16, 0) if err != nil { @@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) { return uintptr(a), nil } -// parseAddresses parses the start-end address +// parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 000000000000..48b5238194e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,440 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc/<pid>/net/netstat. +type ProcNetstat struct { + // The process ID.
+ PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent float64 + SyncookiesRecv float64 + SyncookiesFailed float64 + EmbryonicRsts float64 + PruneCalled float64 + RcvPruned float64 + OfoPruned float64 + OutOfWindowIcmps float64 + LockDroppedIcmps float64 + ArpFilter float64 + TW float64 + TWRecycled float64 + TWKilled float64 + PAWSActive float64 + PAWSEstab float64 + DelayedACKs float64 + DelayedACKLocked float64 + DelayedACKLost float64 + ListenOverflows float64 + ListenDrops float64 + TCPHPHits float64 + TCPPureAcks float64 + TCPHPAcks float64 + TCPRenoRecovery float64 + TCPSackRecovery float64 + TCPSACKReneging float64 + TCPSACKReorder float64 + TCPRenoReorder float64 + TCPTSReorder float64 + TCPFullUndo float64 + TCPPartialUndo float64 + TCPDSACKUndo float64 + TCPLossUndo float64 + TCPLostRetransmit float64 + TCPRenoFailures float64 + TCPSackFailures float64 + TCPLossFailures float64 + TCPFastRetrans float64 + TCPSlowStartRetrans float64 + TCPTimeouts float64 + TCPLossProbes float64 + TCPLossProbeRecovery float64 + TCPRenoRecoveryFail float64 + TCPSackRecoveryFail float64 + TCPRcvCollapsed float64 + TCPDSACKOldSent float64 + TCPDSACKOfoSent float64 + TCPDSACKRecv float64 + TCPDSACKOfoRecv float64 + TCPAbortOnData float64 + TCPAbortOnClose float64 + TCPAbortOnMemory float64 + TCPAbortOnTimeout float64 + TCPAbortOnLinger float64 + TCPAbortFailed float64 + TCPMemoryPressures float64 + TCPMemoryPressuresChrono float64 + TCPSACKDiscard float64 + TCPDSACKIgnoredOld float64 + TCPDSACKIgnoredNoUndo float64 + TCPSpuriousRTOs float64 + TCPMD5NotFound float64 + TCPMD5Unexpected float64 + TCPMD5Failure float64 + TCPSackShifted float64 + TCPSackMerged float64 + TCPSackShiftFallback float64 + TCPBacklogDrop float64 + PFMemallocDrop float64 + TCPMinTTLDrop float64 + TCPDeferAcceptDrop float64 + IPReversePathFilter float64 + TCPTimeWaitOverflow float64 + TCPReqQFullDoCookies float64 + TCPReqQFullDrop float64 + TCPRetransFail float64 + TCPRcvCoalesce float64 + TCPOFOQueue float64 + TCPOFODrop float64 + TCPOFOMerge float64 + TCPChallengeACK float64 + TCPSYNChallenge float64 + TCPFastOpenActive float64 + TCPFastOpenActiveFail float64 + TCPFastOpenPassive float64 + TCPFastOpenPassiveFail float64 + TCPFastOpenListenOverflow float64 + TCPFastOpenCookieReqd float64 + TCPFastOpenBlackhole float64 + TCPSpuriousRtxHostQueues float64 + BusyPollRxPackets float64 + TCPAutoCorking float64 + TCPFromZeroWindowAdv float64 + TCPToZeroWindowAdv float64 + TCPWantZeroWindowAdv float64 + TCPSynRetrans float64 + TCPOrigDataSent float64 + TCPHystartTrainDetect float64 + TCPHystartTrainCwnd float64 + TCPHystartDelayDetect float64 + TCPHystartDelayCwnd float64 + TCPACKSkippedSynRecv float64 + TCPACKSkippedPAWS float64 + TCPACKSkippedSeq float64 + TCPACKSkippedFinWait2 float64 + TCPACKSkippedTimeWait float64 + TCPACKSkippedChallenge float64 + TCPWinProbe float64 + TCPKeepAlive float64 + TCPMTUPFail float64 + TCPMTUPSuccess float64 + TCPWqueueTooBig float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes float64 + InTruncatedPkts float64 + InMcastPkts float64 + OutMcastPkts float64 + InBcastPkts float64 + OutBcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InCsumErrors float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 + ReasmOverlaps float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err 
:= util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseNetstat parses the metrics from proc/<pid>/net/netstat file +// and returns a ProcNetstat structure. +func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = value + case "TW": + procNetstat.TcpExt.TW = value + case "TWRecycled": + procNetstat.TcpExt.TWRecycled = value + case "TWKilled": + procNetstat.TcpExt.TWKilled = value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = value + case "ListenOverflows": + procNetstat.TcpExt.ListenOverflows = value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = value + case "TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = value +
case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = value + case "TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = value + case "TCPDSACKOfoSent": + procNetstat.TcpExt.TCPDSACKOfoSent = value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = value + case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = value + case 
"TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = value + } + case "IpExt": + switch key { + case "InNoRoutes": + procNetstat.IpExt.InNoRoutes = value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = value + case "InOctets": + procNetstat.IpExt.InOctets = value + case "OutOctets": + procNetstat.IpExt.OutOctets = value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index dc6c14f0a4c1..a68fe15290a8 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -35,9 +35,10 @@ import ( const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. type PSILine struct { Avg10 float64 Avg60 float64 @@ -46,8 +47,9 @@ type PSILine struct { } // PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. type PSIStats struct { Some *PSILine Full *PSILine @@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { return parsePSIStats(resource, bytes.NewReader(data)) } -// parsePSIStats parses the specified file for pressure stall information +// parsePSIStats parses the specified file for pressure stall information. 
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { psiStats := PSIStats{} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index a576a720a442..0e97d99575e7 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -28,30 +29,30 @@ import ( ) var ( - // match the header line before each mapped zone in /proc/pid/smaps + // match the header line before each mapped zone in `/proc/pid/smaps`. procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM + // Amount of the mapping that is currently resident in RAM. Rss uint64 - // Process's proportional share of this mapping + // Process's proportional share of this mapping. Pss uint64 - // Size in bytes of clean shared pages + // Size in bytes of clean shared pages. SharedClean uint64 - // Size in bytes of dirty shared pages + // Size in bytes of dirty shared pages. SharedDirty uint64 - // Size in bytes of clean private pages + // Size in bytes of clean private pages. PrivateClean uint64 - // Size in bytes of dirty private pages + // Size in bytes of dirty private pages. PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed + // Amount of memory currently marked as referenced or accessed. Referenced uint64 - // Amount of memory that does not belong to any file + // Amount of memory that does not belong to any file. Anonymous uint64 - // Amount would-be-anonymous memory currently on swap + // Amount would-be-anonymous memory currently on swap. Swap uint64 - // Process's proportional memory on swap + // Process's proportional memory on swap. SwapPss uint64 } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 000000000000..ae191896cbd7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc/<pid>/net/snmp. +type ProcSnmp struct { + // The process ID.
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding float64 + DefaultTTL float64 + InReceives float64 + InHdrErrors float64 + InAddrErrors float64 + ForwDatagrams float64 + InUnknownProtos float64 + InDiscards float64 + InDelivers float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 +} + +type Icmp struct { + InMsgs float64 + InErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InTimeExcds float64 + InParmProbs float64 + InSrcQuenchs float64 + InRedirects float64 + InEchos float64 + InEchoReps float64 + InTimestamps float64 + InTimestampReps float64 + InAddrMasks float64 + InAddrMaskReps float64 + OutMsgs float64 + OutErrors float64 + OutDestUnreachs float64 + OutTimeExcds float64 + OutParmProbs float64 + OutSrcQuenchs float64 + OutRedirects float64 + OutEchos float64 + OutEchoReps float64 + OutTimestamps float64 + OutTimestampReps float64 + OutAddrMasks float64 + OutAddrMaskReps float64 +} + +type IcmpMsg struct { + InType3 float64 + OutType3 float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm float64 + RtoMin float64 + RtoMax float64 + MaxConn float64 + ActiveOpens float64 + PassiveOpens float64 + AttemptFails float64 + EstabResets float64 + CurrEstab float64 + InSegs float64 + OutSegs float64 + RetransSegs float64 + InErrs float64 + OutRsts float64 + InCsumErrors float64 +} + +type Udp struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc/<pid>/net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", + fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = value + case "InReceives": + procSnmp.Ip.InReceives = value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = value + case "InDiscards": + procSnmp.Ip.InDiscards = value + case "InDelivers": + procSnmp.Ip.InDelivers = value + case "OutRequests": + procSnmp.Ip.OutRequests = value + case "OutDiscards": + procSnmp.Ip.OutDiscards = value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = value + case "ReasmReqds": + procSnmp.Ip.ReasmReqds = value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = value + case "ReasmFails": + procSnmp.Ip.ReasmFails = value + case "FragOKs": + procSnmp.Ip.FragOKs = value + case "FragFails": + procSnmp.Ip.FragFails = value + case "FragCreates": + procSnmp.Ip.FragCreates = value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = value + case "InErrors": + procSnmp.Icmp.InErrors = value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = value + case "InRedirects": + procSnmp.Icmp.InRedirects = value + case "InEchos": + procSnmp.Icmp.InEchos = value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = value + case "OutErrors": + procSnmp.Icmp.OutErrors = value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = value + case "OutEchos": + procSnmp.Icmp.OutEchos = value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = value + } + case "IcmpMsg": + switch key { + case "InType3": + procSnmp.IcmpMsg.InType3 = value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + procSnmp.Tcp.RtoAlgorithm = value + case "RtoMin": + procSnmp.Tcp.RtoMin = value + case "RtoMax": + procSnmp.Tcp.RtoMax = value + case "MaxConn": + procSnmp.Tcp.MaxConn = value + case "ActiveOpens": + procSnmp.Tcp.ActiveOpens = value + case "PassiveOpens": + 
procSnmp.Tcp.PassiveOpens = value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = value + case "EstabResets": + procSnmp.Tcp.EstabResets = value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = value + case "InSegs": + procSnmp.Tcp.InSegs = value + case "OutSegs": + procSnmp.Tcp.OutSegs = value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = value + case "InErrs": + procSnmp.Tcp.InErrs = value + case "OutRsts": + procSnmp.Tcp.OutRsts = value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = value + case "NoPorts": + procSnmp.Udp.NoPorts = value + case "InErrors": + procSnmp.Udp.InErrors = value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = value + case "NoPorts": + procSnmp.UdpLite.NoPorts = value + case "InErrors": + procSnmp.UdpLite.InErrors = value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 000000000000..f611992d52ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc/<pid>/net/snmp6. +type ProcSnmp6 struct { + // The process ID.
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives float64 + InHdrErrors float64 + InTooBigErrors float64 + InNoRoutes float64 + InAddrErrors float64 + InUnknownProtos float64 + InTruncatedPkts float64 + InDiscards float64 + InDelivers float64 + OutForwDatagrams float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 + InMcastPkts float64 + OutMcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 +} + +type Icmp6 struct { + InMsgs float64 + InErrors float64 + OutMsgs float64 + OutErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InPktTooBigs float64 + InTimeExcds float64 + InParmProblems float64 + InEchos float64 + InEchoReplies float64 + InGroupMembQueries float64 + InGroupMembResponses float64 + InGroupMembReductions float64 + InRouterSolicits float64 + InRouterAdvertisements float64 + InNeighborSolicits float64 + InNeighborAdvertisements float64 + InRedirects float64 + InMLDv2Reports float64 + OutDestUnreachs float64 + OutPktTooBigs float64 + OutTimeExcds float64 + OutParmProblems float64 + OutEchos float64 + OutEchoReplies float64 + OutGroupMembQueries float64 + OutGroupMembResponses float64 + OutGroupMembReductions float64 + OutRouterSolicits float64 + OutRouterAdvertisements float64 + OutNeighborSolicits float64 + OutNeighborAdvertisements float64 + OutRedirects float64 + OutMLDv2Reports float64 + InType1 float64 + InType134 float64 + InType135 float64 + InType136 float64 + InType143 float64 + OutType133 float64 + OutType135 float64 + OutType136 float64 + OutType143 float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc/<pid>/net/snmp6 file +// and returns a map contains those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = value + case "InDiscards": + procSnmp6.Ip6.InDiscards = value + case "InDelivers": + procSnmp6.Ip6.InDelivers = value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = value + case "OutRequests": + procSnmp6.Ip6.OutRequests = value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = value + case "FragOKs": + procSnmp6.Ip6.FragOKs = value + case "FragFails": + procSnmp6.Ip6.FragFails = value + case "FragCreates": + procSnmp6.Ip6.FragCreates = value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = value + case "InOctets": + procSnmp6.Ip6.InOctets = value + case "OutOctets": + procSnmp6.Ip6.OutOctets = value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = value + case "InErrors": + procSnmp6.Icmp6.InErrors = value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = value + case "InEchos": + procSnmp6.Icmp6.InEchos = value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = value + case "InNeighborSolicits": + 
procSnmp6.Icmp6.InNeighborSolicits = value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = value + case "InType1": + procSnmp6.Icmp6.InType1 = value + case "InType134": + procSnmp6.Icmp6.InType134 = value + case "InType135": + procSnmp6.Icmp6.InType135 = value + case "InType136": + procSnmp6.Icmp6.InType136 = value + case "InType143": + procSnmp6.Icmp6.InType143 = value + case "OutType133": + procSnmp6.Icmp6.OutType133 = value + case "OutType135": + procSnmp6.Icmp6.OutType135 = value + case "OutType136": + procSnmp6.Icmp6.OutType136 = value + case "OutType143": + procSnmp6.Icmp6.OutType143 = value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = value + case "NoPorts": + procSnmp6.Udp6.NoPorts = value + case "InErrors": + procSnmp6.Udp6.InErrors = value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = value + case "InErrors": + procSnmp6.UdpLite6.InErrors = value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 8c7b6e80a31f..06c556ef9623 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -81,10 +81,10 @@ type ProcStat struct { STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. - CUTime uint + CUTime int // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. 
- CSTime uint + CSTime int // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int @@ -115,7 +115,7 @@ type ProcStat struct { // NewStat returns the current status information of the process. // -// Deprecated: use p.Stat() instead +// Deprecated: Use p.Stat() instead. func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } @@ -141,6 +141,11 @@ func (p Proc) Stat() (ProcStat, error) { } s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 6edd8333b334..594022ded48a 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // nolint:revive // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // nolint:revive // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // nolint:revive // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // nolint:revive // Peak resident set size. - VmHWM uint64 // nolint:golint + VmHWM uint64 // nolint:revive // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // nolint:revive // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // nolint:revive // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // nolint:revive // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // nolint:revive // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // nolint:revive // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // nolint:revive // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // nolint:revive // Shared library code size. - VmLib uint64 // nolint:golint + VmLib uint64 // nolint:revive // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // nolint:revive // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // nolint:revive // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint + VmSwap uint64 // nolint:revive // Size of hugetlb memory portions HugetlbPages uint64 diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 000000000000..d46533ebf419 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 28228164efba..5f7f32dc83c6 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -40,7 +40,7 @@ type Schedstat struct { CPUs []*SchedstatCPU } -// SchedstatCPU contains the values from one "cpu" line +// SchedstatCPU contains the values from one "cpu" line. type SchedstatCPU struct { CPUNum string @@ -49,14 +49,14 @@ type SchedstatCPU struct { RunTimeslices uint64 } -// ProcSchedstat contains the values from /proc/<pid>/schedstat +// ProcSchedstat contains the values from `/proc/<pid>/schedstat`. type ProcSchedstat struct { RunningNanoseconds uint64 WaitingNanoseconds uint64 RunTimeslices uint64 } -// Schedstat reads data from /proc/schedstat +// Schedstat reads data from `/proc/schedstat`. func (fs FS) Schedstat() (*Schedstat, error) { file, err := os.Open(fs.proc.Path("schedstat")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 7896fd724283..bc9aaf5c2889 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { return s, nil } -// SlabInfo reads data from /proc/slabinfo +// SlabInfo reads data from `/proc/slabinfo`. func (fs FS) SlabInfo() (SlabInfo, error) { // TODO: Consider passing options to allow for parsing different // slabinfo versions. However, slabinfo 2.1 has been stable since diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 000000000000..559129cbca3c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. +type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("softirqs empty") + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if 
softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + } + + return softirqs, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6d8727541e40..33f97caa08da 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -41,7 +41,7 @@ type CPUStat struct { // SoftIRQStat represent the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs +// It is possible to get per-cpu stats by reading `/proc/softirqs`. type SoftIRQStat struct { Hi uint64 Timer uint64 @@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { // NewStat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func NewStat() (Stat, error) { fs, err := NewFS(fs.DefaultProcMountPoint) if err != nil { @@ -155,15 +155,15 @@ func NewStat() (Stat, error) { } // NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func (fs FS) NewStat() (Stat, error) { return fs.Stat() } // Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { fileName := fs.proc.Path("stat") data, err := util.ReadFileNoStat(fileName) diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cb13891414b1..20ceb77e2df7 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -29,7 +29,7 @@ import ( // https://www.kernel.org/doc/Documentation/sysctl/vm.txt // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string +// and numa_zonelist_order (deprecated) which is a string. 
type VM struct { AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes BlockDump *int64 // /proc/sys/vm/block_dump @@ -87,7 +87,7 @@ func (fs FS) VM() (*VM, error) { return nil, fmt.Errorf("%s is not a directory", path) } - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go deleted file mode 100644 index eed07c7d7748..000000000000 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int - XfrmOutStateInvalid int - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 209e2ac98798..c745a4c04ff1 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) // structs containing the relevant info. 
More information available here: // https://www.kernel.org/doc/Documentation/sysctl/vm.txt func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md index d5a8649bd532..d9c08a22fc54 100644 --- a/vendor/github.com/russross/blackfriday/v2/README.md +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -1,4 +1,6 @@ -Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] =========== Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It @@ -16,19 +18,21 @@ It started as a translation from C of [Sundown][3]. Installation ------------ -Blackfriday is compatible with any modern Go release. With Go 1.7 and git -installed: +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: - go get gopkg.in/russross/blackfriday.v2 + go get github.com/russross/blackfriday/v2 -will download, compile, and install the package into your `$GOPATH` -directory hierarchy. Alternatively, you can achieve the same if you -import it into a project: +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: - import "gopkg.in/russross/blackfriday.v2" + import "github.com/russross/blackfriday/v2" and `go get` without parameters. +Legacy GOPATH mode is unsupported. + Versions -------- @@ -36,13 +40,9 @@ Versions Currently maintained and recommended version of Blackfriday is `v2`. It's being developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. +https://pkg.go.dev/github.com/russross/blackfriday/v2. -It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. Version 2 offers a number of improvements over v1: @@ -62,6 +62,11 @@ Potential drawbacks: v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for tracking. +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + Usage ----- @@ -91,7 +96,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday: ```go import ( "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday" + "github.com/russross/blackfriday/v2" ) // ... @@ -104,6 +109,8 @@ html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) If you want to customize the set of options, use `blackfriday.WithExtensions`, `blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. +### `blackfriday-tool` + You can also check out `blackfriday-tool` for a more complete example of how to use it. 
Download and install it using: @@ -114,7 +121,7 @@ markdown file using a standalone program. You can also browse the source directly on github if you are just looking for some example code: -* +* Note that if you have not already done so, installing `blackfriday-tool` will be sufficient to download and install @@ -123,6 +130,22 @@ installed in `$GOPATH/bin`. This is a statically-linked binary that can be copied to wherever you need it without worrying about dependencies and library versions. +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + Features -------- @@ -199,6 +222,15 @@ implements the following extensions: You can use 3 or more backticks to mark the beginning of the block, and the same number to mark the end of the block. + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + * **Definition lists**. A simple definition list is made of a single-line term followed by a colon and the definition for that term. @@ -250,7 +282,7 @@ Other renderers Blackfriday is structured to allow alternative rendering engines. Here are a few of note: -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): provides a GitHub Flavored Markdown renderer with fenced code block highlighting, clickable heading anchor links. @@ -261,20 +293,28 @@ are a few of note: * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, but for markdown. -* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX): +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): renders output as LaTeX. +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + * [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + -Todo +TODO ---- * More unit testing -* Improve unicode support. 
It does not understand all unicode +* Improve Unicode support. It does not understand all Unicode rules (about what constitutes a letter, a punctuation symbol, etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all utf-8 input. + some instances. It is safe on all UTF-8 input. License @@ -286,6 +326,10 @@ License [1]: https://daringfireball.net/projects/markdown/ "Markdown" [2]: https://golang.org/ "Go Language" [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go index b8607474e599..dcd61e6e35bc 100644 --- a/vendor/github.com/russross/blackfriday/v2/block.go +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -18,8 +18,7 @@ import ( "html" "regexp" "strings" - - "github.com/shurcooL/sanitized_anchor_name" + "unicode" ) const ( @@ -259,7 +258,7 @@ func (p *Markdown) prefixHeading(data []byte) int { } if end > i { if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[i:end])) + id = SanitizedAnchorName(string(data[i:end])) } block := p.addBlock(Heading, data[i:end]) block.HeadingID = id @@ -673,6 +672,7 @@ func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { if beg == 0 || beg >= len(data) { return 0 } + fenceLength := beg - 1 var work bytes.Buffer work.Write([]byte(info)) @@ -706,6 +706,7 @@ func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { if doRender { block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer block.IsFenced = true + block.FenceLength = fenceLength finalizeCodeBlock(block) } @@ -1503,7 +1504,7 @@ func (p *Markdown) paragraph(data []byte) int { id := "" if p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[prev:eol])) + id = SanitizedAnchorName(string(data[prev:eol])) } block := p.addBlock(Heading, data[prev:eol]) @@ -1588,3 +1589,24 @@ func skipUntilChar(text []byte, start int, char byte) int { } return i } + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. 
+func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go index 5b3fa9876ac8..57ff152a0568 100644 --- a/vendor/github.com/russross/blackfriday/v2/doc.go +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -15,4 +15,32 @@ // // If you're interested in calling Blackfriday from command line, see // https://github.com/russross/blackfriday-tool. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when the AutoHeadingIDs extension is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that precede the first valid character, +// as well as invalid characters that follow the last valid character, +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need the full functionality of blackfriday. package blackfriday + +// NOTE: Keep the Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday.
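As a quick aside (not part of the vendored change): the anchor-name algorithm that the doc.go comment above specifies is easy to sanity-check by hand. Below is a minimal sketch using the SanitizedAnchorName function added in block.go; the inputs and expected outputs are worked examples derived from the specification, not test vectors shipped with the package.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Letters and numbers are lowercased and kept; each run of any other
	// characters between two valid ones collapses to a single dash, and
	// leading/trailing invalid characters are dropped entirely.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, World!"))      // hello-world
	fmt.Println(blackfriday.SanitizedAnchorName("  10. Résumé & CV ")) // 10-résumé-cv
}
```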
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 000000000000..a2c3edb691c8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": 
true, + "𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": 
true, + "⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": 
true, + "⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + 
"♭": true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, 
+ "↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, 
+ "≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, 
+ "⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": 
true, + "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go index 6385f27cb6a4..6ab60102c9bf 100644 --- a/vendor/github.com/russross/blackfriday/v2/esc.go +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -13,13 +13,27 @@ var htmlEscaper = [256][]byte{ } func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { var start, end int for end < len(s) { escSeq := htmlEscaper[s[end]] if escSeq != nil { - w.Write(s[start:end]) - w.Write(escSeq) - start = end + 1 + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } } end++ } @@ -28,6 +42,28 @@ func escapeHTML(w io.Writer, s []byte) { } } +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + func escLink(w io.Writer, text []byte) { unesc := html.UnescapeString(string(text)) escapeHTML(w, []byte(unesc)) diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go index 284c87184f77..cb4f26e30fd5 100644 --- a/vendor/github.com/russross/blackfriday/v2/html.go +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -132,7 +132,10 @@ func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { } if params.FootnoteReturnLinkContents == "" { - params.FootnoteReturnLinkContents = `[return]` + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. 
+ params.FootnoteReturnLinkContents = "↩\ufe0e" } return &HTMLRenderer{ @@ -616,7 +619,7 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkSt } case Code: r.out(w, codeTag) - escapeHTML(w, node.Literal) + escapeAllHTML(w, node.Literal) r.out(w, codeCloseTag) case Document: break @@ -762,7 +765,7 @@ func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkSt r.cr(w) r.out(w, preTag) r.tag(w, codeTag[:len(codeTag)-1], attrs) - escapeHTML(w, node.Literal) + escapeAllHTML(w, node.Literal) r.out(w, codeCloseTag) r.out(w, preCloseTag) if node.Parent.Type != Item { diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go index 4ed2907921e0..d45bd941726e 100644 --- a/vendor/github.com/russross/blackfriday/v2/inline.go +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -278,7 +278,7 @@ func link(p *Markdown, data []byte, offset int) (int, *Node) { case data[i] == '\n': textHasNl = true - case data[i-1] == '\\': + case isBackslashEscaped(data, i): continue case data[i] == '[': diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go index 51b9e8c1b538..04e6050ceeae 100644 --- a/vendor/github.com/russross/blackfriday/v2/node.go +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -199,7 +199,8 @@ func (n *Node) InsertBefore(sibling *Node) { } } -func (n *Node) isContainer() bool { +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { switch n.Type { case Document: fallthrough @@ -238,6 +239,11 @@ func (n *Node) isContainer() bool { } } +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + func (n *Node) canContain(t NodeType) bool { if n.Type == List { return t == Item @@ -309,11 +315,11 @@ func newNodeWalker(root *Node) *nodeWalker { } func (nw *nodeWalker) next() { - if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { nw.current = nil return } - if nw.entering && nw.current.isContainer() { + if nw.entering && nw.current.IsContainer() { if nw.current.FirstChild != nil { nw.current = nw.current.FirstChild nw.entering = true diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE new file mode 100644 index 000000000000..e51324f9b5b4 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2021 NYU Secure Systems Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go new file mode 100644 index 000000000000..fb1d5918b282 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -0,0 +1,145 @@ +package cjson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "regexp" + "sort" +) + +/* +encodeCanonicalString is a helper function to canonicalize the passed string +according to the OLPC canonical JSON specification for strings (see +http://wiki.laptop.org/go/Canonical_JSON). String canonicalization consists of +escaping backslashes ("\") and double quotes (") and wrapping the resulting +string in double quotes ("). +*/ +func encodeCanonicalString(s string) string { + re := regexp.MustCompile(`([\"\\])`) + return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) +} + +/* +encodeCanonical is a helper function to recursively canonicalize the passed +object according to the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed +*bytes.Buffer. If canonicalization fails it returns an error. +*/ +func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { + // Since this function is called recursively, we use panic if an error occurs + // and recover in a deferred function, which is always called before + // returning. There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + switch objAsserted := obj.(type) { + case string: + result.WriteString(encodeCanonicalString(objAsserted)) + + case bool: + if objAsserted { + result.WriteString("true") + } else { + result.WriteString("false") + } + + // The wrapping `EncodeCanonical` function decodes the passed json data with + // `decoder.UseNumber` so that any numeric value is stored as `json.Number` + // (instead of the default `float64`). This allows us to assert that it is a + // non-floating point number, which are the only numbers allowed by the used + // canonicalization specification. + case json.Number: + if _, err := objAsserted.Int64(); err != nil { + panic(fmt.Sprintf("Can't canonicalize floating point number '%s'", + objAsserted)) + } + result.WriteString(objAsserted.String()) + + case nil: + result.WriteString("null") + + // Canonicalize slice + case []interface{}: + result.WriteString("[") + for i, val := range objAsserted { + if err := encodeCanonical(val, result); err != nil { + return err + } + if i < (len(objAsserted) - 1) { + result.WriteString(",") + } + } + result.WriteString("]") + + case map[string]interface{}: + result.WriteString("{") + + // Make a list of keys + var mapKeys []string + for key := range objAsserted { + mapKeys = append(mapKeys, key) + } + // Sort keys + sort.Strings(mapKeys) + + // Canonicalize map + for i, key := range mapKeys { + // Note: `key` must be a `string` (see `case map[string]interface{}`) and + // canonicalization of strings cannot err out (see `case string`), thus + // no error handling is needed here. 
+ encodeCanonical(key, result) + + result.WriteString(":") + if err := encodeCanonical(objAsserted[key], result); err != nil { + return err + } + if i < (len(mapKeys) - 1) { + result.WriteString(",") + } + i++ + } + result.WriteString("}") + + default: + // We recover in a deferred function defined above + panic(fmt.Sprintf("Can't canonicalize '%s' of type '%s'", + objAsserted, reflect.TypeOf(objAsserted))) + } + return nil +} + +/* +EncodeCanonical JSON canonicalizes the passed object and returns it as a byte +slice. It uses the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte +slice is nil and the second return value contains the error. +*/ +func EncodeCanonical(obj interface{}) ([]byte, error) { + // FIXME: Terrible hack to turn the passed struct into a map, converting + // the struct's variable names to the json key names defined in the struct + data, err := json.Marshal(obj) + if err != nil { + return nil, err + } + var jsonMap interface{} + + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&jsonMap); err != nil { + return nil, err + } + + // Create a buffer and write the canonicalized JSON bytes to it + var result bytes.Buffer + if err := encodeCanonical(jsonMap, &result); err != nil { + return nil, err + } + + return result.Bytes(), nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go new file mode 100644 index 000000000000..3dc05a4294e1 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -0,0 +1,197 @@ +/* +Package dsse implements the Dead Simple Signing Envelope (DSSE) +https://github.com/secure-systems-lab/dsse +*/ +package dsse + +import ( + "encoding/base64" + "errors" + "fmt" +) + +// ErrUnknownKey indicates that the implementation does not recognize the +// key. +var ErrUnknownKey = errors.New("unknown key") + +// ErrNoSignature indicates that an envelope did not contain any signatures. +var ErrNoSignature = errors.New("no signature found") + +// ErrNoSigners indicates that no signer was provided. +var ErrNoSigners = errors.New("no signers provided") + +/* +Envelope captures an envelope as described by the Secure Systems Lab +Signing Specification. See here: +https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md +*/ +type Envelope struct { + PayloadType string `json:"payloadType"` + Payload string `json:"payload"` + Signatures []Signature `json:"signatures"` +} + +/* +DecodeB64Payload returns the serialized body, decoded +from the envelope's payload field. A flexible +decoder is used, first trying standard base64, then +URL-encoded base64. +*/ +func (e *Envelope) DecodeB64Payload() ([]byte, error) { + return b64Decode(e.Payload) +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the key which was used to create the signature. +The used signature scheme has to be agreed upon by the signer and verifier +out of band. +The signature is a base64 encoding of the raw bytes from the signature +algorithm.
+*/
+type Signature struct {
+	KeyID string `json:"keyid"`
+	Sig   string `json:"sig"`
+}
+
+/*
+PAE implements the DSSE Pre-Authentication Encoding
+https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
+*/
+func PAE(payloadType string, payload []byte) []byte {
+	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+		len(payloadType), payloadType,
+		len(payload), payload))
+}
+
+/*
+Signer defines the interface for an abstract signing algorithm.
+The Signer interface is used to inject signature algorithm implementations
+into the EnvelopeSigner. This decoupling allows any signing algorithm and
+key management system to be used.
+The full message is provided as the parameter. If the signature algorithm
+depends on hashing of the message prior to signature calculation, the
+implementor of this interface must perform such hashing.
+The function must return raw bytes representing the calculated signature
+using the current algorithm, and the key used (if applicable).
+For an example see EcdsaSigner in sign_test.go.
+*/
+type Signer interface {
+	Sign(data []byte) ([]byte, error)
+	KeyID() (string, error)
+}
+
+// SignVerifier provides both the signing and verification interface.
+type SignVerifier interface {
+	Signer
+	Verifier
+}
+
+// EnvelopeSigner creates signed Envelopes.
+type EnvelopeSigner struct {
+	providers []SignVerifier
+	ev        *EnvelopeVerifier
+}
+
+/*
+NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
+algorithms to sign the data.
+It creates a verifier with threshold=1: at least one of the providers must
+validate signatures successfully.
+*/
+func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) {
+	return NewMultiEnvelopeSigner(1, p...)
+}
+
+/*
+NewMultiEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
+algorithms to sign the data.
+It creates a verifier with the given threshold, which indicates the number
+of providers that must validate the envelope.
+*/
+func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, error) {
+	var providers []SignVerifier
+
+	for _, sv := range p {
+		if sv != nil {
+			providers = append(providers, sv)
+		}
+	}
+
+	if len(providers) == 0 {
+		return nil, ErrNoSigners
+	}
+
+	evps := []Verifier{}
+	for _, p := range providers {
+		evps = append(evps, p.(Verifier))
+	}
+
+	ev, err := NewMultiEnvelopeVerifier(threshold, evps...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &EnvelopeSigner{
+		providers: providers,
+		ev:        ev,
+	}, nil
+}
+
+/*
+SignPayload signs a payload and payload type according to DSSE.
+Returned is an envelope as defined here:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+One signature will be added for each Signer in the EnvelopeSigner.
+*/
+func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) {
+	var e = Envelope{
+		Payload:     base64.StdEncoding.EncodeToString(body),
+		PayloadType: payloadType,
+	}
+
+	paeEnc := PAE(payloadType, body)
+
+	for _, signer := range es.providers {
+		sig, err := signer.Sign(paeEnc)
+		if err != nil {
+			return nil, err
+		}
+		keyID, err := signer.KeyID()
+		if err != nil {
+			keyID = ""
+		}
+
+		e.Signatures = append(e.Signatures, Signature{
+			KeyID: keyID,
+			Sig:   base64.StdEncoding.EncodeToString(sig),
+		})
+	}
+
+	return &e, nil
+}
+
+/*
+Verify decodes the payload and verifies the signature.
+Any domain-specific validation, such as parsing the decoded body and
+validating the payload type, is left to the caller.
+Verify returns a list of accepted keys, each including the key ID, public key,
+and signature of the accepted provider key.
+*/
+func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) {
+	return es.ev.Verify(e)
+}
+
+/*
+Both standard and URL encoding are allowed:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+*/
+func b64Decode(s string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return b, nil
}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
new file mode 100644
index 000000000000..ead1c32ca80b
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
@@ -0,0 +1,146 @@
+package dsse
+
+import (
+	"crypto"
+	"errors"
+	"fmt"
+
+	"golang.org/x/crypto/ssh"
+)
+
+/*
+Verifier verifies a complete message against a signature and key.
+If the message was hashed prior to signature generation, the verifier
+must perform the same steps.
+If KeyID returns successfully, only signatures matching the key ID will be
+verified.
+*/
+type Verifier interface {
+	Verify(data, sig []byte) error
+	KeyID() (string, error)
+	Public() crypto.PublicKey
+}
+
+type EnvelopeVerifier struct {
+	providers []Verifier
+	threshold int
+}
+
+type AcceptedKey struct {
+	Public crypto.PublicKey
+	KeyID  string
+	Sig    Signature
+}
+
+func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) {
+	if e == nil {
+		return nil, errors.New("cannot verify a nil envelope")
+	}
+
+	if len(e.Signatures) == 0 {
+		return nil, ErrNoSignature
+	}
+
+	// Decode payload (i.e. the serialized body)
+	body, err := e.DecodeB64Payload()
+	if err != nil {
+		return nil, err
+	}
+	// Generate PAE(payloadType, serialized body)
+	paeEnc := PAE(e.PayloadType, body)
+
+	// If *any* signature is found to be incorrect, it is skipped
+	var acceptedKeys []AcceptedKey
+	usedKeyids := make(map[string]string)
+	unverifiedProviders := ev.providers
+	for _, s := range e.Signatures {
+		sig, err := b64Decode(s.Sig)
+		if err != nil {
+			return nil, err
+		}
+
+		// Loop over the providers.
+		// If the provider and the signature both include key IDs but they
+		// do not match, skip. If a provider recognizes the key, we exit
+		// the loop and use the result.
+		providers := unverifiedProviders
+		for i, v := range providers {
+			keyID, err := v.KeyID()
+
+			// Verifiers that do not provide a key ID will have one generated
+			// from their public key.
+			if err != nil || keyID == "" {
+				keyID, err = SHA256KeyID(v.Public())
+				if err != nil {
+					keyID = ""
+				}
+			}
+
+			if s.KeyID != "" && keyID != "" && err == nil && s.KeyID != keyID {
+				continue
+			}
+
+			err = v.Verify(paeEnc, sig)
+			if err != nil {
+				continue
+			}
+
+			acceptedKey := AcceptedKey{
+				Public: v.Public(),
+				KeyID:  keyID,
+				Sig:    s,
+			}
+			unverifiedProviders = removeIndex(providers, i)
+
+			// See https://github.com/in-toto/in-toto/pull/251
+			if _, ok := usedKeyids[keyID]; ok {
+				fmt.Printf("Found envelope signed by different subkeys of the same main key; only one of them is counted towards the step threshold, KeyID=%s\n", keyID)
+				continue
+			}
+
+			usedKeyids[keyID] = ""
+			acceptedKeys = append(acceptedKeys, acceptedKey)
+			break
+		}
+	}
+
+	// Sanity check in case the threshold was somehow mutated, e.g. via
+	// reflection.
+	if ev.threshold <= 0 || ev.threshold > len(ev.providers) {
+		return nil, errors.New("invalid threshold")
+	}
+
+	if len(usedKeyids) < ev.threshold {
+		return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, found: %d, expected %d", len(acceptedKeys), ev.threshold)
+	}
+
+	return acceptedKeys, nil
+}
+
+func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) {
+	return NewMultiEnvelopeVerifier(1, v...)
+}
+
+func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) {
+
+	if threshold <= 0 || threshold > len(p) {
+		return nil, errors.New("invalid threshold")
+	}
+
+	ev := EnvelopeVerifier{
+		providers: p,
+		threshold: threshold,
+	}
+	return &ev, nil
+}
+
+func SHA256KeyID(pub crypto.PublicKey) (string, error) {
+	// Generate public key fingerprint
+	sshpk, err := ssh.NewPublicKey(pub)
+	if err != nil {
+		return "", err
+	}
+	fingerprint := ssh.FingerprintSHA256(sshpk)
+	return fingerprint, nil
+}
+
+func removeIndex(v []Verifier, index int) []Verifier {
+	// Copy the slice before cutting out the element so that the backing
+	// array of the caller's slice (ev.providers) is not mutated.
+	out := make([]Verifier, 0, len(v)-1)
+	out = append(out, v[:index]...)
+	return append(out, v[index+1:]...)
+}
diff --git a/vendor/github.com/shibumi/go-pathspec/.gitignore b/vendor/github.com/shibumi/go-pathspec/.gitignore
new file mode 100644
index 000000000000..3e32393f1238
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+# ignore .idea
+.idea
diff --git a/vendor/github.com/shibumi/go-pathspec/GO-LICENSE b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE
new file mode 100644
index 000000000000..74487567632c
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
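The vendored `cjson` and `dsse` packages above are used together for attestation signing: `cjson.EncodeCanonical` produces a deterministic byte representation of a payload, and `dsse` signs and verifies `PAE(payloadType, payload)` envelopes against a key threshold. A minimal usage sketch (not part of this patch), assuming a hypothetical `ed25519SignVerifier` type that implements `dsse.SignVerifier`; error handling is elided for brevity:

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"errors"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/cjson"
	"github.com/secure-systems-lab/go-securesystemslib/dsse"
)

// ed25519SignVerifier is a hypothetical dsse.SignVerifier backed by an
// ed25519 key pair; the dsse package itself ships no implementations.
type ed25519SignVerifier struct {
	priv ed25519.PrivateKey
}

func (s *ed25519SignVerifier) Sign(data []byte) ([]byte, error) {
	return ed25519.Sign(s.priv, data), nil
}

func (s *ed25519SignVerifier) Verify(data, sig []byte) error {
	if !ed25519.Verify(s.priv.Public().(ed25519.PublicKey), data, sig) {
		return errors.New("invalid signature")
	}
	return nil
}

func (s *ed25519SignVerifier) KeyID() (string, error)   { return "example-key", nil }
func (s *ed25519SignVerifier) Public() crypto.PublicKey { return s.priv.Public() }

func main() {
	_, priv, _ := ed25519.GenerateKey(rand.Reader)
	sv := &ed25519SignVerifier{priv: priv}

	// Canonicalize the payload so that signing is deterministic.
	payload, _ := cjson.EncodeCanonical(map[string]interface{}{"hello": "world"})

	// SignPayload base64-encodes the payload and signs
	// PAE(payloadType, payload) = "DSSEv1 <len(type)> <type> <len(payload)> <payload>".
	signer, _ := dsse.NewEnvelopeSigner(sv)
	env, _ := signer.SignPayload("application/vnd.example+json", payload)

	// Verify succeeds once at least threshold providers accept a signature.
	verifier, _ := dsse.NewEnvelopeVerifier(sv)
	accepted, err := verifier.Verify(env)
	fmt.Println(len(accepted), err) // 1 <nil>
}
```

`NewEnvelopeVerifier` uses threshold=1, so a single accepted key suffices; `NewMultiEnvelopeVerifier` can require more distinct keys.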
diff --git a/vendor/github.com/shibumi/go-pathspec/LICENSE b/vendor/github.com/shibumi/go-pathspec/LICENSE new file mode 100644 index 000000000000..5c304d1a4a7b --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/shibumi/go-pathspec/README.md b/vendor/github.com/shibumi/go-pathspec/README.md
new file mode 100644
index 000000000000..c146cf69b012
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/README.md
@@ -0,0 +1,45 @@
+# go-pathspec
+
+[![build](https://github.com/shibumi/go-pathspec/workflows/build/badge.svg)](https://github.com/shibumi/go-pathspec/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/shibumi/go-pathspec/badge.svg)](https://coveralls.io/github/shibumi/go-pathspec) [![PkgGoDev](https://pkg.go.dev/badge/github.com/shibumi/go-pathspec)](https://pkg.go.dev/github.com/shibumi/go-pathspec)
+
+go-pathspec implements gitignore-style pattern matching for paths.
+
+## Alternatives
+
+There are a few alternatives that try to be gitignore-compatible, or even
+claim gitignore compatibility:
+
+### https://github.com/go-git/go-git
+
+go-git claims to be gitignore-compatible, but it actually misses a few
+special cases. This issue describes one of the patterns that does not work: https://github.com/go-git/go-git/issues/108
+
+What does not work is global filename pattern matching. Consider the following
+`.gitignore` file:
+
+```gitignore
+# gitignore test file
+parse.go
+```
+
+Then `parse.go` should match all files named `parse.go`. You can test this via
+this shell script:
+```shell
+mkdir -p /tmp/test/internal/util
+touch /tmp/test/internal/util/parse.go
+cd /tmp/test/
+git init
+echo "parse.go" > .gitignore
+```
+
+With git, `parse.go` will be excluded. The go-git implementation behaves
+differently.
+
+### https://github.com/monochromegane/go-gitignore
+
+monochromegane's go-gitignore does not support the `**` operator. This is
+also inconsistent with real gitignore behavior.
+
+## Authors
+
+Sander van Harmelen
+Christian Rebischke
diff --git a/vendor/github.com/shibumi/go-pathspec/gitignore.go b/vendor/github.com/shibumi/go-pathspec/gitignore.go
new file mode 100644
index 000000000000..2b08d4e8a573
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/gitignore.go
@@ -0,0 +1,299 @@
+//
+// Copyright 2014, Sander van Harmelen
+// Copyright 2020, Christian Rebischke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package pathspec implements git compatible gitignore pattern matching.
+// See the description below if you are unfamiliar with it:
+//
+// A blank line matches no files, so it can serve as a separator for readability.
+//
+// A line starting with # serves as a comment. Put a backslash ("\") in front of
+// the first hash for patterns that begin with a hash.
+//
+// An optional prefix "!" which negates the pattern; any matching file excluded
+// by a previous pattern will become included again. If a negated pattern matches,
+// this will override lower-precedence pattern sources. Put a backslash ("\") in
+// front of the first "!" for patterns that begin with a literal "!", for example,
+// "\!important!.txt".
+//
+// If the pattern ends with a slash, it is removed for the purpose of the following
+// description, but it would only find a match with a directory. In other words,
+// foo/ will match a directory foo and paths underneath it, but will not match a
+// regular file or a symbolic link foo (this is consistent with the way pathspec
+// works in general in Git).
+//
+// If the pattern does not contain a slash /, Git treats it as a shell glob pattern
+// and checks for a match against the pathname relative to the location of the
+// .gitignore file (relative to the toplevel of the work tree if not from a
+// .gitignore file).
+//
+// Otherwise, Git treats the pattern as a shell glob suitable for consumption by
+// fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not match
+// a / in the pathname. For example, "Documentation/*.html" matches
+// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
+// "tools/perf/Documentation/perf.html".
+//
+// A leading slash matches the beginning of the pathname. For example, "/*.c"
+// matches "cat-file.c" but not "mozilla-sha1/sha1.c".
+//
+// Two consecutive asterisks ("**") in patterns matched against full pathname
+// may have special meaning:
+//
+// A leading "**" followed by a slash means match in all directories. For example,
+// "**/foo" matches file or directory "foo" anywhere, the same as pattern "foo".
+// "**/foo/bar" matches file or directory "bar" anywhere that is directly under
+// directory "foo".
+//
+// A trailing "/**" matches everything inside. For example, "abc/**" matches all
+// files inside directory "abc", relative to the location of the .gitignore file,
+// with infinite depth.
+//
+// A slash followed by two consecutive asterisks then a slash matches zero or more
+// directories. For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on.
+//
+// Other consecutive asterisks are considered invalid.
+package pathspec
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+type gitIgnorePattern struct {
+	Regex   string
+	Include bool
+}
+
+// GitIgnore matches a file path against a string slice of gitignore patterns.
+// It returns true if the path should be ignored, otherwise false. On error it
+// passes the error through.
+func GitIgnore(patterns []string, name string) (ignore bool, err error) {
+	for _, pattern := range patterns {
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, nil
+			}
+			ignore = true
+		}
+	}
+	return ignore, nil
+}
+
+// ReadGitIgnore reads a gitignore file line by line from an io.Reader. It
+// behaves exactly like the GitIgnore function; the only difference is that
+// GitIgnore works on a string slice.
+//
+// ReadGitIgnore returns whether the name matches, plus any error encountered.
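+//
+// For example, matching the path "build/debug.log" against the patterns
+// "*.log" and "!keep.log" reports that the path is ignored:
+//
+//	ignore, err := pathspec.ReadGitIgnore(strings.NewReader("*.log\n!keep.log\n"), "build/debug.log")
+//	// ignore == true, err == nil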
+func ReadGitIgnore(content io.Reader, name string) (ignore bool, err error) {
+	scanner := bufio.NewScanner(content)
+
+	for scanner.Scan() {
+		pattern := strings.TrimSpace(scanner.Text())
+		if len(pattern) == 0 || pattern[0] == '#' {
+			continue
+		}
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, scanner.Err()
+			}
+			ignore = true
+		}
+	}
+	return ignore, scanner.Err()
+}
+
+func parsePattern(pattern string) *gitIgnorePattern {
+	p := &gitIgnorePattern{}
+
+	// An optional prefix "!" which negates the pattern; any matching file
+	// excluded by a previous pattern will become included again.
+	if strings.HasPrefix(pattern, "!") {
+		pattern = pattern[1:]
+		p.Include = true
+	} else {
+		p.Include = false
+	}
+
+	// Remove leading back-slash escape for escaped hash ('#') or
+	// exclamation mark ('!').
+	if strings.HasPrefix(pattern, "\\") {
+		pattern = pattern[1:]
+	}
+
+	// Split pattern into segments.
+	patternSegs := strings.Split(pattern, "/")
+
+	// A pattern beginning with a slash ('/') will only match paths
+	// directly on the root directory instead of any descendant paths.
+	// So remove the empty first segment to make the pattern absolute
+	// to the root.
+	// A pattern without a beginning slash ('/') will match any
+	// descendant path. This is equivalent to "**/{pattern}". So
+	// prepend with double-asterisks to make pattern relative to
+	// root.
+	if patternSegs[0] == "" {
+		patternSegs = patternSegs[1:]
+	} else if patternSegs[0] != "**" {
+		patternSegs = append([]string{"**"}, patternSegs...)
+	}
+
+	// A pattern ending with a slash ('/') will match all descendant
+	// paths if it is a directory, but not if it is a regular file.
+	// This is equivalent to "{pattern}/**". So, set last segment to
+	// double asterisks to include all descendants.
+	if patternSegs[len(patternSegs)-1] == "" {
+		patternSegs[len(patternSegs)-1] = "**"
+	}
+
+	// Build regular expression from pattern.
+	var expr bytes.Buffer
+	expr.WriteString("^")
+	needSlash := false
+
+	for i, seg := range patternSegs {
+		switch seg {
+		case "**":
+			switch {
+			case i == 0 && i == len(patternSegs)-1:
+				// A pattern consisting solely of double-asterisks ('**')
+				// will match every path.
+				expr.WriteString(".+")
+			case i == 0:
+				// A normalized pattern beginning with double-asterisks
+				// ('**') will match any leading path segments.
+				expr.WriteString("(?:.+/)?")
+				needSlash = false
+			case i == len(patternSegs)-1:
+				// A normalized pattern ending with double-asterisks ('**')
+				// will match any trailing path segments.
+				expr.WriteString("/.+")
+			default:
+				// A pattern with inner double-asterisks ('**') will match
+				// multiple (or zero) inner path segments.
+				expr.WriteString("(?:/.+)?")
+				needSlash = true
+			}
+		case "*":
+			// Match single path segment.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString("[^/]+")
+			needSlash = true
+		default:
+			// Match segment glob pattern.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString(translateGlob(seg))
+			needSlash = true
+		}
+	}
+	expr.WriteString("$")
+	p.Regex = expr.String()
+	return p
+}
+
+// NOTE: This is derived from `fnmatch.translate()` and is similar to
+// the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
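+//
+// For example, the glob "*.tar.[bg]z" is translated to the regular
+// expression `[^/]*\.tar\.[bg]z`.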
+func translateGlob(glob string) string {
+	var regex bytes.Buffer
+	escape := false
+
+	for i := 0; i < len(glob); i++ {
+		char := glob[i]
+		// Escape the character.
+		switch {
+		case escape:
+			escape = false
+			regex.WriteString(regexp.QuoteMeta(string(char)))
+		case char == '\\':
+			// Escape character, escape next character.
+			escape = true
+		case char == '*':
+			// Multi-character wildcard. Match any string (except slashes),
+			// including an empty string.
+			regex.WriteString("[^/]*")
+		case char == '?':
+			// Single-character wildcard. Match any single character (except
+			// a slash).
+			regex.WriteString("[^/]")
+		case char == '[':
+			regex.WriteString(translateBracketExpression(&i, glob))
+		default:
+			// Regular character, escape it for regex.
+			regex.WriteString(regexp.QuoteMeta(string(char)))
+		}
+	}
+	return regex.String()
+}
+
+// Bracket expression wildcard. Except for the beginning
+// exclamation mark, the whole bracket expression can be used
+// directly as a regex, but we have to find where the expression
+// ends.
+// - "[][!]" matches ']', '[' and '!'.
+// - "[]-]" matches ']' and '-'.
+// - "[!]a-]" matches any character except ']', 'a' and '-'.
+func translateBracketExpression(i *int, glob string) string {
+	*i++
+	j := *i
+
+	// Pass bracket expression negation.
+	if j < len(glob) && glob[j] == '!' {
+		j++
+	}
+	// Pass first closing bracket if it is at the beginning of the
+	// expression.
+	if j < len(glob) && glob[j] == ']' {
+		j++
+	}
+	// Find closing bracket. Stop once we reach the end or find it.
+	for j < len(glob) && glob[j] != ']' {
+		j++
+	}
+
+	if j < len(glob) {
+		regex := ""
+		if glob[*i] == '!' {
+			// Keep the negation: '!' at the start of a bracket
+			// expression becomes '^' in the regex.
+			regex = "^"
+			*i++
+		}
+		regex += regexp.QuoteMeta(glob[*i:j])
+		*i = j
+		return "[" + regex + "]"
+	}
+
+	// Failed to find closing bracket, treat opening bracket as a
+	// bracket literal instead of as an expression. Step *i back so
+	// that the character following '[' is processed as usual.
+	*i--
+	return "[" + regexp.QuoteMeta(string(glob[*i])) + "]"
+}
diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml b/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
deleted file mode 100644
index 93b1fcdb31a2..000000000000
--- a/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.x
-  - master
-matrix:
-  allow_failures:
-    - go: master
-  fast_finish: true
-install:
-  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
-  - go get -t -v ./...
-  - diff -u <(echo -n) <(gofmt -d -s .)
-  - go tool vet .
-  - go test -v -race ./...
diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE b/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE
deleted file mode 100644
index c35c17af9808..000000000000
--- a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2015 Dmitri Shuralyov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/README.md b/vendor/github.com/shurcooL/sanitized_anchor_name/README.md deleted file mode 100644 index 670bf0fe6c79..000000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/README.md +++ /dev/null @@ -1,36 +0,0 @@ -sanitized_anchor_name -===================== - -[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name) - -Package sanitized_anchor_name provides a func to create sanitized anchor names. - -Its logic can be reused by multiple packages to create interoperable anchor names -and links to those anchors. - -At this time, it does not try to ensure that generated anchor names -are unique, that responsibility falls on the caller. - -Installation ------------- - -```bash -go get -u github.com/shurcooL/sanitized_anchor_name -``` - -Example -------- - -```Go -anchorName := sanitized_anchor_name.Create("This is a header") - -fmt.Println(anchorName) - -// Output: -// this-is-a-header -``` - -License -------- - -- [MIT License](LICENSE) diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go deleted file mode 100644 index 6a77d1243173..000000000000 --- a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package sanitized_anchor_name provides a func to create sanitized anchor names. -// -// Its logic can be reused by multiple packages to create interoperable anchor names -// and links to those anchors. -// -// At this time, it does not try to ensure that generated anchor names -// are unique, that responsibility falls on the caller. -package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name" - -import "unicode" - -// Create returns a sanitized anchor name for the given text. 
-func Create(text string) string {
-	var anchorName []rune
-	var futureDash = false
-	for _, r := range text {
-		switch {
-		case unicode.IsLetter(r) || unicode.IsNumber(r):
-			if futureDash && len(anchorName) > 0 {
-				anchorName = append(anchorName, '-')
-			}
-			futureDash = false
-			anchorName = append(anchorName, unicode.ToLower(r))
-		default:
-			futureDash = true
-		}
-	}
-	return string(anchorName)
-}
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index 5152b6aa406f..b042c896f25b 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,4 +1,4 @@
-# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus)
 
 Logrus is a structured logger for Go (golang), completely API compatible with
 the standard library logger.
@@ -341,7 +341,7 @@ import (
   log "github.com/sirupsen/logrus"
 )
 
-init() {
+func init() {
   // do something here to set environment depending on an environment variable
   // or command-line flag
   if Environment == "production" {
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
index 4545dec07d8b..c7787f77cbfa 100644
--- a/vendor/github.com/sirupsen/logrus/buffer_pool.go
+++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go
@@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer {
 	return p.pool.Get().(*bytes.Buffer)
 }
 
-func getBuffer() *bytes.Buffer {
-	return bufferPool.Get()
-}
-
-func putBuffer(buf *bytes.Buffer) {
-	buf.Reset()
-	bufferPool.Put(buf)
-}
-
 // SetBufferPool allows replacing the default logrus buffer pool
 // to better meet the specific needs of an application.
 func SetBufferPool(bp BufferPool) {
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 07a1e5fa7249..71cdbbc35d21 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) {
 
 	newEntry.Logger.mu.Lock()
 	reportCaller := newEntry.Logger.ReportCaller
+	bufPool := newEntry.getBufferPool()
 	newEntry.Logger.mu.Unlock()
 
 	if reportCaller {
@@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) {
 	}
 
 	newEntry.fireHooks()
-
-	buffer = getBuffer()
+	buffer = bufPool.Get()
 	defer func() {
 		newEntry.Buffer = nil
-		putBuffer(buffer)
+		buffer.Reset()
+		bufPool.Put(buffer)
 	}()
 	buffer.Reset()
 	newEntry.Buffer = buffer
@@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) {
 	}
 }
 
+func (entry *Entry) getBufferPool() (pool BufferPool) {
+	if entry.Logger.BufferPool != nil {
+		return entry.Logger.BufferPool
+	}
+	return bufferPool
+}
+
 func (entry *Entry) fireHooks() {
 	var tmpHooks LevelHooks
 	entry.Logger.mu.Lock()
@@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() {
 }
 
 func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
 	serialized, err := entry.Logger.Formatter.Format(entry)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
 		return
 	}
-	entry.Logger.mu.Lock()
-	defer entry.Logger.mu.Unlock()
 	if _, err := entry.Logger.Out.Write(serialized); err != nil {
 		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
 	}
 }
 
+// Log will log a message at the level given as a parameter.
+// Warning: using Log at Panic or Fatal level will not panic or exit, respectively.
+// For this behaviour Entry.Panic or Entry.Fatal should be used instead.
 func (entry *Entry) Log(level Level, args ...interface{}) {
 	if entry.Logger.IsLevelEnabled(level) {
 		entry.log(level, fmt.Sprint(args...))
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 337704457a28..5ff0aef6d3f1 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -44,6 +44,9 @@ type Logger struct {
 	entryPool sync.Pool
 	// Function to exit the application, defaults to `os.Exit()`
 	ExitFunc exitFunc
+	// The buffer pool used to format the log. If it is nil, the default global
+	// buffer pool will be used.
+	BufferPool BufferPool
 }
 
 type exitFunc func(int)
@@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
 	logger.Logf(PanicLevel, format, args...)
 }
 
+// Log will log a message at the level given as a parameter.
+// Warning: using Log at Panic or Fatal level will not panic or exit, respectively.
+// For this behaviour Logger.Panic or Logger.Fatal should be used instead.
 func (logger *Logger) Log(level Level, args ...interface{}) {
 	if logger.IsLevelEnabled(level) {
 		entry := logger.newEntry()
@@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
 	logger.mu.Unlock()
 	return oldHooks
 }
+
+// SetBufferPool sets the logger buffer pool.
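+//
+// For example, a caller can plug in a custom pool to control how buffers are
+// reused (newCustomPool is a hypothetical BufferPool implementation):
+//
+//	logger := New()
+//	logger.SetBufferPool(newCustomPool())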
+func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/spdx/tools-golang/LICENSE.code b/vendor/github.com/spdx/tools-golang/LICENSE.code new file mode 100644 index 000000000000..07efb6292aec --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/LICENSE.code @@ -0,0 +1,550 @@ +The tools-golang source code is provided and may be used, at your option, +under either: +* Apache License, version 2.0 (Apache-2.0), OR +* GNU General Public License, version 2.0 or later (GPL-2.0-or-later). + +Copies of both licenses are included below. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. 
Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. 
The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/vendor/github.com/spdx/tools-golang/LICENSE.docs b/vendor/github.com/spdx/tools-golang/LICENSE.docs
new file mode 100644
index 000000000000..2c8e93cbda8c
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/LICENSE.docs
@@ -0,0 +1,399 @@
+The tools-golang documentation is provided under the Creative Commons Attribution
+4.0 International license (CC-BY-4.0), a copy of which is provided below.
+
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable.
Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. 
Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. 
You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. 
+ + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. + diff --git a/vendor/github.com/spdx/tools-golang/json/parser.go b/vendor/github.com/spdx/tools-golang/json/parser.go new file mode 100644 index 000000000000..ee7915de0fc0 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/json/parser.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package spdx_json + +import ( + "bytes" + "encoding/json" + "io" + + "github.com/spdx/tools-golang/spdx/v2_2" + "github.com/spdx/tools-golang/spdx/v2_3" +) + +// Load2_2 takes in an io.Reader and returns an SPDX document. +func Load2_2(content io.Reader) (*v2_2.Document, error) { + // convert io.Reader to a slice of bytes and call the parser + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(content) + if err != nil { + return nil, err + } + + var doc v2_2.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return nil, err + } + + return &doc, nil +} + +// Load2_3 takes in an io.Reader and returns an SPDX document. 
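+// A minimal usage sketch (editor's illustration; the file name and the
+// elided error handling are hypothetical, not part of the upstream API docs):
+//
+//	f, _ := os.Open("sbom.spdx.json") // hypothetical input file
+//	defer f.Close()
+//	doc, err := spdx_json.Load2_3(f)  // doc is a *v2_3.Document on success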
+func Load2_3(content io.Reader) (*v2_3.Document, error) {
+	// convert io.Reader to a slice of bytes and call the parser
+	buf := new(bytes.Buffer)
+	_, err := buf.ReadFrom(content)
+	if err != nil {
+		return nil, err
+	}
+
+	var doc v2_3.Document
+	err = json.Unmarshal(buf.Bytes(), &doc)
+	if err != nil {
+		return nil, err
+	}
+
+	return &doc, nil
+}
diff --git a/vendor/github.com/spdx/tools-golang/json/writer.go b/vendor/github.com/spdx/tools-golang/json/writer.go
new file mode 100644
index 000000000000..8f2b94dc6036
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/json/writer.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package spdx_json
+
+import (
+	"encoding/json"
+	"io"
+
+	"github.com/spdx/tools-golang/spdx/v2_2"
+	"github.com/spdx/tools-golang/spdx/v2_3"
+)
+
+// Save2_2 takes an SPDX Document (version 2.2) and an io.Writer, and writes the document to the writer in JSON format.
+func Save2_2(doc *v2_2.Document, w io.Writer) error {
+	buf, err := json.Marshal(doc)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(buf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Save2_3 takes an SPDX Document (version 2.3) and an io.Writer, and writes the document to the writer in JSON format.
+func Save2_3(doc *v2_3.Document, w io.Writer) error {
+	buf, err := json.Marshal(doc)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(buf)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go
new file mode 100644
index 000000000000..e77d7b780a17
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package common
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type Annotator struct {
+	Annotator string
+	// AnnotatorType: one of "Person", "Organization" or "Tool"
+	AnnotatorType string
+}
+
+// UnmarshalJSON takes an annotator in the typical one-line format and parses it into an Annotator struct.
+// This function is also used when unmarshalling YAML
+func (a *Annotator) UnmarshalJSON(data []byte) error {
+	// annotator will simply be a string
+	annotatorStr := string(data)
+	annotatorStr = strings.Trim(annotatorStr, "\"")
+
+	annotatorFields := strings.SplitN(annotatorStr, ": ", 2)
+
+	if len(annotatorFields) != 2 {
+		return fmt.Errorf("failed to parse Annotator '%s'", annotatorStr)
+	}
+
+	a.AnnotatorType = annotatorFields[0]
+	a.Annotator = annotatorFields[1]
+
+	return nil
+}
+
+// MarshalJSON converts the receiver into a slice of bytes representing an Annotator in string form.
+// This function is also used when marshalling to YAML
+func (a Annotator) MarshalJSON() ([]byte, error) {
+	if a.Annotator != "" {
+		return json.Marshal(fmt.Sprintf("%s: %s", a.AnnotatorType, a.Annotator))
+	}
+
+	return []byte{}, nil
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go b/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go
new file mode 100644
index 000000000000..aa2ae52ff146
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package common
+
+// ChecksumAlgorithm represents the algorithm used to generate the file checksum in the Checksum struct.
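+// For example (editor's illustration; the digest value is made up), a file
+// hashed with SHA-256 would be recorded as:
+//
+//	Checksum{Algorithm: SHA256, Value: "fa7a..."}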
+type ChecksumAlgorithm string
+
+// The checksum algorithms mentioned in the SPDX v2.2.0 spec: https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum
+const (
+	SHA224 ChecksumAlgorithm = "SHA224"
+	SHA1 ChecksumAlgorithm = "SHA1"
+	SHA256 ChecksumAlgorithm = "SHA256"
+	SHA384 ChecksumAlgorithm = "SHA384"
+	SHA512 ChecksumAlgorithm = "SHA512"
+	MD2 ChecksumAlgorithm = "MD2"
+	MD4 ChecksumAlgorithm = "MD4"
+	MD5 ChecksumAlgorithm = "MD5"
+	MD6 ChecksumAlgorithm = "MD6"
+	SHA3_256 ChecksumAlgorithm = "SHA3-256"
+	SHA3_384 ChecksumAlgorithm = "SHA3-384"
+	SHA3_512 ChecksumAlgorithm = "SHA3-512"
+	BLAKE2b_256 ChecksumAlgorithm = "BLAKE2b-256"
+	BLAKE2b_384 ChecksumAlgorithm = "BLAKE2b-384"
+	BLAKE2b_512 ChecksumAlgorithm = "BLAKE2b-512"
+	BLAKE3 ChecksumAlgorithm = "BLAKE3"
+	ADLER32 ChecksumAlgorithm = "ADLER32"
+)
+
+// Checksum provides a unique identifier to match analysis information on each specific file in a package.
+// The Algorithm field describes the ChecksumAlgorithm used and the Value represents the file checksum
+type Checksum struct {
+	Algorithm ChecksumAlgorithm `json:"algorithm"`
+	Value string `json:"checksumValue"`
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go
new file mode 100644
index 000000000000..c87ae7be923f
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package common
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Creator is a wrapper around the Creator SPDX field. The SPDX field contains two values, which requires special
+// handling in order to marshal/unmarshal it to/from Go data types.
+type Creator struct {
+	Creator string
+	// CreatorType should be one of "Person", "Organization", or "Tool"
+	CreatorType string
+}
+
+// UnmarshalJSON takes a creator in the typical one-line format and parses it into a Creator struct.
+// This function is also used when unmarshalling YAML
+func (c *Creator) UnmarshalJSON(data []byte) error {
+	str := string(data)
+	str = strings.Trim(str, "\"")
+	fields := strings.SplitN(str, ": ", 2)
+
+	if len(fields) != 2 {
+		return fmt.Errorf("failed to parse Creator '%s'", str)
+	}
+
+	c.CreatorType = fields[0]
+	c.Creator = fields[1]
+
+	return nil
+}
+
+// MarshalJSON converts the receiver into a slice of bytes representing a Creator in string form.
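+// For example (editor's illustration), Creator{CreatorType: "Tool",
+// Creator: "tools-golang-0.3.0"} marshals to the single JSON string
+// "Tool: tools-golang-0.3.0".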
+// This function is also used when marshalling to YAML
+func (c Creator) MarshalJSON() ([]byte, error) {
+	if c.Creator != "" {
+		return json.Marshal(fmt.Sprintf("%s: %s", c.CreatorType, c.Creator))
+	}
+
+	return []byte{}, nil
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/external.go b/vendor/github.com/spdx/tools-golang/spdx/common/external.go
new file mode 100644
index 000000000000..59c3f0f03f17
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/common/external.go
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package common
+
+// Constants for various string types
+const (
+	// F.2 Security types
+	TypeSecurityCPE23Type string = "cpe23Type"
+	TypeSecurityCPE22Type string = "cpe22Type"
+	TypeSecurityAdvisory string = "advisory"
+	TypeSecurityFix string = "fix"
+	TypeSecurityUrl string = "url"
+	TypeSecuritySwid string = "swid"
+
+	// F.3 Package-Manager types
+	TypePackageManagerMavenCentral string = "maven-central"
+	TypePackageManagerNpm string = "npm"
+	TypePackageManagerNuGet string = "nuget"
+	TypePackageManagerBower string = "bower"
+	TypePackageManagerPURL string = "purl"
+
+	// 11.1 Relationship field types
+	TypeRelationshipDescribe string = "DESCRIBES"
+	TypeRelationshipDescribeBy string = "DESCRIBED_BY"
+	TypeRelationshipContains string = "CONTAINS"
+	TypeRelationshipContainedBy string = "CONTAINED_BY"
+	TypeRelationshipDependsOn string = "DEPENDS_ON"
+	TypeRelationshipDependencyOf string = "DEPENDENCY_OF"
+	TypeRelationshipBuildDependencyOf string = "BUILD_DEPENDENCY_OF"
+	TypeRelationshipDevDependencyOf string = "DEV_DEPENDENCY_OF"
+	TypeRelationshipOptionalDependencyOf string = "OPTIONAL_DEPENDENCY_OF"
+	TypeRelationshipProvidedDependencyOf string = "PROVIDED_DEPENDENCY_OF"
+	TypeRelationshipTestDependencyOf string = "TEST_DEPENDENCY_OF"
+	TypeRelationshipRuntimeDependencyOf string = "RUNTIME_DEPENDENCY_OF"
+	TypeRelationshipExampleOf string = "EXAMPLE_OF"
+	TypeRelationshipGenerates string = "GENERATES"
+	TypeRelationshipGeneratedFrom string = "GENERATED_FROM"
+	TypeRelationshipAncestorOf string = "ANCESTOR_OF"
+	TypeRelationshipDescendantOf string = "DESCENDANT_OF"
+	TypeRelationshipVariantOf string = "VARIANT_OF"
+	TypeRelationshipDistributionArtifact string = "DISTRIBUTION_ARTIFACT"
+	TypeRelationshipPatchFor string = "PATCH_FOR"
+	TypeRelationshipPatchApplied string = "PATCH_APPLIED"
+	TypeRelationshipCopyOf string = "COPY_OF"
+	TypeRelationshipFileAdded string = "FILE_ADDED"
+	TypeRelationshipFileDeleted string = "FILE_DELETED"
+	TypeRelationshipFileModified string = "FILE_MODIFIED"
+	TypeRelationshipExpandedFromArchive string = "EXPANDED_FROM_ARCHIVE"
+	TypeRelationshipDynamicLink string = "DYNAMIC_LINK"
+	TypeRelationshipStaticLink string = "STATIC_LINK"
+	TypeRelationshipDataFileOf string = "DATA_FILE_OF"
+	TypeRelationshipTestCaseOf string = "TEST_CASE_OF"
+	TypeRelationshipBuildToolOf string = "BUILD_TOOL_OF"
+	TypeRelationshipDevToolOf string = "DEV_TOOL_OF"
+	TypeRelationshipTestOf string = "TEST_OF"
+	TypeRelationshipTestToolOf string = "TEST_TOOL_OF"
+	TypeRelationshipDocumentationOf string = "DOCUMENTATION_OF"
+	TypeRelationshipOptionalComponentOf string = "OPTIONAL_COMPONENT_OF"
+	TypeRelationshipMetafileOf string = "METAFILE_OF"
+	TypeRelationshipPackageOf string = "PACKAGE_OF"
+	TypeRelationshipAmends string = "AMENDS"
+	TypeRelationshipPrerequisiteFor string = "PREREQUISITE_FOR"
+	TypeRelationshipHasPrerequisite string = "HAS_PREREQUISITE"
+	TypeRelationshipRequirementDescriptionFor string =
"REQUIREMENT_DESCRIPTION_FOR" + TypeRelationshipSpecificationFor string = "SPECIFICATION_FOR" + TypeRelationshipOther string = "OTHER" +) diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go b/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go new file mode 100644 index 000000000000..806a8157e28c --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + spdxRefPrefix = "SPDXRef-" + documentRefPrefix = "DocumentRef-" +) + +// ElementID represents the identifier string portion of an SPDX element +// identifier. DocElementID should be used for any attributes which can +// contain identifiers defined in a different SPDX document. +// ElementIDs should NOT contain the mandatory 'SPDXRef-' portion. +type ElementID string + +// MarshalJSON returns an SPDXRef- prefixed JSON string +func (d ElementID) MarshalJSON() ([]byte, error) { + return json.Marshal(prefixElementId(d)) +} + +// UnmarshalJSON validates SPDXRef- prefixes and removes them when processing ElementIDs +func (d *ElementID) UnmarshalJSON(data []byte) error { + // SPDX identifier will simply be a string + idStr := string(data) + idStr = strings.Trim(idStr, "\"") + + e, err := trimElementIdPrefix(idStr) + if err != nil { + return err + } + *d = e + return nil +} + +// prefixElementId adds the SPDXRef- prefix to an element ID if it does not have one +func prefixElementId(id ElementID) string { + val := string(id) + if !strings.HasPrefix(val, spdxRefPrefix) { + return spdxRefPrefix + val + } + return val +} + +// trimElementIdPrefix removes the SPDXRef- prefix from an element ID string or returns an error if it +// does not start with SPDXRef- +func trimElementIdPrefix(id string) (ElementID, error) { + // handle SPDXRef- + idFields := strings.SplitN(id, spdxRefPrefix, 2) + if len(idFields) != 2 { + return "", fmt.Errorf("failed to parse SPDX identifier '%s'", id) + } + + e := ElementID(idFields[1]) + return e, nil +} + +// DocElementID represents an SPDX element identifier that could be defined +// in a different SPDX document, and therefore could have a "DocumentRef-" +// portion, such as Relationships and Annotations. +// ElementID is used for attributes in which a "DocumentRef-" portion cannot +// appear, such as a Package or File definition (since it is necessarily +// being defined in the present document). +// DocumentRefID will be the empty string for elements defined in the +// present document. +// DocElementIDs should NOT contain the mandatory 'DocumentRef-' or +// 'SPDXRef-' portions. +// SpecialID is used ONLY if the DocElementID matches a defined set of +// permitted special values for a particular field, e.g. "NONE" or +// "NOASSERTION" for the right-hand side of Relationships. If SpecialID +// is set, DocumentRefID and ElementRefID should be empty (and vice versa). +type DocElementID struct { + DocumentRefID string + ElementRefID ElementID + SpecialID string +} + +// MarshalJSON converts the receiver into a slice of bytes representing a DocElementID in string form. 
+// This function is also used when marshalling to YAML
+func (d DocElementID) MarshalJSON() ([]byte, error) {
+	if d.DocumentRefID != "" && d.ElementRefID != "" {
+		idStr := prefixElementId(d.ElementRefID)
+		return json.Marshal(fmt.Sprintf("%s%s:%s", documentRefPrefix, d.DocumentRefID, idStr))
+	} else if d.ElementRefID != "" {
+		return json.Marshal(prefixElementId(d.ElementRefID))
+	} else if d.SpecialID != "" {
+		return json.Marshal(d.SpecialID)
+	}
+
+	return []byte{}, fmt.Errorf("failed to marshal empty DocElementID")
+}
+
+// UnmarshalJSON takes an SPDX Identifier string and parses it into a DocElementID struct.
+// This function is also used when unmarshalling YAML
+func (d *DocElementID) UnmarshalJSON(data []byte) (err error) {
+	// SPDX identifier will simply be a string
+	idStr := string(data)
+	idStr = strings.Trim(idStr, "\"")
+
+	// handle special cases
+	if idStr == "NONE" || idStr == "NOASSERTION" {
+		d.SpecialID = idStr
+		return nil
+	}
+
+	var idFields []string
+	// handle DocumentRef- if present
+	if strings.HasPrefix(idStr, documentRefPrefix) {
+		// strip out the "DocumentRef-" so we can get the value
+		idFields = strings.SplitN(idStr, documentRefPrefix, 2)
+		idStr = idFields[1]
+
+		// an SPDXRef can appear after a DocumentRef, separated by a colon
+		idFields = strings.SplitN(idStr, ":", 2)
+		d.DocumentRefID = idFields[0]
+
+		if len(idFields) == 2 {
+			idStr = idFields[1]
+		} else {
+			return nil
+		}
+	}
+
+	d.ElementRefID, err = trimElementIdPrefix(idStr)
+	return err
+}
+
+// TODO: add equivalents for LicenseRef- identifiers
+
+// MakeDocElementID takes strings (without prefixes) for the DocumentRef-
+// and SPDXRef- identifiers, and returns a DocElementID. An empty string
+// should be used for the DocumentRef- portion if it is referring to the
+// present document.
+func MakeDocElementID(docRef string, eltRef string) DocElementID {
+	return DocElementID{
+		DocumentRefID: docRef,
+		ElementRefID:  ElementID(eltRef),
+	}
+}
+
+// MakeDocElementSpecial takes a "special" string (e.g. "NONE" or
+// "NOASSERTION" for the right side of a Relationship), and returns
+// a DocElementID with it in the SpecialID field. Other fields will
+// be empty.
+func MakeDocElementSpecial(specialID string) DocElementID {
+	return DocElementID{SpecialID: specialID}
+}
+
+// RenderElementID takes an ElementID and returns the string equivalent,
+// with the SPDXRef- prefix reinserted.
+func RenderElementID(eID ElementID) string {
+	return spdxRefPrefix + string(eID)
+}
+
+// RenderDocElementID takes a DocElementID and returns the string equivalent,
+// with the SPDXRef- prefix (and, if applicable, the DocumentRef- prefix)
+// reinserted. If a SpecialID is present, it will be rendered verbatim and
+// DocumentRefID and ElementRefID will be ignored.
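+// For example (editor's illustration): DocElementID{DocumentRefID: "ext",
+// ElementRefID: "Package-1"} marshals to "DocumentRef-ext:SPDXRef-Package-1",
+// MakeDocElementID("", "File1") renders as "SPDXRef-File1", and a SpecialID
+// of "NOASSERTION" renders as just "NOASSERTION".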
+func RenderDocElementID(deID DocElementID) string { + if deID.SpecialID != "" { + return deID.SpecialID + } + prefix := "" + if deID.DocumentRefID != "" { + prefix = documentRefPrefix + deID.DocumentRefID + ":" + } + return prefix + spdxRefPrefix + string(deID.ElementRefID) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/package.go b/vendor/github.com/spdx/tools-golang/spdx/common/package.go new file mode 100644 index 000000000000..de5a07523fd8 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/package.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +type Supplier struct { + // can be "NOASSERTION" + Supplier string + // SupplierType can be one of "Person", "Organization", or empty if Supplier is "NOASSERTION" + SupplierType string +} + +// UnmarshalJSON takes a supplier in the typical one-line format and parses it into a Supplier struct. +// This function is also used when unmarshalling YAML +func (s *Supplier) UnmarshalJSON(data []byte) error { + // the value is just a string presented as a slice of bytes + supplierStr := string(data) + supplierStr = strings.Trim(supplierStr, "\"") + + if supplierStr == "NOASSERTION" { + s.Supplier = supplierStr + return nil + } + + supplierFields := strings.SplitN(supplierStr, ": ", 2) + + if len(supplierFields) != 2 { + return fmt.Errorf("failed to parse Supplier '%s'", supplierStr) + } + + s.SupplierType = supplierFields[0] + s.Supplier = supplierFields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing a Supplier in string form. +// This function is also used when marshalling to YAML +func (s Supplier) MarshalJSON() ([]byte, error) { + if s.Supplier == "NOASSERTION" { + return json.Marshal(s.Supplier) + } else if s.SupplierType != "" && s.Supplier != "" { + return json.Marshal(fmt.Sprintf("%s: %s", s.SupplierType, s.Supplier)) + } + + return []byte{}, fmt.Errorf("failed to marshal invalid Supplier: %+v", s) +} + +type Originator struct { + // can be "NOASSERTION" + Originator string + // OriginatorType can be one of "Person", "Organization", or empty if Originator is "NOASSERTION" + OriginatorType string +} + +// UnmarshalJSON takes an originator in the typical one-line format and parses it into an Originator struct. +// This function is also used when unmarshalling YAML +func (o *Originator) UnmarshalJSON(data []byte) error { + // the value is just a string presented as a slice of bytes + originatorStr := string(data) + originatorStr = strings.Trim(originatorStr, "\"") + + if originatorStr == "NOASSERTION" { + o.Originator = originatorStr + return nil + } + + originatorFields := strings.SplitN(originatorStr, ": ", 2) + + if len(originatorFields) != 2 { + return fmt.Errorf("failed to parse Originator '%s'", originatorStr) + } + + o.OriginatorType = originatorFields[0] + o.Originator = originatorFields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing an Originator in string form. 
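+// For example (editor's illustration), Originator{OriginatorType: "Organization",
+// Originator: "ExampleCorp"} marshals to "Organization: ExampleCorp".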
+// This function is also used when marshalling to YAML +func (o Originator) MarshalJSON() ([]byte, error) { + if o.Originator == "NOASSERTION" { + return json.Marshal(o.Originator) + } else if o.Originator != "" { + return json.Marshal(fmt.Sprintf("%s: %s", o.OriginatorType, o.Originator)) + } + + return []byte{}, nil +} + +type PackageVerificationCode struct { + // Cardinality: mandatory, one if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + Value string `json:"packageVerificationCodeValue"` + // Spec also allows specifying files to exclude from the + // verification code algorithm; intended to enable exclusion of + // the SPDX document file itself. + ExcludedFiles []string `json:"packageVerificationCodeExcludedFiles,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go new file mode 100644 index 000000000000..63afac3ba2e5 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +type SnippetRangePointer struct { + // 5.3: Snippet Byte Range: [start byte]:[end byte] + // Cardinality: mandatory, one + Offset int `json:"offset,omitempty"` + + // 5.4: Snippet Line Range: [start line]:[end line] + // Cardinality: optional, one + LineNumber int `json:"lineNumber,omitempty"` + + FileSPDXIdentifier ElementID `json:"reference"` +} + +type SnippetRange struct { + StartPointer SnippetRangePointer `json:"startPointer"` + EndPointer SnippetRangePointer `json:"endPointer"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go new file mode 100644 index 000000000000..35eddc617e77 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Annotation is an Annotation section of an SPDX Document for version 2.2 of the spec. +type Annotation struct { + // 12.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 12.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 12.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 12.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. 
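+	// (Editor's note: in the tag-value format this would carry a value such
+	// as "SPDXRef-DOCUMENT"; the example identifier is illustrative.)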
+ AnnotationSPDXIdentifier common.DocElementID `json:"-"` + + // 12.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go new file mode 100644 index 000000000000..70e611f79b76 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.2 of the spec. +type CreationInfo struct { + // 6.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion"` + + // 6.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 6.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 6.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go new file mode 100644 index 000000000000..31ac08b6c7ce --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go @@ -0,0 +1,65 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.2 of the spec. +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.2 of the spec. 
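+// A minimal well-formed value looks like the following (editor's
+// illustration; the name and namespace are hypothetical):
+//
+//	Document{
+//		SPDXVersion:       "SPDX-2.2",
+//		DataLicense:       "CC0-1.0",
+//		SPDXIdentifier:    "DOCUMENT",
+//		DocumentName:      "example-sbom",
+//		DocumentNamespace: "https://example.com/spdxdocs/example-sbom",
+//		CreationInfo:      &CreationInfo{ /* ... */ },
+//	}
+//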
+// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go new file mode 100644 index 000000000000..150e79f0bbc8 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// File is a File section of an SPDX Document for version 2.2 of the spec. 
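+// Per the cardinality notes on the fields below, a minimal File carries a
+// FileName, a FileSPDXIdentifier, at least one SHA1 Checksum, a
+// LicenseConcluded, LicenseInfoInFiles and a FileCopyrightText
+// (editor's summary of the field comments).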
+type File struct {
+	// 8.1: File Name
+	// Cardinality: mandatory, one
+	FileName string `json:"fileName"`
+
+	// 8.2: File SPDX Identifier: "SPDXRef-[idstring]"
+	// Cardinality: mandatory, one
+	FileSPDXIdentifier common.ElementID `json:"SPDXID"`
+
+	// 8.3: File Types
+	// Cardinality: optional, multiple
+	FileTypes []string `json:"fileTypes,omitempty"`
+
+	// 8.4: File Checksum: may have keys for SHA1, SHA256 and/or MD5
+	// Cardinality: mandatory, one SHA1, others may be optionally provided
+	Checksums []common.Checksum `json:"checksums"`
+
+	// 8.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	LicenseConcluded string `json:"licenseConcluded"`
+
+	// 8.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one or many
+	LicenseInfoInFiles []string `json:"licenseInfoInFiles"`
+
+	// 8.7: Comments on License
+	// Cardinality: optional, one
+	LicenseComments string `json:"licenseComments,omitempty"`
+
+	// 8.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	FileCopyrightText string `json:"copyrightText"`
+
+	// DEPRECATED in version 2.1 of spec
+	// 8.9-8.11: Artifact of Project variables (defined below)
+	// Cardinality: optional, one or many
+	ArtifactOfProjects []*ArtifactOfProject `json:"-"`
+
+	// 8.12: File Comment
+	// Cardinality: optional, one
+	FileComment string `json:"comment,omitempty"`
+
+	// 8.13: File Notice
+	// Cardinality: optional, one
+	FileNotice string `json:"noticeText,omitempty"`
+
+	// 8.14: File Contributor
+	// Cardinality: optional, one or many
+	FileContributors []string `json:"fileContributors,omitempty"`
+
+	// 8.15: File Attribution Text
+	// Cardinality: optional, one or many
+	FileAttributionTexts []string `json:"attributionTexts,omitempty"`
+
+	// DEPRECATED in version 2.0 of spec
+	// 8.16: File Dependencies
+	// Cardinality: optional, one or many
+	FileDependencies []string `json:"-"`
+
+	// Snippets contained in this File
+	// Note that Snippets could be defined in a different Document! However,
+	// the only ones that _THIS_ document can contain are the ones that are
+	// defined here -- so this should just be an ElementID.
+	Snippets map[common.ElementID]*Snippet `json:"-"`
+
+	Annotations []Annotation `json:"annotations,omitempty"`
+}
+
+// ArtifactOfProject is a DEPRECATED collection of data regarding
+// a Package, as defined in sections 8.9-8.11 in version 2.2 of the spec.
+type ArtifactOfProject struct {
+
+	// DEPRECATED in version 2.1 of spec
+	// 8.9: Artifact of Project Name
+	// Cardinality: conditional, required if present, one per AOP
+	Name string
+
+	// DEPRECATED in version 2.1 of spec
+	// 8.10: Artifact of Project Homepage: URL or "UNKNOWN"
+	// Cardinality: optional, one per AOP
+	HomePage string
+
+	// DEPRECATED in version 2.1 of spec
+	// 8.11: Artifact of Project Uniform Resource Identifier
+	// Cardinality: optional, one per AOP
+	URI string
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go
new file mode 100644
index 000000000000..1eaf048ddbda
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package v2_2
+
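As a quick orientation to the vendored v2_2 data model, here is a minimal sketch that populates the File struct above. The common.Checksum shape (Algorithm/Value) and the common.SHA1 constant are assumed from the accompanying spdx/common package; the file name, identifier, and digest (the well-known SHA1 of the empty string) are placeholders:

```go
package main

import (
	"fmt"

	"github.com/spdx/tools-golang/spdx/common"
	v2_2 "github.com/spdx/tools-golang/spdx/v2_2"
)

func main() {
	// Hypothetical file entry; every concrete value is a placeholder.
	f := &v2_2.File{
		FileName:           "./frontend/dockerfile/main.go",
		FileSPDXIdentifier: common.ElementID("File-dockerfile-main"),
		Checksums: []common.Checksum{
			{Algorithm: common.SHA1, Value: "da39a3ee5e6b4b0d3255bfef95601890afd80709"},
		},
		LicenseConcluded:   "Apache-2.0",
		LicenseInfoInFiles: []string{"Apache-2.0"},
		FileCopyrightText:  "NOASSERTION",
	}
	fmt.Printf("file %s (%s)\n", f.FileName, f.FileSPDXIdentifier)
}
```

+// OtherLicense is an Other License Information section of an
+// SPDX Document for version 2.2 of the spec.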
+type OtherLicense struct { + // 10.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 10.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 10.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 10.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 10.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go new file mode 100644 index 000000000000..2d99e0456b0b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Package is a Package section of an SPDX Document for version 2.2 of the spec. +type Package struct { + // NOT PART OF SPEC + // flag: does this "package" contain files that were in fact "unpackaged", + // e.g. included directly in the Document without being in a Package? + IsUnpackaged bool `json:"-"` + + // 7.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 7.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 7.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 7.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 7.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 7.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 7.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 7.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? 
+	IsFilesAnalyzedTagPresent bool `json:"-"`
+
+	// 7.9: Package Verification Code
+	PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode"`
+
+	// 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512 and/or MD5
+	// Cardinality: optional, one or many
+	PackageChecksums []common.Checksum `json:"checksums,omitempty"`
+
+	// 7.11: Package Home Page
+	// Cardinality: optional, one
+	PackageHomePage string `json:"homepage,omitempty"`
+
+	// 7.12: Source Information
+	// Cardinality: optional, one
+	PackageSourceInfo string `json:"sourceInfo,omitempty"`
+
+	// 7.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	PackageLicenseConcluded string `json:"licenseConcluded"`
+
+	// 7.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one or many if filesAnalyzed is true / omitted;
+	// zero (must be omitted) if filesAnalyzed is false
+	PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"`
+
+	// 7.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	PackageLicenseDeclared string `json:"licenseDeclared"`
+
+	// 7.16: Comments on License
+	// Cardinality: optional, one
+	PackageLicenseComments string `json:"licenseComments,omitempty"`
+
+	// 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	PackageCopyrightText string `json:"copyrightText"`
+
+	// 7.18: Package Summary Description
+	// Cardinality: optional, one
+	PackageSummary string `json:"summary,omitempty"`
+
+	// 7.19: Package Detailed Description
+	// Cardinality: optional, one
+	PackageDescription string `json:"description,omitempty"`
+
+	// 7.20: Package Comment
+	// Cardinality: optional, one
+	PackageComment string `json:"comment,omitempty"`
+
+	// 7.21: Package External Reference
+	// Cardinality: optional, one or many
+	PackageExternalReferences []*PackageExternalReference `json:"externalRefs,omitempty"`
+
+	// 7.22: Package External Reference Comment
+	// Cardinality: conditional (optional, one) for each External Reference
+	// contained within PackageExternalReference struct, if present
+
+	// 7.23: Package Attribution Text
+	// Cardinality: optional, one or many
+	PackageAttributionTexts []string `json:"attributionTexts,omitempty"`
+
+	// Files contained in this Package
+	Files []*File `json:"files,omitempty"`
+
+	Annotations []Annotation `json:"annotations,omitempty"`
+}
+
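The external-reference field above (7.21) points at the PackageExternalReference struct defined just below. A minimal, hypothetical sketch of attaching a package-manager reference (a purl) to a v2_2 Package; all names and versions are invented:

```go
package main

import (
	"fmt"

	v2_2 "github.com/spdx/tools-golang/spdx/v2_2"
)

func main() {
	// Hypothetical purl reference; the category and type strings follow
	// the SPDX 2.2 external-reference conventions noted in the comments.
	ref := &v2_2.PackageExternalReference{
		Category: "PACKAGE-MANAGER",
		RefType:  "purl",
		Locator:  "pkg:golang/github.com/moby/buildkit@v0.11.0",
	}
	pkg := &v2_2.Package{
		PackageName:               "buildkit",
		PackageDownloadLocation:   "NOASSERTION",
		PackageExternalReferences: []*v2_2.PackageExternalReference{ref},
	}
	fmt.Println(pkg.PackageName, pkg.PackageExternalReferences[0].Locator)
}
```

+// PackageExternalReference is an External Reference to additional info
+// about a Package, as defined in section 7.21 in version 2.2 of the spec.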
+type PackageExternalReference struct { + // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" + Category string `json:"referenceCategory"` + + // type is an [idstring] as defined in Appendix VI; + // called RefType here due to "type" being a Golang keyword + RefType string `json:"referenceType"` + + // locator is a unique string to access the package-specific + // info, metadata or content within the target location + Locator string `json:"referenceLocator"` + + // 7.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + ExternalRefComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go new file mode 100644 index 000000000000..a93baa714d67 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Relationship is a Relationship section of an SPDX Document for +// version 2.2 of the spec. +type Relationship struct { + + // 11.1: Relationship + // Cardinality: optional, one or more; one per Relationship + // one mandatory for SPDX Document with multiple packages + // RefA and RefB are first and second item + // Relationship is type from 11.1.1 + RefA common.DocElementID `json:"spdxElementId"` + RefB common.DocElementID `json:"relatedSpdxElement"` + Relationship string `json:"relationshipType"` + + // 11.2: Relationship Comment + // Cardinality: optional, one + RelationshipComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go new file mode 100644 index 000000000000..22b3b8a08195 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +// Review is a Review section of an SPDX Document for version 2.2 of the spec. +// DEPRECATED in version 2.0 of spec; retained here for compatibility. +type Review struct { + + // DEPRECATED in version 2.0 of spec + // 13.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 13.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 13.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go new file mode 100644 index 000000000000..61045f1e0252 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Snippet is a Snippet section of an SPDX Document for version 2.2 of the spec. 
+type Snippet struct { + + // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 9.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 9.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetLicenseConcluded string `json:"licenseConcluded"` + + // 9.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 9.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 9.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 9.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 9.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` + + // 9.11: Snippet Attribution Text + // Cardinality: optional, one or many + SnippetAttributionTexts []string `json:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go new file mode 100644 index 000000000000..121e99523561 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Annotation is an Annotation section of an SPDX Document for version 2.3 of the spec. +type Annotation struct { + // 12.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 12.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 12.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 12.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. + AnnotationSPDXIdentifier common.DocElementID `json:"-" yaml:"-"` + + // 12.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go new file mode 100644 index 000000000000..33b2caf070fb --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.3 of the spec. 
+type CreationInfo struct { + // 6.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion"` + + // 6.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 6.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 6.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go new file mode 100644 index 000000000000..32fdb8db8439 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go @@ -0,0 +1,65 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.3 of the spec. +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.3 of the spec. +// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.3" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-" yaml:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go new file mode 100644 index 000000000000..c472fdb2fcfe --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + 
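The v2_3 Document defined above is the root object of a 2.3 SBOM. For reference, a minimal sketch of assembling one; the field shapes of common.Creator and common.DocElementID are assumed from the vendored spdx/common package, and all names, namespaces, and dates are invented:

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/spdx/tools-golang/spdx/common"
	v2_3 "github.com/spdx/tools-golang/spdx/v2_3"
)

func main() {
	doc := &v2_3.Document{
		SPDXVersion:       "SPDX-2.3",
		DataLicense:       "CC0-1.0",
		SPDXIdentifier:    common.ElementID("DOCUMENT"),
		DocumentName:      "hello-sbom",
		DocumentNamespace: "https://example.com/spdxdocs/hello-sbom", // invented
		CreationInfo: &v2_3.CreationInfo{
			Creators: []common.Creator{
				// Assumed common.Creator shape: CreatorType is "Person",
				// "Organization" or "Tool"; Creator is the name string.
				{CreatorType: "Tool", Creator: "example-generator-0.0.1"},
			},
			Created: "2023-01-01T00:00:00Z",
		},
		Packages: []*v2_3.Package{{
			PackageName:             "hello",
			PackageSPDXIdentifier:   common.ElementID("Package-hello"),
			PackageDownloadLocation: "NOASSERTION",
			PackageCopyrightText:    "NOASSERTION",
		}},
		Relationships: []*v2_3.Relationship{{
			// Assumed common.DocElementID shape: ElementRefID names an
			// element within this same document.
			RefA:         common.DocElementID{ElementRefID: "DOCUMENT"},
			RefB:         common.DocElementID{ElementRefID: "Package-hello"},
			Relationship: "DESCRIBES",
		}},
	}
	// Emit the document as SPDX JSON on stdout.
	_ = json.NewEncoder(os.Stdout).Encode(doc)
}
```

The DESCRIBES relationship from SPDXRef-DOCUMENT to the package is the one relationship SPDX consumers typically expect to find.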
+import "github.com/spdx/tools-golang/spdx/common"
+
+// File is a File section of an SPDX Document for version 2.3 of the spec.
+type File struct {
+	// 8.1: File Name
+	// Cardinality: mandatory, one
+	FileName string `json:"fileName"`
+
+	// 8.2: File SPDX Identifier: "SPDXRef-[idstring]"
+	// Cardinality: mandatory, one
+	FileSPDXIdentifier common.ElementID `json:"SPDXID"`
+
+	// 8.3: File Types
+	// Cardinality: optional, multiple
+	FileTypes []string `json:"fileTypes,omitempty"`
+
+	// 8.4: File Checksum: may have keys for SHA1, SHA256, MD5, SHA3-256, SHA3-384, SHA3-512, BLAKE2b-256, BLAKE2b-384, BLAKE2b-512, BLAKE3, ADLER32
+	// Cardinality: mandatory, one SHA1, others may be optionally provided
+	Checksums []common.Checksum `json:"checksums"`
+
+	// 8.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: optional, one
+	LicenseConcluded string `json:"licenseConcluded,omitempty"`
+
+	// 8.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION"
+	// Cardinality: optional, one or many
+	LicenseInfoInFiles []string `json:"licenseInfoInFiles,omitempty"`
+
+	// 8.7: Comments on License
+	// Cardinality: optional, one
+	LicenseComments string `json:"licenseComments,omitempty"`
+
+	// 8.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION"
+	// Cardinality: mandatory, one
+	FileCopyrightText string `json:"copyrightText"`
+
+	// DEPRECATED in version 2.1 of spec
+	// 8.9-8.11: Artifact of Project variables (defined below)
+	// Cardinality: optional, one or many
+	ArtifactOfProjects []*ArtifactOfProject `json:"artifactOfs,omitempty"`
+
+	// 8.12: File Comment
+	// Cardinality: optional, one
+	FileComment string `json:"comment,omitempty"`
+
+	// 8.13: File Notice
+	// Cardinality: optional, one
+	FileNotice string `json:"noticeText,omitempty"`
+
+	// 8.14: File Contributor
+	// Cardinality: optional, one or many
+	FileContributors []string `json:"fileContributors,omitempty"`
+
+	// 8.15: File Attribution Text
+	// Cardinality: optional, one or many
+	FileAttributionTexts []string `json:"attributionTexts,omitempty"`
+
+	// DEPRECATED in version 2.0 of spec
+	// 8.16: File Dependencies
+	// Cardinality: optional, one or many
+	FileDependencies []string `json:"fileDependencies,omitempty"`
+
+	// Snippets contained in this File
+	// Note that Snippets could be defined in a different Document! However,
+	// the only ones that _THIS_ document can contain are the ones that are
+	// defined here -- so this should just be an ElementID.
+	Snippets map[common.ElementID]*Snippet `json:"-" yaml:"-"`
+
+	Annotations []Annotation `json:"annotations,omitempty"`
+}
+
+// ArtifactOfProject is a DEPRECATED collection of data regarding
+// a Package, as defined in sections 8.9-8.11 in version 2.3 of the spec.
+// NOTE: the JSON schema does not define the structure of this object: +// https://github.com/spdx/spdx-spec/blob/development/v2.3.1/schemas/spdx-schema.json#L480 +type ArtifactOfProject struct { + + // DEPRECATED in version 2.1 of spec + // 8.9: Artifact of Project Name + // Cardinality: conditional, required if present, one per AOP + Name string `json:"name"` + + // DEPRECATED in version 2.1 of spec + // 8.10: Artifact of Project Homepage: URL or "UNKNOWN" + // Cardinality: optional, one per AOP + HomePage string `json:"homePage"` + + // DEPRECATED in version 2.1 of spec + // 8.11: Artifact of Project Uniform Resource Identifier + // Cardinality: optional, one per AOP + URI string `json:"URI"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go new file mode 100644 index 000000000000..363bb412532e --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +// OtherLicense is an Other License Information section of an +// SPDX Document for version 2.3 of the spec. +type OtherLicense struct { + // 10.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 10.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 10.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 10.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 10.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go new file mode 100644 index 000000000000..b9d5b9515b48 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Package is a Package section of an SPDX Document for version 2.3 of the spec. +type Package struct { + // NOT PART OF SPEC + // flag: does this "package" contain files that were in fact "unpackaged", + // e.g. included directly in the Document without being in a Package? 
+ IsUnpackaged bool `json:"-" yaml:"-"` + + // 7.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 7.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 7.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 7.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 7.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 7.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 7.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 7.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? + IsFilesAnalyzedTagPresent bool `json:"-" yaml:"-"` + + // 7.9: Package Verification Code + // Cardinality: if FilesAnalyzed == true must be present, if FilesAnalyzed == false must be omitted + PackageVerificationCode *common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` + + // 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512, MD5, SHA3-256, SHA3-384, SHA3-512, BLAKE2b-256, BLAKE2b-384, BLAKE2b-512, BLAKE3, ADLER32 + // Cardinality: optional, one or many + PackageChecksums []common.Checksum `json:"checksums,omitempty"` + + // 7.11: Package Home Page + // Cardinality: optional, one + PackageHomePage string `json:"homepage,omitempty"` + + // 7.12: Source Information + // Cardinality: optional, one + PackageSourceInfo string `json:"sourceInfo,omitempty"` + + // 7.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + PackageLicenseConcluded string `json:"licenseConcluded,omitempty"` + + // 7.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles,omitempty"` + + // 7.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + PackageLicenseDeclared string `json:"licenseDeclared,omitempty"` + + // 7.16: Comments on License + // Cardinality: optional, one + PackageLicenseComments string `json:"licenseComments,omitempty"` + + // 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageCopyrightText string `json:"copyrightText"` + + // 7.18: Package Summary Description + // Cardinality: optional, one + PackageSummary string `json:"summary,omitempty"` + + // 7.19: Package Detailed Description + // Cardinality: optional, one + PackageDescription string `json:"description,omitempty"` + + // 7.20: Package Comment + // Cardinality: optional, one + PackageComment string `json:"comment,omitempty"` + + // 7.21: Package External Reference + // Cardinality: optional, one or many + PackageExternalReferences []*PackageExternalReference 
`json:"externalRefs,omitempty"`
+
+	// 7.22: Package External Reference Comment
+	// Cardinality: conditional (optional, one) for each External Reference
+	// contained within PackageExternalReference struct, if present
+
+	// 7.23: Package Attribution Text
+	// Cardinality: optional, one or many
+	PackageAttributionTexts []string `json:"attributionTexts,omitempty"`
+
+	// 7.24: Primary Package Purpose
+	// Cardinality: optional, one
+	// Allowed values: APPLICATION, FRAMEWORK, LIBRARY, CONTAINER, OPERATING-SYSTEM, DEVICE, FIRMWARE, SOURCE, ARCHIVE, FILE, INSTALL, OTHER
+	PrimaryPackagePurpose string `json:"primaryPackagePurpose,omitempty"`
+
+	// 7.25: Release Date: YYYY-MM-DDThh:mm:ssZ
+	// Cardinality: optional, one
+	ReleaseDate string `json:"releaseDate,omitempty"`
+
+	// 7.26: Build Date: YYYY-MM-DDThh:mm:ssZ
+	// Cardinality: optional, one
+	BuiltDate string `json:"builtDate,omitempty"`
+
+	// 7.27: Valid Until Date: YYYY-MM-DDThh:mm:ssZ
+	// Cardinality: optional, one
+	ValidUntilDate string `json:"validUntilDate,omitempty"`
+
+	// Files contained in this Package
+	Files []*File `json:"files,omitempty"`
+
+	Annotations []Annotation `json:"annotations,omitempty"`
+}
+
+// PackageExternalReference is an External Reference to additional info
+// about a Package, as defined in section 7.21 in version 2.3 of the spec.
+type PackageExternalReference struct {
+	// category is "SECURITY", "PACKAGE-MANAGER" or "OTHER"
+	Category string `json:"referenceCategory"`
+
+	// type is an [idstring] as defined in Appendix VI;
+	// called RefType here due to "type" being a Golang keyword
+	RefType string `json:"referenceType"`
+
+	// locator is a unique string to access the package-specific
+	// info, metadata or content within the target location
+	Locator string `json:"referenceLocator"`
+
+	// 7.22: Package External Reference Comment
+	// Cardinality: conditional (optional, one) for each External Reference
+	ExternalRefComment string `json:"comment,omitempty"`
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go
new file mode 100644
index 000000000000..af4c07d16483
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package v2_3
+
+import "github.com/spdx/tools-golang/spdx/common"
+
+// Relationship is a Relationship section of an SPDX Document for
+// version 2.3 of the spec.
+type Relationship struct {
+
+	// 11.1: Relationship
+	// Cardinality: optional, one or more; one per Relationship
+	//              one mandatory for SPDX Document with multiple packages
+	// RefA and RefB are first and second item
+	// Relationship is type from 11.1.1
+	RefA         common.DocElementID `json:"spdxElementId"`
+	RefB         common.DocElementID `json:"relatedSpdxElement"`
+	Relationship string              `json:"relationshipType"`
+
+	// 11.2: Relationship Comment
+	// Cardinality: optional, one
+	RelationshipComment string `json:"comment,omitempty"`
+}
diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go
new file mode 100644
index 000000000000..0463807fbd60
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+package v2_3
+
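The v2_3 Package gains the 7.24-7.27 fields over its v2_2 counterpart. A short, hypothetical sketch using them (identifiers and dates are invented):

```go
package main

import (
	"fmt"

	"github.com/spdx/tools-golang/spdx/common"
	v2_3 "github.com/spdx/tools-golang/spdx/v2_3"
)

func main() {
	// Hypothetical package exercising the fields new in SPDX 2.3.
	pkg := &v2_3.Package{
		PackageName:             "buildkitd",
		PackageSPDXIdentifier:   common.ElementID("Package-buildkitd"),
		PackageDownloadLocation: "NOASSERTION",
		PackageCopyrightText:    "NOASSERTION",
		PrimaryPackagePurpose:   "APPLICATION",
		BuiltDate:               "2023-01-01T00:00:00Z",
		ValidUntilDate:          "2024-01-01T00:00:00Z",
	}
	fmt.Println(pkg.PackageName, pkg.PrimaryPackagePurpose)
}
```

+// Review is a Review section of an SPDX Document for version 2.3 of the spec.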
+// DEPRECATED in version 2.0 of spec; retained here for compatibility. +type Review struct { + + // DEPRECATED in version 2.0 of spec + // 13.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 13.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 13.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go new file mode 100644 index 000000000000..d55a1a968fbc --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Snippet is a Snippet section of an SPDX Document for version 2.3 of the spec. +type Snippet struct { + + // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 9.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 9.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + SnippetLicenseConcluded string `json:"licenseConcluded,omitempty"` + + // 9.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 9.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 9.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 9.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 9.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` + + // 9.11: Snippet Attribution Text + // Cardinality: optional, one or many + SnippetAttributionTexts []string `json:"-" yaml:"-"` +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d267924..95d8e59da69b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. 
We're not interested in most types.
+			if !canConvert(obj1Value, timeType) {
+				break
+			}
+
+			// time.Time can be compared!
+			timeObj1, ok := obj1.(time.Time)
+			if !ok {
+				timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			timeObj2, ok := obj2.(time.Time)
+			if !ok {
+				timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+		}
+	case reflect.Slice:
+		{
+			// We only care about the []byte type.
+			if !canConvert(obj1Value, bytesType) {
+				break
+			}
+
+			// []byte can be compared!
+			bytesObj1, ok := obj1.([]byte)
+			if !ok {
+				bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
+
+			}
+			bytesObj2, ok := obj2.([]byte)
+			if !ok {
+				bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
+			}
+
+			return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+		}
 	}
 
 	return compareEqual, false
@@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 // assert.Greater(t, float64(2), float64(1))
 // assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
 // assert.GreaterOrEqual(t, "b", "a")
 // assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // Less asserts that the first element is less than the second
@@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
 // assert.Less(t, float64(1), float64(2))
 // assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
 // assert.LessOrEqual(t, "a", "b")
 // assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
 
 // Positive asserts that the specified element is positive
@@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
 // assert.Positive(t, 1)
 // assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
 }
 
 // Negative asserts that the specified element is negative
@@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 // assert.Negative(t, -1)
 // assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
 }
 
 func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 000000000000..da867903e2fa
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 000000000000..1701af2a3c89
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go do not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+	return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229a861..7880b8f94333 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
 	return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
 }
 
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) +} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e6..339515b8bfb9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). 
+//
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinRange(a.t, actual, start, end, msgAndArgs...)
+}
+
+// WithinRangef asserts that a time is within a time range (inclusive).
+//
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinRangef(a.t, actual, start, end, msg, args...)
+}
+
 // YAMLEq asserts that two YAML strings are equivalent.
 func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1c3b47182a72..759448783585 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
 // assert.IsIncreasing(t, []float64{1, 2})
 // assert.IsIncreasing(t, []string{"a", "b"})
 func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
 // assert.IsNonIncreasing(t, []float64{2, 1})
 // assert.IsNonIncreasing(t, []string{"b", "a"})
 func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
 // assert.IsDecreasing(t, []float64{2, 1})
 // assert.IsDecreasing(t, []string{"b", "a"})
 func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
 // assert.IsNonDecreasing(t, []float64{1, 2})
 // assert.IsNonDecreasing(t, []string{"a", "b"})
 func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57f..fa1245b18973 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
+	subsetValue := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map && listKind == reflect.Map {
+		listValue := reflect.ValueOf(list)
+		subsetKeys := subsetValue.MapKeys()
+
+		for i := 0; i < len(subsetKeys); i++ {
+			subsetKey := subsetKeys[i]
+			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+			listElement := listValue.MapIndex(subsetKey).Interface()
+
+			if !ObjectsAreEqual(subsetElement, listElement) {
+				return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+			}
+		}
+
+		return true
+	}
+
 	for i := 0; i < subsetValue.Len(); i++ {
 		element := subsetValue.Index(i).Interface()
-		ok, found := includeElement(list, element)
+		ok, found := containsElement(list, element)
 		if !ok {
 			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
 		}
@@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
 		h.Helper()
 	}
 	if subset == nil {
-		return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+		return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
 	}
 
-	subsetValue := reflect.ValueOf(subset)
 	defer func() {
 		if e := recover(); e != nil {
 			ok = false
@@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
 	listKind := reflect.TypeOf(list).Kind()
 	subsetKind := reflect.TypeOf(subset).Kind()
 
-	if listKind != reflect.Array && listKind != reflect.Slice {
+	if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
+	subsetValue := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map && listKind == reflect.Map {
+		listValue := reflect.ValueOf(list)
+		subsetKeys := subsetValue.MapKeys()
+
+		for i := 0; i < len(subsetKeys); i++ {
+			subsetKey := subsetKeys[i]
+			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
+			listElement := listValue.MapIndex(subsetKey).Interface()
+
+			if !ObjectsAreEqual(subsetElement, listElement) {
+				return true
+			}
+		}
+
+		return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+	}
+
 	for i := 0; i < subsetValue.Len(); i++ {
 		element := subsetValue.Index(i).Interface()
-		ok, found := includeElement(list, element)
+		ok, found := containsElement(list, element)
 		if !ok {
 			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
 		}
@@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
 type PanicTestFunc func()
 
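Before the didPanic rewrite below, a hypothetical test sketch (names invented) illustrating what this testify bump adds to the comparison and subset assertions: time.Time and []byte are now ordered via UnixNano and bytes.Compare respectively, and Subset/NotSubset accept maps, compared key by key:

```go
package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestBumpedAssertions(t *testing.T) {
	// time.Time values are now comparable with Greater/Less.
	now := time.Now()
	assert.Greater(t, now.Add(time.Second), now)

	// []byte values are now ordered lexicographically.
	assert.Less(t, []byte("a"), []byte("b"))

	// Subset now accepts maps: every key in the second map must be
	// present in the first with an equal value.
	assert.Subset(t, map[string]int{"a": 1, "b": 2}, map[string]int{"a": 1})
}
```

 // didPanic returns true if the function passed to it panics. Otherwise, it returns false.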
-func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) + } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
} actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. 
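`ErrorContains`, added above, first asserts that the error is non-nil and then checks its rendered message with `strings.Contains`. A usage sketch (the wrapped error is illustrative):

```go
package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorContains(t *testing.T) {
	err := fmt.Errorf("open config: %w", errors.New("permission denied"))

	// Passes: the error's message contains the substring.
	assert.ErrorContains(t, err, "permission denied")

	// A nil error fails immediately via the inner Error(t, ...) check:
	// assert.ErrorContains(t, nil, "anything")
}
```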
func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 51820df2e672..880853f5a2c5 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int t.FailNow() } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContains(t, theError, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorContainsf(t, theError, contains, msg, args...) { + return + } + t.FailNow() +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { @@ -1834,6 +1864,32 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } +// WithinRange asserts that a time is within a time range (inclusive). +// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRange(t, actual, start, end, msgAndArgs...) { + return + } + t.FailNow() +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinRangef(t, actual, start, end, msg, args...) 
{ + return + } + t.FailNow() +} + // YAMLEq asserts that two YAML strings are equivalent. func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index ed54a9d83f35..960bf6f2cabf 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { @@ -1438,6 +1462,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. 
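The `require` variants above differ from their `assert` counterparts only by calling `t.FailNow()` on failure, and the `Assertions` forwarders bind a `TestingT` once. A sketch (the failing path assumes a Unix-like host, where the open error reads "no such file or directory"):

```go
package example

import (
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestWithRequire(t *testing.T) {
	a := require.New(t)

	_, err := os.Open("/nonexistent/path")
	// On failure this stops the test immediately instead of continuing.
	a.ErrorContains(err, "no such file")

	a.WithinRange(time.Now(), time.Now().Add(-time.Minute), time.Now().Add(time.Minute))
}
```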
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/tonistiigi/fsutil/Dockerfile b/vendor/github.com/tonistiigi/fsutil/Dockerfile index 8ea4b426e567..252b49763872 100644 --- a/vendor/github.com/tonistiigi/fsutil/Dockerfile +++ b/vendor/github.com/tonistiigi/fsutil/Dockerfile @@ -1,29 +1,30 @@ -#syntax=docker/dockerfile:1.2 -ARG GO_VERSION=1.16 +#syntax=docker/dockerfile:1 +ARG GO_VERSION=1.18 -FROM --platform=amd64 tonistiigi/xx:golang AS goxx +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.1.0 AS xx FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base -RUN apk add --no-cache gcc musl-dev -COPY --from=goxx / / +RUN apk add --no-cache git +COPY --from=xx / / WORKDIR /src FROM base AS build ARG TARGETPLATFORM -RUN --mount=target=. \ +RUN --mount=target=. --mount=target=/go/pkg/mod,type=cache \ --mount=target=/root/.cache,type=cache \ - go build ./... + xx-go build ./... FROM base AS test -RUN --mount=target=. \ +ARG TESTFLAGS +RUN --mount=target=. --mount=target=/go/pkg/mod,type=cache \ --mount=target=/root/.cache,type=cache \ - go test -test.v ./... + CGO_ENABLED=0 xx-go test -test.v ${TESTFLAGS} ./... FROM base AS test-noroot RUN mkdir /go/pkg && chmod 0777 /go/pkg USER 1000:1000 RUN --mount=target=. \ --mount=target=/tmp/.cache,type=cache \ - GOCACHE=/tmp/gocache go test -test.v ./... + CGO_ENABLED=0 GOCACHE=/tmp/gocache xx-go test -test.v ./... FROM build diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go index 74f08a15caa7..dd65a49ad106 100644 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go index 41b82c32daa4..558c553f7c5a 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go @@ -2,7 +2,6 @@ package fs import ( "context" - "io/ioutil" "os" "path" "path/filepath" @@ -12,7 +11,7 @@ import ( "time" "github.com/containerd/continuity/fs" - "github.com/docker/docker/pkg/fileutils" + "github.com/moby/patternmatcher" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" ) @@ -115,7 +114,7 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e if err != nil { return err } - if err := c.copy(ctx, srcFollowed, "", dst, false, fileutils.MatchInfo{}, fileutils.MatchInfo{}); err != nil { + if err := c.copy(ctx, srcFollowed, "", dst, false, patternmatcher.MatchInfo{}, patternmatcher.MatchInfo{}); err != nil { return err } } @@ -154,6 +153,7 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont type User struct { UID, GID int + SID string } type Chowner func(*User) (*User, error) @@ -232,8 +232,8 @@ type copier struct { mode *int inodes map[uint64]string xattrErrorHandler XAttrErrorHandler - includePatternMatcher *fileutils.PatternMatcher - excludePatternMatcher *fileutils.PatternMatcher + includePatternMatcher *patternmatcher.PatternMatcher + excludePatternMatcher *patternmatcher.PatternMatcher parentDirs []parentDir changefn fsutil.ChangeFunc root string @@ -252,19 +252,19 @@ func newCopier(root string, chown Chowner, tm *time.Time, mode *int, xeh XAttrEr } } - var includePatternMatcher *fileutils.PatternMatcher + var includePatternMatcher 
*patternmatcher.PatternMatcher if len(includePatterns) != 0 { var err error - includePatternMatcher, err = fileutils.NewPatternMatcher(includePatterns) + includePatternMatcher, err = patternmatcher.New(includePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid includepatterns: %s", includePatterns) } } - var excludePatternMatcher *fileutils.PatternMatcher + var excludePatternMatcher *patternmatcher.PatternMatcher if len(excludePatterns) != 0 { var err error - excludePatternMatcher, err = fileutils.NewPatternMatcher(excludePatterns) + excludePatternMatcher, err = patternmatcher.New(excludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid excludepatterns: %s", excludePatterns) } @@ -284,7 +284,7 @@ func newCopier(root string, chown Chowner, tm *time.Time, mode *int, xeh XAttrEr } // dest is always clean -func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata bool, parentIncludeMatchInfo, parentExcludeMatchInfo fileutils.MatchInfo) error { +func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata bool, parentIncludeMatchInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) error { select { case <-ctx.Done(): return ctx.Err() @@ -295,11 +295,15 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov if err != nil { return errors.Wrapf(err, "failed to stat %s", src) } + targetFi, err := os.Lstat(target) + if err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "failed to stat %s", src) + } include := true var ( - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo ) if srcComponents != "" { matchesIncludePattern := false @@ -335,7 +339,8 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov } } - copyFileInfo := true + copyFileInfo := include + restoreFileTimestamp := false notify := true switch { @@ -345,8 +350,12 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov include, includeMatchInfo, excludeMatchInfo, ); err != nil { return err - } else if !overwriteTargetMetadata || c.includePatternMatcher != nil { + } else if !overwriteTargetMetadata { + // if we aren't supposed to overwrite existing target metadata, + // then we only need to copy the new file info if we newly created + // it, or restore the previous file timestamp if not copyFileInfo = created + restoreFileTimestamp = !created } notify = false case (fi.Mode() & os.ModeType) == 0: @@ -369,23 +378,26 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov if err := os.Symlink(link, target); err != nil { return errors.Wrapf(err, "failed to create symlink: %s", target) } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + case (fi.Mode() & os.ModeDevice) == os.ModeDevice, + (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe, + (fi.Mode() & os.ModeSocket) == os.ModeSocket: if err := copyDevice(target, fi); err != nil { return errors.Wrapf(err, "failed to create device") } - default: - // TODO: Support pipes and sockets - return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) } if copyFileInfo { - if err := c.copyFileInfo(fi, target); err != nil { + if err := c.copyFileInfo(fi, src, target); err != nil { return errors.Wrap(err, "failed to copy file info") } if err := copyXAttrs(target, src, c.xattrErrorHandler); err != nil { return errors.Wrap(err, "failed to copy xattrs") } + 
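The copier's include/exclude handling above now goes through `github.com/moby/patternmatcher`, threading a per-directory `MatchInfo` down the tree so children are matched incrementally. A standalone sketch of that API (patterns and paths are illustrative; errors are ignored for brevity):

```go
package main

import (
	"fmt"

	"github.com/moby/patternmatcher"
)

func main() {
	pm, err := patternmatcher.New([]string{"docs/**", "!docs/README.md"})
	if err != nil {
		panic(err)
	}

	// Match the parent first to obtain its MatchInfo, then reuse it for
	// children, as the copier does with parentIncludeMatchInfo.
	_, dirInfo, _ := pm.MatchesUsingParentResults("docs", patternmatcher.MatchInfo{})

	matched, _, _ := pm.MatchesUsingParentResults("docs/guide.md", dirInfo)
	fmt.Println(matched) // true: covered by docs/**

	matched, _, _ = pm.MatchesUsingParentResults("docs/README.md", dirInfo)
	fmt.Println(matched) // false: negated by the ! pattern
}
```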
} else if restoreFileTimestamp && targetFi != nil { + if err := c.copyFileTimestamp(fi, target); err != nil { + return errors.Wrap(err, "failed to restore file timestamp") + } } if notify { if err := c.notifyChange(target, fi); err != nil { @@ -404,9 +416,9 @@ func (c *copier) notifyChange(target string, fi os.FileInfo) error { return nil } -func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo fileutils.MatchInfo) (bool, fileutils.MatchInfo, error) { +func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { if c.includePatternMatcher == nil { - return true, fileutils.MatchInfo{}, nil + return true, patternmatcher.MatchInfo{}, nil } m, matchInfo, err := c.includePatternMatcher.MatchesUsingParentResults(path, parentIncludeMatchInfo) @@ -416,9 +428,9 @@ func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo fil return m, matchInfo, nil } -func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo fileutils.MatchInfo) (bool, fileutils.MatchInfo, error) { +func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { if c.excludePatternMatcher == nil { - return false, fileutils.MatchInfo{}, nil + return false, patternmatcher.MatchInfo{}, nil } m, matchInfo, err := c.excludePatternMatcher.MatchesUsingParentResults(path, parentExcludeMatchInfo) @@ -449,7 +461,7 @@ func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTa return err } if created { - if err := c.copyFileInfo(fi, parentDir.dstPath); err != nil { + if err := c.copyFileInfo(fi, parentDir.srcPath, parentDir.dstPath); err != nil { return errors.Wrap(err, "failed to copy file info") } @@ -471,8 +483,8 @@ func (c *copier) copyDirectory( stat os.FileInfo, overwriteTargetMetadata bool, include bool, - includeMatchInfo fileutils.MatchInfo, - excludeMatchInfo fileutils.MatchInfo, + includeMatchInfo patternmatcher.MatchInfo, + excludeMatchInfo patternmatcher.MatchInfo, ) (bool, error) { if !stat.IsDir() { return false, errors.Errorf("source is not directory") @@ -509,7 +521,7 @@ func (c *copier) copyDirectory( c.parentDirs = c.parentDirs[:len(c.parentDirs)-1] }() - fis, err := ioutil.ReadDir(src) + fis, err := os.ReadDir(src) if err != nil { return false, errors.Wrapf(err, "failed to read %s", src) } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go index 0d8149693a17..bc93b21cedaf 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin package fs @@ -40,3 +41,7 @@ func copyFileContent(dst, src *os.File) error { return err } + +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), rDev) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go index 297a2c0335f0..1b9dbb3d00ee 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package fs @@ -7,6 +8,7 @@ import ( "os" "github.com/pkg/errors" + "golang.org/x/sys/unix" ) func copyFile(source, target string) error { @@ -30,3 +32,7 @@ func copyFileContent(dst, src *os.File) error { bufferPool.Put(buf) 
return err } + +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), uint64(rDev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go index 01878525cf79..971cb5c5d49c 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go @@ -15,9 +15,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) { return int(st.Uid), int(st.Gid) } -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - +func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { chown := c.chown uid, gid := getUIDGID(fi) old := &User{UID: uid, GID: gid} @@ -40,17 +38,23 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { } } + if err := c.copyFileTimestamp(fi, name); err != nil { + return err + } + + return nil +} + +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { if c.utime != nil { - if err := Utimes(name, c.utime); err != nil { - return err - } - } else { - timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } + return Utimes(name, c.utime) } + st := fi.Sys().(*syscall.Stat_t) + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } return nil } @@ -109,10 +113,6 @@ func copyFileContent(dst, src *os.File) error { return nil } -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), rDev) } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go index cbd784e5f570..382fe201c1ae 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go @@ -1,8 +1,12 @@ +//go:build !windows // +build !windows package fs import ( + "os" + "syscall" + "github.com/pkg/errors" "github.com/containerd/continuity/sysx" @@ -26,3 +30,17 @@ func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { return nil } + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + var rDev int + if fi.Mode()&os.ModeDevice == os.ModeDevice || fi.Mode()&os.ModeCharDevice == os.ModeCharDevice { + rDev = int(st.Rdev) + } + mode := st.Mode + mode &^= syscall.S_IFSOCK // socket copied as stub + return mknod(dst, uint32(mode), rDev) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go index 22281ba5dde4..945e96c5f23d 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go @@ -16,8 +16,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) { return int(st.Uid), int(st.Gid) } -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) 
+func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { chown := c.chown uid, gid := getUIDGID(fi) old := &User{UID: uid, GID: gid} @@ -40,15 +39,21 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { } } + if err := c.copyFileTimestamp(fi, name); err != nil { + return err + } + return nil +} + +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { if c.utime != nil { - if err := Utimes(name, c.utime); err != nil { - return err - } - } else { - timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } + return Utimes(name, c.utime) + } + + st := fi.Sys().(*syscall.Stat_t) + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) } return nil } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go index 330c0e3f2c35..19a44a752f09 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go @@ -4,14 +4,60 @@ import ( "io" "os" + "github.com/Microsoft/go-winio" "github.com/pkg/errors" + "golang.org/x/sys/windows" ) -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { +const ( + seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { if err := os.Chmod(name, fi.Mode()); err != nil { return errors.Wrapf(err, "failed to chmod %s", name) } + // Copy file ownership and ACL + // We need SeRestorePrivilege and SeTakeOwnershipPrivilege in order + // to restore security info on a file, especially if we're trying to + // apply security info which includes SIDs not necessarily present on + // the host. 
+ privileges := []string{winio.SeRestorePrivilege, seTakeOwnershipPrivilege} + if err := winio.EnableProcessPrivileges(privileges); err != nil { + return err + } + defer winio.DisableProcessPrivileges(privileges) + + secInfo, err := windows.GetNamedSecurityInfo( + src, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) + + if err != nil { + return err + } + + dacl, _, err := secInfo.DACL() + if err != nil { + return err + } + + sid, _, err := secInfo.Owner() + if err != nil { + return err + } + + if err := windows.SetNamedSecurityInfo( + name, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION, + sid, nil, dacl, nil); err != nil { + + return err + } + return nil +} + +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { // TODO: copy windows specific metadata return nil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go b/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go deleted file mode 100644 index 8a06d242a489..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build darwin -// +build darwin - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go b/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go deleted file mode 100644 index 64a2fe4da35f..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build freebsd || solaris -// +build freebsd solaris - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), st.Rdev) -} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go index 3b825c940bf7..a02c5a5857f0 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fs diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go index 98547544759b..9553c08be307 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go @@ -4,26 +4,8 @@ import ( "os" "syscall" "time" - - "github.com/pkg/errors" ) -func Chown(p string, old *User, fn Chowner) error { - if fn == nil { - return nil - } - user, err := fn(old) - if err != nil { - return errors.WithStack(err) - } - if user != nil { - if err := os.Lchown(p, user.UID, user.GID); err != nil { - return err - } - } - return nil -} - // MkdirAll is forked os.MkdirAll func MkdirAll(path string, perm os.FileMode, user Chowner, tm *time.Time) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
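`Chown` and the `Chowner` callback it takes are moved here into the platform-specific files that follow; the callback maps an original owner to a replacement, and returning `nil` keeps ownership untouched. A minimal Unix-side sketch (path and IDs are arbitrary example values; the Windows variant below resolves a SID instead):

```go
package main

import (
	"log"

	fscopy "github.com/tonistiigi/fsutil/copy"
)

func main() {
	// Map any original owner to 1000:1000; returning nil instead would
	// leave the file's ownership unchanged.
	remap := func(old *fscopy.User) (*fscopy.User, error) {
		return &fscopy.User{UID: 1000, GID: 1000}, nil
	}

	if err := fscopy.Chown("/tmp/example", &fscopy.User{UID: 0, GID: 0}, remap); err != nil {
		log.Fatal(err)
	}
}
```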
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go index 8fb0f6bc604b..8bc5711bf086 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go @@ -1,8 +1,10 @@ +//go:build !windows // +build !windows package fs import ( + "os" "time" "github.com/pkg/errors" @@ -30,3 +32,19 @@ func Utimes(p string, tm *time.Time) error { return nil } + +func Chown(p string, old *User, fn Chowner) error { + if fn == nil { + return nil + } + user, err := fn(old) + if err != nil { + return errors.WithStack(err) + } + if user != nil { + if err := os.Lchown(p, user.UID, user.GID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go index 6bd17e813358..6edb1f5f7f6c 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go @@ -1,10 +1,21 @@ +//go:build windows // +build windows package fs import ( + "fmt" "os" + "syscall" "time" + + "github.com/Microsoft/go-winio" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const ( + containerAdministratorSidString = "S-1-5-93-2-1" ) func fixRootDirectory(p string) string { @@ -19,3 +30,64 @@ func fixRootDirectory(p string) string { func Utimes(p string, tm *time.Time) error { return nil } + +func Chown(p string, old *User, fn Chowner) error { + if fn == nil { + return nil + } + user, err := fn(old) + if err != nil { + return errors.WithStack(err) + } + + userSIDstring := user.SID + if userSIDstring == "" { + userSIDstring = containerAdministratorSidString + + } + // Copy file ownership and ACL + // We need SeRestorePrivilege and SeTakeOwnershipPrivilege in order + // to restore security info on a file, especially if we're trying to + // apply security info which includes SIDs not necessarily present on + // the host. 
+ privileges := []string{winio.SeRestorePrivilege, seTakeOwnershipPrivilege} + if err := winio.EnableProcessPrivileges(privileges); err != nil { + return err + } + defer winio.DisableProcessPrivileges(privileges) + + sidPtr, err := syscall.UTF16PtrFromString(userSIDstring) + if err != nil { + return errors.Wrap(err, "converting to utf16 ptr") + } + var userSID *windows.SID + if err := windows.ConvertStringSidToSid(sidPtr, &userSID); err != nil { + return errors.Wrap(err, "converting to windows SID") + } + var dacl *windows.ACL + newEntries := []windows.EXPLICIT_ACCESS{ + { + AccessPermissions: windows.GENERIC_ALL, + AccessMode: windows.GRANT_ACCESS, + Inheritance: windows.SUB_CONTAINERS_AND_OBJECTS_INHERIT, + Trustee: windows.TRUSTEE{ + TrusteeForm: windows.TRUSTEE_IS_SID, + TrusteeValue: windows.TrusteeValueFromSID(userSID), + }, + }, + } + newAcl, err := windows.ACLFromEntries(newEntries, dacl) + if err != nil { + return fmt.Errorf("adding acls: %w", err) + } + + if err := windows.SetNamedSecurityInfo( + p, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION, + userSID, nil, newAcl, nil); err != nil { + + return err + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go index 59accf054d31..31ea3d9419a3 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || solaris // +build dragonfly linux solaris package fs diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go index 786432264f0f..b822644ddc9c 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -4,6 +4,7 @@ import ( "context" "hash" "io" + gofs "io/fs" "os" "path/filepath" "strconv" @@ -33,10 +34,11 @@ type DiskWriter struct { opt DiskWriterOpt dest string - ctx context.Context - cancel func() - eg *errgroup.Group - filter FilterFunc + ctx context.Context + cancel func() + eg *errgroup.Group + filter FilterFunc + dirModTimes map[string]int64 } func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { @@ -51,17 +53,32 @@ func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWr eg, ctx := errgroup.WithContext(ctx) return &DiskWriter{ - opt: opt, - dest: dest, - eg: eg, - ctx: ctx, - cancel: cancel, - filter: opt.Filter, + opt: opt, + dest: dest, + eg: eg, + ctx: ctx, + cancel: cancel, + filter: opt.Filter, + dirModTimes: map[string]int64{}, }, nil } func (dw *DiskWriter) Wait(ctx context.Context) error { - return dw.eg.Wait() + if err := dw.eg.Wait(); err != nil { + return err + } + return filepath.WalkDir(dw.dest, func(path string, d gofs.DirEntry, prevErr error) error { + if prevErr != nil { + return prevErr + } + if !d.IsDir() { + return nil + } + if mtime, ok := dw.dirModTimes[path]; ok { + return chtimes(path, mtime) + } + return nil + }) } func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { @@ -147,6 +164,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er if err := os.Mkdir(newPath, fi.Mode()); err != nil { return errors.Wrapf(err, "failed to create dir %s", newPath) } + dw.dirModTimes[destPath] = statCopy.ModTime case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: if err := 
handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { return errors.Wrapf(err, "failed to create device %s", newPath) @@ -323,10 +341,6 @@ func (lfw *lazyFileWriter) Close() error { return err } -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - // Random number state. // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go index 6ca00618a16d..ed6356fabeab 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package fsutil @@ -8,7 +9,9 @@ import ( ) func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - dev := unix.Mkdev(uint32(stat.Devmajor), uint32(stat.Devminor)) + return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) +} - return unix.Mknod(path, mode, dev) +func mkdev(major int64, minor int64) uint64 { + return unix.Mkdev(uint32(major), uint32(minor)) } diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go index 36bb78895cfd..1d97d6f9d7ca 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go index 9f55ad88322a..927dba460236 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go @@ -1,13 +1,17 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd package fsutil import ( - "syscall" - "github.com/tonistiigi/fsutil/types" + "golang.org/x/sys/unix" ) func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - return syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))) + return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) +} + +func mkdev(major int64, minor int64) int { + return int(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl index 0d3c54172fc2..3d7d182c3cc4 100644 --- a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl +++ b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl @@ -1,5 +1,5 @@ variable "GO_VERSION" { - default = "1.16" + default = "1.18" } group "default" { @@ -63,5 +63,5 @@ target "shfmt" { target "cross" { inherits = ["build"] - platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "freebsd/amd64", "freebsd/arm64"] + platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "windows/arm64", "freebsd/amd64", "freebsd/arm64"] } diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go index a0942413e811..136a908211c5 100644 --- a/vendor/github.com/tonistiigi/fsutil/followlinks.go +++ b/vendor/github.com/tonistiigi/fsutil/followlinks.go @@ -1,7 +1,6 @@ package fsutil 
import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -75,7 +74,7 @@ func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, e realPath := filepath.Join(r.root, p) base := filepath.Base(p) if allowWildcard && containsWildcards(base) { - fis, err := ioutil.ReadDir(filepath.Dir(realPath)) + fis, err := os.ReadDir(filepath.Dir(realPath)) if err != nil { if errors.Is(err, os.ErrNotExist) { return nil, nil diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go index e26110b320b3..db587b77cd80 100644 --- a/vendor/github.com/tonistiigi/fsutil/fs.go +++ b/vendor/github.com/tonistiigi/fsutil/fs.go @@ -3,7 +3,6 @@ package fsutil import ( "context" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -103,7 +102,7 @@ func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) if len(parts) == 0 { - return ioutil.NopCloser(&emptyReader{}), nil + return io.NopCloser(&emptyReader{}), nil } d, ok := fs.m[parts[0]] if !ok { diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go index 2c1a3801d58a..f1c51b83652d 100644 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -135,7 +135,7 @@ func (s *sender) sendFile(h *sendHandle) error { defer f.Close() buf := bufPool.Get().(*[]byte) defer bufPool.Put(buf) - if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, *buf); err != nil { + if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, struct{ io.Reader }{f}, *buf); err != nil { return err } } diff --git a/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/vendor/github.com/tonistiigi/fsutil/stat_unix.go index dd0ed455166f..5923aefef165 100644 --- a/vendor/github.com/tonistiigi/fsutil/stat_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/stat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go index d0b5114b4038..f95101f319b3 100644 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -8,7 +8,7 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/fileutils" + "github.com/moby/patternmatcher" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" ) @@ -19,9 +19,29 @@ type WalkOpt struct { // FollowPaths contains symlinks that are resolved into include patterns // before performing the fs walk FollowPaths []string - Map FilterFunc + Map MapFunc } +type MapFunc func(string, *types.Stat) MapResult + +// The result of the walk function controls +// both how WalkDir continues and whether the path is kept. +type MapResult int + +const ( + // Keep the current path and continue. + MapResultKeep MapResult = iota + + // Exclude the current path and continue. + MapResultExclude + + // Exclude the current path, and skip the rest of the dir. + // If path is a dir, skip the current directory. + // If path is a file, skip the rest of the parent directory. + // (This matches the semantics of fs.SkipDir.) 
+ MapResultSkipDir +) + func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { root, err := filepath.EvalSymlinks(p) if err != nil { @@ -37,8 +57,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err var ( includePatterns []string - includeMatcher *fileutils.PatternMatcher - excludeMatcher *fileutils.PatternMatcher + includeMatcher *patternmatcher.PatternMatcher + excludeMatcher *patternmatcher.PatternMatcher ) if opt != nil && opt.IncludePatterns != nil { @@ -63,7 +83,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err onlyPrefixIncludes := true if len(includePatterns) != 0 { - includeMatcher, err = fileutils.NewPatternMatcher(includePatterns) + includeMatcher, err = patternmatcher.New(includePatterns) if err != nil { return errors.Wrapf(err, "invalid includepatterns: %s", opt.IncludePatterns) } @@ -79,7 +99,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err onlyPrefixExcludeExceptions := true if opt != nil && opt.ExcludePatterns != nil { - excludeMatcher, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) + excludeMatcher, err = patternmatcher.New(opt.ExcludePatterns) if err != nil { return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns) } @@ -97,8 +117,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err path string origpath string pathWithSep string - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo calledFn bool } @@ -123,7 +143,13 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err return nil } - var dir visitedDir + var ( + dir visitedDir + isDir bool + ) + if fi != nil { + isDir = fi.IsDir() + } if includeMatcher != nil || excludeMatcher != nil { for len(parentDirs) != 0 { @@ -134,7 +160,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err parentDirs = parentDirs[:len(parentDirs)-1] } - if fi.IsDir() { + if isDir { dir = visitedDir{ fi: fi, path: path, @@ -147,7 +173,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err skip := false if includeMatcher != nil { - var parentIncludeMatchInfo fileutils.MatchInfo + var parentIncludeMatchInfo patternmatcher.MatchInfo if len(parentDirs) != 0 { parentIncludeMatchInfo = parentDirs[len(parentDirs)-1].includeMatchInfo } @@ -156,12 +182,12 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err return errors.Wrap(err, "failed to match includepatterns") } - if fi.IsDir() { + if isDir { dir.includeMatchInfo = matchInfo } if !m { - if fi.IsDir() && onlyPrefixIncludes { + if isDir && onlyPrefixIncludes { // Optimization: we can skip walking this dir if no include // patterns could match anything inside it. 
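The `MapFunc`/`MapResult` contract introduced above replaces the old boolean filter, letting a mapper prune whole directories rather than just drop single entries. A sketch of one such mapper (directory and suffix choices are illustrative):

```go
package walkexample

import (
	"path/filepath"
	"strings"

	"github.com/tonistiigi/fsutil"
	"github.com/tonistiigi/fsutil/types"
)

// mapPaths skips .git directories wholesale, excludes *.key files, and
// keeps everything else.
func mapPaths(p string, st *types.Stat) fsutil.MapResult {
	if filepath.Base(p) == ".git" {
		return fsutil.MapResultSkipDir // do not descend further
	}
	if strings.HasSuffix(p, ".key") {
		return fsutil.MapResultExclude // drop this entry, keep walking
	}
	return fsutil.MapResultKeep
}

// Wired up through WalkOpt.Map.
var opt = &fsutil.WalkOpt{Map: mapPaths}
```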
dirSlash := path + string(filepath.Separator) @@ -182,7 +208,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err } if excludeMatcher != nil { - var parentExcludeMatchInfo fileutils.MatchInfo + var parentExcludeMatchInfo patternmatcher.MatchInfo if len(parentDirs) != 0 { parentExcludeMatchInfo = parentDirs[len(parentDirs)-1].excludeMatchInfo } @@ -191,12 +217,12 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err return errors.Wrap(err, "failed to match excludepatterns") } - if fi.IsDir() { + if isDir { dir.excludeMatchInfo = matchInfo } if m { - if fi.IsDir() && onlyPrefixExcludeExceptions { + if isDir && onlyPrefixExcludeExceptions { // Optimization: we can skip walking this dir if no // exceptions to exclude patterns could match anything // inside it. @@ -230,7 +256,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err if includeMatcher != nil || excludeMatcher != nil { defer func() { - if fi.IsDir() { + if isDir { parentDirs = append(parentDirs, dir) } }() @@ -252,7 +278,10 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err return ctx.Err() default: if opt != nil && opt.Map != nil { - if allowed := opt.Map(stat.Path, stat); !allowed { + result := opt.Map(stat.Path, stat) + if result == MapResultSkipDir { + return filepath.SkipDir + } else if result == MapResultExclude { return nil } } @@ -271,7 +300,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err default: } if opt != nil && opt.Map != nil { - if allowed := opt.Map(parentStat.Path, parentStat); !allowed { + result := opt.Map(parentStat.Path, parentStat) + if result == MapResultSkipDir || result == MapResultExclude { continue } } @@ -289,11 +319,11 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err }) } -func patternWithoutTrailingGlob(p *fileutils.Pattern) string { +func patternWithoutTrailingGlob(p *patternmatcher.Pattern) string { patStr := p.String() - // We use filepath.Separator here because fileutils.Pattern patterns + // We use filepath.Separator here because patternmatcher.Pattern patterns // get transformed to use the native path separator: - // https://github.com/moby/moby/blob/79651b7a979b40e26af353ad283ca7ea5d67a855/pkg/fileutils/fileutils.go#L54 + // https://github.com/moby/patternmatcher/blob/130b41bafc16209dc1b52a103fdac1decad04f1a/patternmatcher.go#L52 patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"**") patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"*") return patStr diff --git a/vendor/github.com/tonistiigi/go-actions-cache/cache.go b/vendor/github.com/tonistiigi/go-actions-cache/cache.go index 438d39a6b0a8..3a0f4b1f80cb 100644 --- a/vendor/github.com/tonistiigi/go-actions-cache/cache.go +++ b/vendor/github.com/tonistiigi/go-actions-cache/cache.go @@ -519,6 +519,9 @@ func (ce *Entry) Download(ctx context.Context) ReaderAtCloser { return nil, errors.WithStack(err) } if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return nil, errors.Errorf("invalid status response %v for %s, range: %v", resp.Status, ce.URL, req.Header.Get("Range")) + } return nil, errors.Errorf("invalid status response %v for %s", resp.Status, ce.URL) } if offset != 0 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go new file mode 100644 index 
000000000000..104489e79fdb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracetest is a testing helper package for the SDK. User can +// configure no-op or in-memory exporters to verify different SDK behaviors or +// custom instrumentation. +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/sdk/trace" +) + +var _ trace.SpanExporter = (*NoopExporter)(nil) + +// NewNoopExporter returns a new no-op exporter. +func NewNoopExporter() *NoopExporter { + return new(NoopExporter) +} + +// NoopExporter is an exporter that drops all received spans and performs no +// action. +type NoopExporter struct{} + +// ExportSpans handles export of spans by dropping them. +func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } + +// Shutdown stops the exporter by doing nothing. +func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } + +var _ trace.SpanExporter = (*InMemoryExporter)(nil) + +// NewInMemoryExporter returns a new InMemoryExporter. +func NewInMemoryExporter() *InMemoryExporter { + return new(InMemoryExporter) +} + +// InMemoryExporter is an exporter that stores all received spans in-memory. +type InMemoryExporter struct { + mu sync.Mutex + ss SpanStubs +} + +// ExportSpans handles export of spans by storing them in memory. +func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...) + return nil +} + +// Shutdown stops the exporter by clearing spans held in memory. +func (imsb *InMemoryExporter) Shutdown(context.Context) error { + imsb.Reset() + return nil +} + +// Reset the current in-memory storage. +func (imsb *InMemoryExporter) Reset() { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = nil +} + +// GetSpans returns the current in-memory stored spans. +func (imsb *InMemoryExporter) GetSpans() SpanStubs { + imsb.mu.Lock() + defer imsb.mu.Unlock() + ret := make(SpanStubs, len(imsb.ss)) + copy(ret, imsb.ss) + return ret +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go new file mode 100644 index 000000000000..dcf32c148dd6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
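`InMemoryExporter` above stores span stubs under a mutex; paired with a synchronous processor it makes exported spans directly assertable. A sketch (tracer and span names are illustrative):

```go
package example

import (
	"context"
	"testing"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func TestExportedSpans(t *testing.T) {
	exp := tracetest.NewInMemoryExporter()
	// WithSyncer exports each span synchronously on End, so GetSpans is
	// deterministic without an explicit flush.
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exp))

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.End()

	if spans := exp.GetSpans(); len(spans) != 1 || spans[0].Name != "op" {
		t.Fatalf("unexpected spans: %+v", spans)
	}
}
```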
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// SpanRecorder records started and ended spans. +type SpanRecorder struct { + startedMu sync.RWMutex + started []sdktrace.ReadWriteSpan + + endedMu sync.RWMutex + ended []sdktrace.ReadOnlySpan +} + +var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) + +func NewSpanRecorder() *SpanRecorder { + return new(SpanRecorder) +} + +// OnStart records started spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) { + sr.startedMu.Lock() + defer sr.startedMu.Unlock() + sr.started = append(sr.started, s) +} + +// OnEnd records completed spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { + sr.endedMu.Lock() + defer sr.endedMu.Unlock() + sr.ended = append(sr.ended, s) +} + +// Shutdown does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Shutdown(context.Context) error { + return nil +} + +// ForceFlush does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) ForceFlush(context.Context) error { + return nil +} + +// Started returns a copy of all started spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { + sr.startedMu.RLock() + defer sr.startedMu.RUnlock() + dst := make([]sdktrace.ReadWriteSpan, len(sr.started)) + copy(dst, sr.started) + return dst +} + +// Ended returns a copy of all ended spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan { + sr.endedMu.RLock() + defer sr.endedMu.RUnlock() + dst := make([]sdktrace.ReadOnlySpan, len(sr.ended)) + copy(dst, sr.ended) + return dst +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go new file mode 100644 index 000000000000..ece4633c5259 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
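`SpanRecorder` above records spans at both ends of their lifecycle, so a test can distinguish started-but-unfinished spans from ended ones. A sketch:

```go
package example

import (
	"context"
	"testing"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func TestSpanLifecycle(t *testing.T) {
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	if len(sr.Started()) != 1 || len(sr.Ended()) != 0 {
		t.Fatal("expected one started, zero ended")
	}

	span.End()
	if len(sr.Ended()) != 1 {
		t.Fatal("expected one ended span")
	}
}
```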
+ +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +type SpanStubs []SpanStub + +// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. +func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs { + if len(ro) == 0 { + return nil + } + + s := make(SpanStubs, 0, len(ro)) + for _, r := range ro { + s = append(s, SpanStubFromReadOnlySpan(r)) + } + + return s +} + +// Snapshots returns s as a slice of ReadOnlySpans. +func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { + if len(s) == 0 { + return nil + } + + ro := make([]tracesdk.ReadOnlySpan, len(s)) + for i := 0; i < len(s); i++ { + ro[i] = s[i].Snapshot() + } + return ro +} + +// SpanStub is a stand-in for a Span. +type SpanStub struct { + Name string + SpanContext trace.SpanContext + Parent trace.SpanContext + SpanKind trace.SpanKind + StartTime time.Time + EndTime time.Time + Attributes []attribute.KeyValue + Events []tracesdk.Event + Links []tracesdk.Link + Status tracesdk.Status + DroppedAttributes int + DroppedEvents int + DroppedLinks int + ChildSpanCount int + Resource *resource.Resource + InstrumentationLibrary instrumentation.Library +} + +// SpanStubFromReadOnlySpan returns a SpanStub populated from ro. +func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { + if ro == nil { + return SpanStub{} + } + + return SpanStub{ + Name: ro.Name(), + SpanContext: ro.SpanContext(), + Parent: ro.Parent(), + SpanKind: ro.SpanKind(), + StartTime: ro.StartTime(), + EndTime: ro.EndTime(), + Attributes: ro.Attributes(), + Events: ro.Events(), + Links: ro.Links(), + Status: ro.Status(), + DroppedAttributes: ro.DroppedAttributes(), + DroppedEvents: ro.DroppedEvents(), + DroppedLinks: ro.DroppedLinks(), + ChildSpanCount: ro.ChildSpanCount(), + Resource: ro.Resource(), + InstrumentationLibrary: ro.InstrumentationLibrary(), + } +} + +// Snapshot returns a read-only copy of the SpanStub. +func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { + return spanSnapshot{ + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationLibrary: s.InstrumentationLibrary, + } +} + +type spanSnapshot struct { + // Embed the interface to implement the private method. 
+ tracesdk.ReadOnlySpan + + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationLibrary instrumentation.Library +} + +func (s spanSnapshot) Name() string { return s.name } +func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext } +func (s spanSnapshot) Parent() trace.SpanContext { return s.parent } +func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind } +func (s spanSnapshot) StartTime() time.Time { return s.startTime } +func (s spanSnapshot) EndTime() time.Time { return s.endTime } +func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes } +func (s spanSnapshot) Links() []tracesdk.Link { return s.links } +func (s spanSnapshot) Events() []tracesdk.Event { return s.events } +func (s spanSnapshot) Status() tracesdk.Status { return s.status } +func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes } +func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks } +func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } +func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } +func (s spanSnapshot) Resource() *resource.Resource { return s.resource } +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationLibrary +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba0dfe..000000000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976faf..000000000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go index a2ecf5c325b9..93eb5ae6de6f 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -12,7 +12,7 @@ import ( "errors" "math/bits" - "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/internal/alias" ) const ( @@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) { panic("chacha20: output smaller than input") } dst = dst[:len(src)] - if subtle.InexactOverlap(dst, src) { + if alias.InexactOverlap(dst, src) { panic("chacha20: invalid buffer overlap") } diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index c5898db46584..4652247b8a63 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -15,6 +15,7 @@ const bufSize = 256 // xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only // be called when the vector facility is available. 
Implementation in asm_s390x.s. +// //go:noescape func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index cda3fdd3540d..bc62161d6e42 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519" import ( "crypto/subtle" - "fmt" + "errors" + "strconv" "golang.org/x/crypto/curve25519/internal/field" ) @@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) { func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { var in [32]byte if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) + return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") } if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) + return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") } copy(in[:], scalar) if &point[0] == &Basepoint[0] { @@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { copy(base[:], point) ScalarMult(dst, &in, &base) if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") + return nil, errors.New("bad input point: low order point") } } return dst[:], nil diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go index 44dc8e8caf91..edcf163c4ed4 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -1,13 +1,16 @@ // Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego package field // feMul sets out = a * b. It works like feMulGeneric. +// //go:noescape func feMul(out *Element, a *Element, b *Element) // feSquare sets out = a * a. It works like feSquareGeneric. +// //go:noescape func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go index 71ad917dadd8..a7828345fcc4 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -1,13 +1,7 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// In Go 1.13, the ed25519 package was promoted to the standard library as -// crypto/ed25519, and this package became a wrapper for the standard library one. -// -//go:build !go1.13 -// +build !go1.13 - // Package ed25519 implements the Ed25519 signature algorithm. See // https://ed25519.cr.yp.to/. // @@ -16,21 +10,15 @@ // representation includes a public key suffix to make multiple signing // operations with the same key more efficient. This package refers to the RFC // 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. 
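+//
+// Illustrative usage (annotation, not part of the upstream change; a minimal
+// sketch assuming Go 1.13+): because PublicKey and PrivateKey are type
+// aliases, values from this package interoperate freely with crypto/ed25519:
+//
+//	pub, priv, _ := GenerateKey(nil) // nil reader falls back to crypto/rand.Reader
+//	sig := Sign(priv, []byte("hello"))
+//	_ = Verify(pub, []byte("hello"), sig) // reports true for a valid signature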
package ed25519 -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" + "crypto/ed25519" "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" ) const ( @@ -45,57 +33,21 @@ const ( ) // PublicKey is the type of Ed25519 public keys. -type PublicKey []byte +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey // PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey // GenerateKey generates a public/private key pair using entropy from rand. // If rand is nil, crypto/rand.Reader will be used. func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil + return ed25519.GenerateKey(rand) } // NewKeyFromSeed calculates a private key from a seed. It will panic if @@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { // with RFC 8032. RFC 8032's private keys correspond to seeds in this // package. func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey + return ed25519.NewKeyFromSeed(seed) } // Sign signs the message with privateKey and returns a signature. It will // panic if len(privateKey) is not PrivateKeySize. 
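+// (Annotation, not part of the upstream change: the function now forwards
+// directly to crypto/ed25519.Sign, so its behavior, including the panic on a
+// mis-sized private key, matches the standard library.)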
func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature + return ed25519.Sign(privateKey, message) } // Verify reports whether sig is a valid signature of message by publicKey. It // will panic if len(publicKey) is not PublicKeySize. func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. - if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) + return ed25519.Verify(publicKey, message, sig) } diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go deleted file mode 100644 index b5974dc8b27b..000000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. 
-// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c1d87..000000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. 
See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, 
- { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252af42..000000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package edwards25519
-
-import "encoding/binary"
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// FieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type FieldElement [10]int32
-
-var zero FieldElement
-
-func FeZero(fe *FieldElement) {
- copy(fe[:], zero[:])
-}
-
-func FeOne(fe *FieldElement) {
- FeZero(fe)
- fe[0] = 1
-}
-
-func FeAdd(dst, a, b *FieldElement) {
- dst[0] = a[0] + b[0]
- dst[1] = a[1] + b[1]
- dst[2] = a[2] + b[2]
- dst[3] = a[3] + b[3]
- dst[4] = a[4] + b[4]
- dst[5] = a[5] + b[5]
- dst[6] = a[6] + b[6]
- dst[7] = a[7] + b[7]
- dst[8] = a[8] + b[8]
- dst[9] = a[9] + b[9]
-}
-
-func FeSub(dst, a, b *FieldElement) {
- dst[0] = a[0] - b[0]
- dst[1] = a[1] - b[1]
- dst[2] = a[2] - b[2]
- dst[3] = a[3] - b[3]
- dst[4] = a[4] - b[4]
- dst[5] = a[5] - b[5]
- dst[6] = a[6] - b[6]
- dst[7] = a[7] - b[7]
- dst[8] = a[8] - b[8]
- dst[9] = a[9] - b[9]
-}
-
-func FeCopy(dst, src *FieldElement) {
- copy(dst[:], src[:])
-}
-
-// Replace (f,g) with (g,g) if b == 1;
-// replace (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func FeCMove(f, g *FieldElement, b int32) {
- b = -b
- f[0] ^= b & (f[0] ^ g[0])
- f[1] ^= b & (f[1] ^ g[1])
- f[2] ^= b & (f[2] ^ g[2])
- f[3] ^= b & (f[3] ^ g[3])
- f[4] ^= b & (f[4] ^ g[4])
- f[5] ^= b & (f[5] ^ g[5])
- f[6] ^= b & (f[6] ^ g[6])
- f[7] ^= b & (f[7] ^ g[7])
- f[8] ^= b & (f[8] ^ g[8])
- f[9] ^= b & (f[9] ^ g[9])
-}
-
-func load3(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-func load4(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- r |= int64(in[3]) << 24
- return r
-}
-
-func FeFromBytes(dst *FieldElement, src *[32]byte) {
- h0 := load4(src[:])
- h1 := load3(src[4:]) << 6
- h2 := load3(src[7:]) << 5
- h3 := load3(src[10:]) << 3
- h4 := load3(src[13:]) << 2
- h5 := load4(src[16:])
- h6 := load3(src[20:]) << 7
- h7 := load3(src[23:]) << 5
- h8 := load3(src[26:]) << 4
- h9 := (load3(src[29:]) & 8388607) << 2
-
- FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-// FeToBytes marshals h to s.
-// Preconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-// Then 0<y<1.
-//
-// Write r=h-pq.
-// Have 0<=r<=p-1=2^255-20.
-// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^(255)<=2^255-1.
-//
-// Write x=r+19(2^-255)r+y.
-// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func FeToBytes(s *[32]byte, h *FieldElement) {
- var carry [10]int32
-
- q := (19*h[9] + (1 << 24)) >> 25
- q = (h[0] + q) >> 26
- q = (h[1] + q) >> 25
- q = (h[2] + q) >> 26
- q = (h[3] + q) >> 25
- q = (h[4] + q) >> 26
- q = (h[5] + q) >> 25
- q = (h[6] + q) >> 26
- q = (h[7] + q) >> 25
- q = (h[8] + q) >> 26
- q = (h[9] + q) >> 25
-
- // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
- h[0] += 19 * q
- // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
- */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- /* |h0| <= 2^25 */
- /* |h4| <= 2^25 */
- /* |h1| <= 1.51*2^58 */
- /* |h5| <= 1.51*2^58 */
-
- c1 = (h1 + (1 << 24)) >> 25
- h2 += c1
- h1 -= c1 << 25
- c5 = (h5 + (1 << 24)) >> 25
- h6 += c5
- h5 -= c5 << 25
- /* |h1| <= 2^24; from now on fits into int32 */
- /* |h5| <= 2^24; from now on fits into int32 */
- /* |h2| <= 1.21*2^59 */
- /* |h6| <= 1.21*2^59 */
-
- c2 = (h2 + (1 << 25)) >> 26
- h3 += c2
- h2 -= c2 << 26
- c6 = (h6 + (1 << 25)) >> 26
- h7 += c6
- h6 -= c6 << 26
- /* |h2| <= 2^25; from now on fits into int32 unchanged */
- /* |h6| <= 2^25; from now on fits into int32 unchanged */
- /* |h3| <= 1.51*2^58 */
- /* |h7| <= 1.51*2^58 */
-
- c3 = (h3 + (1 << 24)) >> 25
- h4 += c3
- h3 -= c3 << 25
- c7 = (h7 + (1 << 24)) >> 25
- h8 += c7
- h7 -= c7 << 25
- /* |h3| <= 2^24; from now on fits into int32 unchanged */
- /* |h7| <= 2^24; from now on fits into int32 unchanged */
- /* |h4| <= 1.52*2^33 */
- /* |h8| <= 1.52*2^33 */
-
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- c8 = (h8 + (1 << 25)) >> 26
- h9 += c8
- h8 -= c8 << 26
- /* |h4| <= 2^25; from now on fits into int32 unchanged */
- /* |h8| <= 2^25; from now on fits into int32 unchanged */
- /* |h5| <= 1.01*2^24 */
- /* |h9| <= 1.51*2^58 */
-
- c9 = (h9 + (1 << 24)) >> 25
- h0 += c9 * 19
- h9 -= c9 << 25
- /* |h9| <= 2^24; from now on fits into int32 unchanged */
- /* |h0| <= 1.8*2^37 */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- /* |h0| <= 2^25; from now on fits into int32 unchanged */
- /* |h1| <= 1.01*2^24 */
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
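Aside for readers of this deleted vendored file: every bound comment in FeCombine above rests on one rounding trick. Adding 2^25 (or 2^24) before the arithmetic right shift makes the carry round to nearest, so the remainder left in each limb is centred around zero instead of lying in [0, 2^26). The following is a standalone editorial sketch, not part of the vendored code; the sampling range of roughly 2^59 is taken from the |h0| bound documented above.

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// The carry step FeCombine repeats: c = (h + 2^25) >> 26 (arithmetic
	// shift), then h -= c << 26. The 2^25 offset centres the remainder,
	// so afterwards |h| <= 2^25 rather than 0 <= h < 2^26.
	worst := int64(0)
	for n := 0; n < 1_000_000; n++ {
		h := rand.Int63n(1<<59) - 1<<58 // sample the documented ~2^59 range
		c := (h + 1<<25) >> 26
		h -= c << 26
		if h > worst {
			worst = h
		}
		if -h > worst {
			worst = -h
		}
	}
	fmt.Println(worst <= 1<<25) // true: remainders stay within ±2^25
}
```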
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
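
For context (this note is not from the vendored file): the equation above is the twisted Edwards form of Curve25519, and RFC 7748 relates the two curves by the birational map u = (1+y)/(1-y), v = sqrt(-486664)*u/x.
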
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
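
The contract above is easy to cross-check independently of the limb arithmetic; a minimal math/big sketch follows (the decimal constant is l = 2^252 + 27742317777372353535851937790883648493 written out, and the demo scalars are arbitrary):

```go
package main

import (
	"fmt"
	"math/big"
)

// leBytes writes x (assumed < 2^256) as 32 little-endian bytes, the same
// byte order load3/load4 read in ScMulAdd.
func leBytes(x *big.Int) [32]byte {
	var out [32]byte
	b := x.Bytes() // big-endian
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	l, _ := new(big.Int).SetString("7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)
	a, b, c := big.NewInt(123456789), big.NewInt(987654321), big.NewInt(42)
	s := new(big.Int).Mul(a, b) // s = ab
	s.Add(s, c)                 // s = ab + c
	s.Mod(s, l)                 // s = (ab + c) mod l
	fmt.Printf("(ab+c) mod l, little-endian: %x\n", leBytes(s))
}
```
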
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go new file mode 100644 index 000000000000..69c17f822b9a --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go new file mode 100644 index 000000000000..4775b0a43843 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. 
Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index c942a65904fa..e041da5ea3e7 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -136,7 +136,7 @@ func shiftRightBy2(a uint128) uint128 { // updateGeneric absorbs msg into the state.h accumulator. For each chunk m of // 128 bits of message, it computes // -// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// h₊ = (h + m) * r mod 2¹³⁰ - 5 // // If the msg length is not a multiple of TagSize, it assumes the last // incomplete chunk is the final one. @@ -278,8 +278,7 @@ const ( // finalize completes the modular reduction of h and computes // -// out = h + s mod 2¹²⁸ -// +// out = h + s mod 2¹²⁸ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { h0, h1, h2 := h[0], h[1], h[2] diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index 62cc9f84709e..ec9596688969 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -14,6 +14,7 @@ import ( // updateVX is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is // available. +// //go:noescape func updateVX(state *macState, msg []byte) diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index 4fad24f8dcde..000000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. 
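
The overlap semantics documented here are identical in the new alias package and this deleted subtle package; the following standalone sketch copies the unsafe-based definitions (the internal packages themselves cannot be imported) to show the three cases that matter for in-place crypto APIs:

```go
package main

import (
	"fmt"
	"unsafe"
)

// anyOverlap and inexactOverlap mirror the vendored definitions above.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 8)
	fmt.Println(anyOverlap(buf[0:4], buf[0:4]))     // true: same memory
	fmt.Println(inexactOverlap(buf[0:4], buf[0:4])) // false: exact overlap is permitted
	fmt.Println(inexactOverlap(buf[0:4], buf[2:6])) // true: shifted overlap would corrupt in-place crypto
	fmt.Println(anyOverlap(buf[0:4], buf[4:8]))     // false: disjoint slices
}
```
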
-func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go deleted file mode 100644 index 80ccbed2c0de..000000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign.go b/vendor/golang.org/x/crypto/nacl/sign/sign.go index d07627019ef5..8a6acdcc090a 100644 --- a/vendor/golang.org/x/crypto/nacl/sign/sign.go +++ b/vendor/golang.org/x/crypto/nacl/sign/sign.go @@ -24,7 +24,7 @@ import ( "io" "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/internal/subtle" + "golang.org/x/crypto/internal/alias" ) // Overhead is the number of bytes of overhead when signing a message. @@ -48,7 +48,7 @@ func GenerateKey(rand io.Reader) (publicKey *[32]byte, privateKey *[64]byte, err func Sign(out, message []byte, privateKey *[64]byte) []byte { sig := ed25519.Sign(ed25519.PrivateKey((*privateKey)[:]), message) ret, out := sliceForAppend(out, Overhead+len(message)) - if subtle.AnyOverlap(out, message) { + if alias.AnyOverlap(out, message) { panic("nacl: invalid buffer overlap") } copy(out, sig) @@ -67,7 +67,7 @@ func Open(out, signedMessage []byte, publicKey *[32]byte) ([]byte, bool) { return nil, false } ret, out := sliceForAppend(out, len(signedMessage)-Overhead) - if subtle.AnyOverlap(out, signedMessage) { + if alias.AnyOverlap(out, signedMessage) { panic("nacl: invalid buffer overlap") } copy(out, signedMessage[Overhead:]) diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000000..233b8b62cc27 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 000000000000..96f4a1a56eca --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. 
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000000..7377ce6fb2b8 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. 
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 000000000000..05de9cc2cdcc --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,268 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" + "math/bits" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- 
{ + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = 
bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000000..5f38aa7de83c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000000..5c419d41e32c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. 
+ + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. 
Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000000..3a89bdb3e393 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,360 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) + + errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// Unknown attributes are discarded. 
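+//
+// A common pattern (a sketch only; pfxData and the password here are
+// placeholders) re-encodes the returned blocks for use with crypto/tls:
+//
+//	blocks, err := pkcs12.ToPEM(pfxData, "password")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	var pemData []byte
+//	for _, b := range blocks {
+//		pemData = append(pemData, pem.EncodeToMemory(b)...)
+//	}
+//	cert, err := tls.X509KeyPair(pemData, pemData)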
+// +// Note that although the returned PEM blocks for private keys have type +// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according +// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err == errUnknownAttributeOID { + continue + } + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errUnknownAttributeOID + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
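+//
+// A minimal sketch of the common case (pfxData and the password are
+// placeholders):
+//
+//	key, cert, err := pkcs12.Decode(pfxData, "password")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tlsCert := tls.Certificate{
+//		Certificate: [][]byte{cert.Raw},
+//		PrivateKey:  key,
+//	}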
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + return nil, nil, err + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 000000000000..def1f7b98d7d --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index b909471cc066..c3e112a93961 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -8,7 +8,8 @@ // ssh-agent process using the sample server. // // References: -// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 +// +// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 package agent // import "golang.org/x/crypto/ssh/agent" import ( @@ -25,7 +26,6 @@ import ( "math/big" "sync" - "crypto" "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh" ) @@ -93,7 +93,7 @@ type ExtendedAgent interface { type ConstraintExtension struct { // ExtensionName consist of a UTF-8 string suffixed by the // implementation domain following the naming scheme defined - // in Section 4.2 of [RFC4251], e.g. "foo@example.com". + // in Section 4.2 of RFC 4251, e.g. "foo@example.com". 
ExtensionName string // ExtensionDetails contains the actual content of the extended // constraint. @@ -226,7 +226,9 @@ var ErrExtensionUnsupported = errors.New("agent: extension unsupported") type extensionAgentMsg struct { ExtensionType string `sshtype:"27"` - Contents []byte + // NOTE: this matches OpenSSH's PROTOCOL.agent, not the IETF draft [PROTOCOL.agent], + // so that it matches what OpenSSH actually implements in the wild. + Contents []byte `ssh:"rest"` } // Key represents a protocol 2 public key as defined in @@ -729,7 +731,7 @@ func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string if err != nil { return err } - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { return errors.New("agent: signer and cert have different public key") } @@ -771,19 +773,53 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, return s.agent.Sign(s.pub, data) } -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { +func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) { + if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) { + return s.Sign(rand, data) + } + var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } + switch algorithm { + case ssh.KeyAlgoRSASHA256: + flags = SignatureFlagRsaSha256 + case ssh.KeyAlgoRSASHA512: + flags = SignatureFlagRsaSha512 + default: + return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm) } + return s.agent.SignWithFlags(s.pub, data, flags) } +var _ ssh.AlgorithmSigner = &agentKeyringSigner{} + +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in certs.go. +var certKeyAlgoNames = map[string]string{ + ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA, + ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256, + ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512, + ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA, + ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256, + ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384, + ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521, + ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256, + ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519, + ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo +} + // Calls an extension method. It is up to the agent implementation as to whether or not // any particular extension is supported and may always return an error. 
Because the // type of the response is up to the implementation, this returns the bytes of the diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go index c9d979430712..21bfa870fa4d 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -113,7 +113,7 @@ func (r *keyring) Unlock(passphrase []byte) error { // expireKeysLocked removes expired keys from the keyring. If a key was added // with a lifetimesecs contraint and seconds >= lifetimesecs seconds have -// ellapsed, it is removed. The caller *must* be holding the keyring mutex. +// elapsed, it is removed. The caller *must* be holding the keyring mutex. func (r *keyring) expireKeysLocked() { for _, k := range r.keys { if k.expire != nil && time.Now().After(*k.expire) { @@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF var algorithm string switch flags { case SignatureFlagRsaSha256: - algorithm = ssh.SigAlgoRSASHA2256 + algorithm = ssh.KeyAlgoRSASHA256 case SignatureFlagRsaSha512: - algorithm = ssh.SigAlgoRSASHA2512 + algorithm = ssh.KeyAlgoRSASHA512 default: return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) } diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 6605bf64497e..fc04d03e1987 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -14,8 +14,10 @@ import ( "time" ) -// These constants from [PROTOCOL.certkeys] represent the key algorithm names -// for certificate types supported by this package. +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't +// appear in the Signature.Format field. const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -25,14 +27,21 @@ const ( CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. + CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" ) -// These constants from [PROTOCOL.certkeys] represent additional signature -// algorithm names for certificate types supported by this package. const ( - CertSigAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertSigAlgoRSASHA2256v01 = "rsa-sha2-256-cert-v01@openssh.com" - CertSigAlgoRSASHA2512v01 = "rsa-sha2-512-cert-v01@openssh.com" + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 ) // Certificate types distinguish between host and user @@ -242,7 +251,7 @@ type algorithmOpenSSHCertSigner struct { // private key is held by signer. It returns an error if the public key in cert // doesn't match the key used by signer. 
func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { return nil, errors.New("ssh: signer and cert have different public key") } @@ -431,10 +440,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() - if v, ok := authority.(AlgorithmSigner); ok { - if v.PublicKey().Type() == KeyAlgoRSA { - authority = &rsaSigner{v, SigAlgoRSASHA2512} + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err } + c.Signature = sig + return nil } sig, err := authority.Sign(rand, c.bytesForSigning()) @@ -445,32 +458,42 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { return nil } -// certAlgoNames includes a mapping from signature algorithms to the -// corresponding certificate signature algorithm. When a key type (such -// as ED25516) is associated with only one algorithm, the KeyAlgo -// constant is used instead of the SigAlgo. -var certAlgoNames = map[string]string{ - SigAlgoRSA: CertSigAlgoRSAv01, - SigAlgoRSASHA2256: CertSigAlgoRSASHA2256v01, - SigAlgoRSASHA2512: CertSigAlgoRSASHA2512v01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in agent/client.go. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo } -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. +func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true } } - panic("unknown cert algorithm") + return "", false } func (cert *Certificate) bytesForSigning() []byte { @@ -514,13 +537,13 @@ func (c *Certificate) Marshal() []byte { return result } -// Type returns the key name. It is part of the PublicKey interface. +// Type returns the certificate algorithm name. 
It is part of the PublicKey interface. func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] + certName, ok := certificateAlgo(c.Key.Type()) if !ok { - panic("unknown cert key type " + c.Key.Type()) + panic("unknown certificate type for key type " + c.Key.Type()) } - return algo + return certName } // Verify verifies a signature against the certificate's public diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index f8bdf4984cb7..87f48552ce03 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -15,7 +15,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "golang.org/x/crypto/chacha20" "golang.org/x/crypto/internal/poly1305" @@ -97,13 +96,13 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, // are not supported and will not be negotiated, even if explicitly requested in // ClientConfig.Crypto.Ciphers. var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms // are defined in the order specified in the RFC. "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. // They are defined in the order specified in the RFC. "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, @@ -111,7 +110,7 @@ var cipherModes = map[string]*cipherMode{ // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. + // RFC 4345 introduces improved versions of Arcfour. "arcfour": {16, 0, streamCipherMode(0, newRC4)}, // AEAD ciphers @@ -497,7 +496,7 @@ func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) // data, to make distinguishing between // failing MAC and failing length check more // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + io.CopyN(io.Discard, r, int64(c.oracleCamouflage)) } } return p, err @@ -640,9 +639,9 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com // AEAD, which is described here: // -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 // -// the methods here also implement padding, which RFC4253 Section 6 +// the methods here also implement padding, which RFC 4253 Section 6 // also requires of stream ciphers. 
type chacha20Poly1305Cipher struct { lengthKey [32]byte diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index ba8621a89153..bdc356cbdf1e 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -113,25 +113,16 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e return c.clientAuthenticate(config) } -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. +// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { sig, rest, ok := parseSignatureBody(result.Signature) if len(rest) > 0 || !ok { return errors.New("ssh: signature parse error") } - // For keys, underlyingAlgo is exactly algo. For certificates, - // we have to look up the underlying key algorithm that SSH - // uses to evaluate signatures. - underlyingAlgo := algo - for sigAlgo, certAlgo := range certAlgoNames { - if certAlgo == algo { - underlyingAlgo = sigAlgo - } - } - if sig.Format != underlyingAlgo { - return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, underlyingAlgo) + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) } return hostKey.Verify(result.H, sig) @@ -237,11 +228,11 @@ type ClientConfig struct { // be used for the connection. If empty, a reasonable default is used. ClientVersion string - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. HostKeyAlgorithms []string // Timeout is the maximum amount of time for the TCP connection to establish. diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index c611aeb68467..409b5ea1d49d 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "strings" ) type authResult int @@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { if err != nil { return err } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. 
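+	// Per RFC 8308, Section 2.3, the message body is a uint32 count
+	// followed by that many (name, value) pairs of SSH strings; the
+	// values are opaque and interpreted according to the name.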
+ extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } var serviceAccept serviceAcceptMsg if err := Unmarshal(packet, &serviceAccept); err != nil { return err @@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { sessionID := c.transport.getSessionID() for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { return err } @@ -93,7 +121,7 @@ type AuthMethod interface { // If authentication is not successful, a []string of alternative // method names is returned. If the slice is nil, it will be ignored // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) // method returns the RFC 4252 method name. method() string @@ -102,7 +130,7 @@ type AuthMethod interface { // "none" authentication, RFC 4252 section 5.2. type noneAuth int -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { if err := c.writePacket(Marshal(&userAuthRequestMsg{ User: user, Service: serviceSSH, @@ -122,7 +150,7 @@ func (n *noneAuth) method() string { // a function call, e.g. by prompting the user. type passwordCallback func() (password string, err error) -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type passwordAuthMsg struct { User string `sshtype:"50"` Service string @@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { + keyFormat := signer.PublicKey().Type() + + // Like in sendKexInit, if the public key implements AlgorithmSigner we + // assume it supports all algorithms, otherwise only the key format one. + as, ok := signer.(AlgorithmSigner) + if !ok { + return algorithmSignerWrapper{signer}, keyFormat + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension, fall back to the key + // format algorithm. + return as, keyFormat + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. 
Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + keyAlgos := algorithmsForKeyFormat(keyFormat) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, try the key anyway with the key format + // algorithm, to support servers that fail to list all supported + // algorithms. + return as, keyFormat + } + return as, algo +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { // Authentication is performed by sending an enquiry to test if a key is // acceptable to the remote. If the key is acceptable, the client will // attempt to authenticate with the valid key. If not the client will repeat @@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand } var methods []string for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) + pub := signer.PublicKey() + as, algo := pickSignatureAlgorithm(signer, extensions) + + ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err } @@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand continue } - pub := signer.PublicKey() pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + data := buildDataSignedForAuth(session, userAuthRequestMsg{ User: user, Service: serviceSSH, Method: cb.method(), - }, []byte(pub.Type()), pubKey)) + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return authFailure, nil, err } @@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand Service: serviceSSH, Method: cb.method(), HasSig: true, - Algoname: pub.Type(), + Algoname: algo, PubKey: pubKey, Sig: sig, } @@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool { } // validateKey validates the key provided is acceptable to the server. 
-func validateKey(key PublicKey, user string, c packetConn) (bool, error) { +func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { pubKey := key.Marshal() msg := publickeyAuthMsg{ User: user, Service: serviceSSH, Method: "publickey", HasSig: false, - Algoname: key.Type(), + Algoname: algo, PubKey: pubKey, } if err := c.writePacket(Marshal(&msg)); err != nil { return false, err } - return confirmKeyAck(key, c) + return confirmKeyAck(key, algo, c) } -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { pubKey := key.Marshal() - algoname := key.Type() for { packet, err := c.readPacket() @@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil case msgUserAuthFailure: return false, nil default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) } } } @@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet // along with a list of remaining authentication methods to try next and // an error if an unexpected response was received. func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) { if err := handleBannerResponse(c, packet); err != nil { return authFailure, nil, err } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true case msgUserAuthFailure: var msg userAuthFailureMsg if err := Unmarshal(packet, &msg); err != nil { @@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error { // disabling echoing (e.g. for passwords), and return all the answers. // Challenge may be called multiple times in a single session. After // successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be +// questions, for which the name and instruction messages should be // printed. RFC 4256 section 3.3 details how the UI should behave for // both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) // KeyboardInteractive returns an AuthMethod using a prompt/response // sequence controlled by the server. 
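The `name` parameter above is chosen by the server, per RFC 4256; the client only displays it. A minimal sketch of a conforming callback follows (terminal handling is simplified and every identifier here is illustrative; a real client would disable echo whenever `echos[i]` is false):

```go
package main

import (
	"bufio"
	"errors"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// challenge satisfies ssh.KeyboardInteractiveChallenge. Per RFC 4256 the
// server-chosen name and instruction are displayed even when there are no
// questions, e.g. for a post-authentication notice.
func challenge(name, instruction string, questions []string, echos []bool) ([]string, error) {
	for _, s := range []string{name, instruction} {
		if s != "" {
			fmt.Println(s)
		}
	}
	in := bufio.NewScanner(os.Stdin)
	answers := make([]string, len(questions))
	for i, q := range questions {
		fmt.Print(q)
		if !in.Scan() {
			if err := in.Err(); err != nil {
				return nil, err
			}
			return nil, errors.New("stdin closed before all questions were answered")
		}
		// When echos[i] is false the answer is a secret; a real client
		// would suppress terminal echo here instead of reading plainly.
		answers[i] = in.Text()
	}
	return answers, nil
}

func main() {
	auth := ssh.KeyboardInteractive(challenge)
	_ = auth // use in ssh.ClientConfig.Auth
}
```

The resulting AuthMethod goes into ClientConfig.Auth alongside any other methods.
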
@@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string { return "keyboard-interactive" } -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type initiateMsg struct { User string `sshtype:"50"` Service string @@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue case msgUserAuthInfoRequest: // OK case msgUserAuthFailure: @@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") } - answers, err := cb(msg.User, msg.Instruction, prompts, echos) + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) if err != nil { return authFailure, nil, err } @@ -497,9 +581,9 @@ type retryableAuthMethod struct { maxTries int } -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) + ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) if ok != authFailure || err != nil { // either success, partial success or error terminate return ok, methods, err } @@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct { target string } -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { m := &userAuthRequestMsg{ User: user, Service: serviceSSH, diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 5ae2275744c8..7a5ff2d2eb72 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -44,11 +44,11 @@ var preferredCiphers = []string{ // supportedKexAlgos specifies the supported key-exchange algorithms in // preference order. var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. 
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -61,21 +61,21 @@ var serverForbiddenKexAlgos = map[string]struct{}{ // preferredKexAlgos specifies the default preference for key-exchange algorithms // in preference order. var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, } // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ - CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, - CertSigAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - SigAlgoRSASHA2512, SigAlgoRSASHA2256, - SigAlgoRSA, KeyAlgoDSA, + KeyAlgoRSASHA512, KeyAlgoRSASHA256, + KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, } @@ -89,23 +89,33 @@ var supportedMACs = []string{ var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. var hashFuncs = map[string]crypto.Hash{ - SigAlgoRSA: crypto.SHA1, - SigAlgoRSASHA2256: crypto.SHA256, - SigAlgoRSASHA2512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertSigAlgoRSAv01: crypto.SHA1, - CertSigAlgoRSASHA2256v01: crypto.SHA256, - CertSigAlgoRSASHA2512v01: crypto.SHA512, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. +func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } } // unexpectedMessageError results when the SSH message that we received didn't @@ -139,7 +149,7 @@ type directionAlgorithms struct { // rekeyBytes returns a rekeying intervals in bytes. func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after + // According to RFC 4344 block ciphers should rekey after // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is // 128. 
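+	// That is 2^32 blocks, i.e. 2^36 bytes (64 GiB) at 16 bytes per block.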
switch a.Cipher { @@ -148,10 +158,15 @@ func (a *directionAlgorithms) rekeyBytes() int64 { } - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. + // For others, stick with RFC 4253 recommendation to rekey after 1 Gb of data. return 1 << 30 } +var aeadCiphers = map[string]bool{ + gcmCipherID: true, + chacha20Poly1305ID: true, +} + type algorithms struct { kex string hostKey string @@ -187,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs return } - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return + if !aeadCiphers[ctos.Cipher] { + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } } - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return + if !aeadCiphers[stoc.Cipher] { + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } } ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) @@ -278,8 +297,9 @@ func (c *Config) SetDefaults() { } // buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { data := struct { Session []byte Type byte @@ -287,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK Service string Method string Sign bool - Algo []byte + Algo string PubKey []byte }{ sessionID, diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go index fd6b0681b512..35661a52be30 100644 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -52,7 +52,7 @@ type Conn interface { // SendRequest sends a global request, and returns the // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. + // and payload. See also RFC 4254, section 4. SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) // OpenChannel tries to open an channel. If the request is diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index 67b7322c0580..f6bff60dc741 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -12,8 +12,9 @@ the multiplexed nature of SSH is exposed to users that wish to support others. 
References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 05ad49c3647d..653dc4d2cfbe 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -455,21 +455,38 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) - if len(t.hostKeys) > 0 { + isServer := len(t.hostKeys) > 0 + if isServer { for _, k := range t.hostKeys { - algo := k.PublicKey().Type() - switch algo { - case KeyAlgoRSA: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{SigAlgoRSASHA2512, SigAlgoRSASHA2256, SigAlgoRSA}...) - case CertAlgoRSAv01: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, CertSigAlgoRSAv01}...) - default: - msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + // If k is an AlgorithmSigner, presume it supports all signature algorithms + // associated with the key format. (Ideally AlgorithmSigner would have a + // method to advertise supported algorithms, but it doesn't. This means that + // adding support for a new algorithm is a breaking change, as we will + // immediately negotiate it even if existing implementations don't support + // it. If that ever happens, we'll have to figure something out.) + // If k is not an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign can't pick + // a different default.) + keyFormat := k.PublicKey().Type() + if _, ok := k.(AlgorithmSigner); ok { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) + } else { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + } } + packet := Marshal(msg) // writePacket destroys the contents, so save a copy. 
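The client branch above sends `t.hostKeyAlgorithms` verbatim, so callers can now steer negotiation toward the SHA-2 RSA algorithms (and their certificate forms) through `ClientConfig.HostKeyAlgorithms`. A minimal sketch; the host, user, and credentials are placeholders, and the host key callback is for demonstration only:

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Prefer the SHA-2 RSA algorithms; plain ssh-rsa is a last resort.
		HostKeyAlgorithms: []string{
			ssh.CertAlgoRSASHA512v01, ssh.CertAlgoRSASHA256v01,
			ssh.KeyAlgoRSASHA512, ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSA,
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // never do this outside demos
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```
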
@@ -589,9 +606,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { var result *kexResult if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) + result, err = t.server(kex, &magics) } else { - result, err = t.client(kex, t.algorithms, &magics) + result, err = t.client(kex, &magics) } if err != nil { @@ -618,33 +635,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return nil } -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - kt := k.PublicKey().Type() - if kt == algs.hostKey { - hostKey = k - } else if signer, ok := k.(AlgorithmSigner); ok { - // Some signature algorithms don't show up as key types - // so we have to manually check for a compatible host key. - switch kt { - case KeyAlgoRSA: - if algs.hostKey == SigAlgoRSASHA2256 || algs.hostKey == SigAlgoRSASHA2512 { - hostKey = &rsaSigner{signer, algs.hostKey} - } - case CertAlgoRSAv01: - if algs.hostKey == CertSigAlgoRSASHA2256v01 || algs.hostKey == CertSigAlgoRSASHA2512v01 { - hostKey = &rsaSigner{signer, certToPrivAlgo(algs.hostKey)} - } +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicking or producing an incorrect signature. +type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} + } + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k } } } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") + } - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) return r, err } -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { result, err := kex.Client(t.conn, t.config.Rand, magics) if err != nil { return nil, err @@ -655,7 +691,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics * return nil, err } - if err := verifyHostKeySignature(hostKey, algs.hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 766e9293975e..927a90cd46f8 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -20,12 +20,14 @@ import ( ) const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" // For the following kex only the client half contains a production // ready implementation. The server half only consists of a minimal @@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) { // kexAlgorithm abstracts different key exchange algorithms. type kexAlgorithm interface { // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) // Client runs the client-side key agreement. Caller is // responsible for verifying the host key signature. @@ -86,6 +89,7 @@ type kexAlgorithm interface { // dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. type dhGroup struct { g, p, pMinus1 *big.Int + hashFunc crypto.Hash } func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { @@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, } func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - var x *big.Int for { var err error @@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha return nil, err } - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, kexDHReply.HostKey) writeInt(h, X) @@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: kexDHReply.HostKey, Signature: kexDHReply.Signature, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, nil } -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha hostKeyBytes := priv.PublicKey().Marshal() - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, hostKeyBytes) writeInt(h, kexDHInit.X) @@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } @@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: hostKeyBytes, Signature: sig, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, err } @@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { return true } -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return nil, err @@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p }, nil } +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + var kexAlgoMap = map[string]kexAlgorithm{} func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, } - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. + // These are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + group14 := &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), } + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} } -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. type curve25519sha256 struct{} type curve25519KeyPair struct { @@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh }, nil } -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh H := h.Sum(nil) - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh // diffie-hellman-group-exchange-sha256 key agreement protocols, // as described in RFC 4419 type dhGEXSHA struct { - g, p *big.Int hashFunc crypto.Hash } @@ -563,14 +586,7 @@ const ( dhGroupExchangeMaximumBits = 8192 ) -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - var kexDHGexGroup 
kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { return nil, err } // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) } - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: server provided gex g is not safe") } // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(msg.P, 1) x, err := rand.Int(randSource, pHalf) if err != nil { return nil, err } - X := new(big.Int).Exp(gex.g, x, gex.p) + X := new(big.Int).Exp(msg.G, x, msg.P) kexDHGexInit := kexDHGexInitMsg{ X: X, } @@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - return nil, err + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: derived k is not safe") } @@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, msg.P) + writeInt(h, msg.G) writeInt(h, X) writeInt(h, kexDHGexReply.Y) K := make([]byte, intLength(kInt)) @@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. 
-func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { @@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - // Send GexGroup // This is the group called diffie-hellman-group14-sha1 in RFC // 4253 and Oakley Group 14 in RFC 3526. p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) + g := big.NewInt(2) - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, + msg := &kexDHGexGroupMsg{ + P: p, + G: g, } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + if err := c.writePacket(Marshal(msg)); err != nil { return nil, err } @@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(p, 1) y, err := rand.Int(randSource, pHalf) if err != nil { return } + Y := new(big.Int).Exp(g, y, p) - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) hostKeyBytes := priv.PublicKey().Marshal() @@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, p) + writeInt(h, g) writeInt(h, kexDHGexInit.X) writeInt(h, Y) @@ -758,7 +750,7 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index c67d3a31cbe8..729698041352 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -30,8 +30,9 @@ import ( "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) -// These constants represent the algorithm names for key types supported by this -// package. +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. const ( KeyAlgoRSA = "ssh-rsa" KeyAlgoDSA = "ssh-dss" @@ -41,16 +42,21 @@ const ( KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" KeyAlgoED25519 = "ssh-ed25519" KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" ) -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. + SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. + SigAlgoRSASHA2512 = KeyAlgoRSASHA512 ) // parsePubKey parses a public key of the given algorithm. @@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err case KeyAlgoSKED25519: return parseSKEd25519(in) case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) + cert, err := parseCert(in, certKeyAlgoNames[algo]) if err != nil { return nil, nil, err } @@ -178,7 +184,7 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey return "", nil, nil, "", nil, io.EOF } -// ParseAuthorizedKeys parses a public key from an authorized_keys +// ParseAuthorizedKey parses a public key from an authorized_keys // file used in OpenSSH according to the sshd(8) manual page. func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { for len(in) > 0 { @@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } -// PublicKey is an abstraction of different types of public keys. +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". + // Type returns the key format name, e.g. "ssh-rsa". Type() string - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. 
Marshal() []byte - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. Verify(data []byte, sig *Signature) error } @@ -311,25 +320,32 @@ type CryptoPublicKey interface { } // A Signer can create signatures that verify against a public key. +// +// Some Signers provided by this package also implement AlgorithmSigner. type Signer interface { - // PublicKey returns an associated PublicKey instance. + // PublicKey returns the associated PublicKey. PublicKey() PublicKey - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). Sign(rand io.Reader, data []byte) (*Signature, error) } -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, so it should +// be prepared to be invoked with every algorithm supported by the public key +// format. type AlgorithmSigner interface { Signer - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. 
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } @@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte { } func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: + supportedAlgos := algorithmsForKeyFormat(r.Type()) + if !contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } + hash := hashFuncs[sig.Format] h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := crypto.SHA1.New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey { } func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { @@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := crypto.SHA1.New() + h := hashFuncs[k.PublicKey().Type()].New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool { return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() } -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - // parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { var w struct { @@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := sha256.New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -939,15 +936,6 @@ func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { return &dsaPrivateKey{key}, nil } -type rsaSigner struct { - AlgorithmSigner - defaultAlgorithm string -} - -func (s *rsaSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.AlgorithmSigner.SignWithAlgorithm(rand, data, s.defaultAlgorithm) -} - type wrappedSigner struct { signer crypto.Signer pubKey PublicKey @@ -970,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey { } func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") + return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } + if algorithm == "" { + algorithm = s.pubKey.Type() + } - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } + supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) + if !contains(supportedAlgos, algorithm) { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } + hashFunc := hashFuncs[algorithm] var digest []byte if hashFunc != 0 { h := hashFunc.New() diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index ac41a4168bfe..922032d95256 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -68,7 +68,7 @@ type kexInitMsg struct { // See RFC 4253, section 8. 
-// Diffie-Helman +// Diffie-Hellman const msgKexDHInit = 30 type kexDHInitMsg struct { @@ -141,6 +141,14 @@ type serviceAcceptMsg struct { Service string `sshtype:"6"` } +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + // See RFC 4252, section 5. const msgUserAuthRequest = 50 @@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60 const msgUserAuthInfoResponse = 61 type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` } // See RFC 4254, section 5.1. @@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) { msg = new(serviceRequestMsg) case msgServiceAccept: msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) case msgKexInit: msg = new(kexInitMsg) case msgKexDHInit: @@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{ msgDisconnect: "disconnectMsg", msgServiceRequest: "serviceRequestMsg", msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", msgKexInit: "kexInitMsg", msgKexDHInit: "kexDHInitMsg", msgKexDHReply: "kexDHReplyMsg", diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 6a58e1208920..2260b20afc17 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -68,8 +68,16 @@ type ServerConfig struct { // NoClientAuth is true if clients are allowed to connect without // authenticating. + // To determine NoClientAuth at runtime, set NoClientAuth to true + // and the optional NoClientAuthCallback to a non-nil value. NoClientAuth bool + // NoClientAuthCallback, if non-nil, is called when a user + // attempts to authenticate with auth method "none". + // NoClientAuth must also be set to true for this to be used, or + // this func is unused. + NoClientAuthCallback func(ConnMetadata) (*Permissions, error) + // MaxAuthTries specifies the maximum number of authentication attempts // permitted per connection. If set to a negative number, the number of // attempts are unlimited. If set to zero, the number of attempts are limited @@ -120,7 +128,7 @@ type ServerConfig struct { } // AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server +// key exists with the same public key format, it is replaced. Each server // config must have at least one host key. func (s *ServerConfig) AddHostKey(key Signer) { for i, k := range s.hostKeys { @@ -212,9 +220,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } // signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) +// and serializes the result in SSH wire format. algo is the negotiated +// algorithm and may be a certificate type.
+func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return nil, err } @@ -284,7 +293,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } @@ -454,7 +463,11 @@ userAuthLoop: switch userAuthReq.Method { case "none": if config.NoClientAuth { - authErr = nil + if config.NoClientAuthCallback != nil { + perms, authErr = config.NoClientAuthCallback(s) + } else { + authErr = nil + } } // allow initial attempt of 'none' without penalty @@ -553,6 +566,7 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } + // Ensure the public key algo and signature algo // are supported. Compare the private key // algorithm name that corresponds to algo with @@ -562,7 +576,12 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + if underlyingAlgo(algo) != sig.Format { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) if err := pubKey.Verify(signedData, sig); err != nil { return nil, err @@ -633,6 +652,30 @@ userAuthLoop: } authFailures++ + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message then a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior.
+ continue + } var failureMsg userAuthFailureMsg if config.PasswordCallback != nil { @@ -670,7 +713,7 @@ type sshClientKeyboardInteractive struct { *connection } -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { if len(questions) != len(echos) { return nil, errors.New("ssh: echos and questions must have equal length") } @@ -682,6 +725,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest } if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, Instruction: instruction, NumPrompts: uint32(len(questions)), Prompts: prompts, diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go index d3321f6b784b..acef62259fde 100644 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "sync" ) @@ -85,6 +84,7 @@ const ( IXANY = 39 IXOFF = 40 IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 ISIG = 50 ICANON = 51 XCASE = 52 @@ -123,7 +123,7 @@ type Session struct { // output and error. // // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a + // descriptor to an instance of io.Discard. There is a // fixed amount of buffering that is shared for the two streams. // If either blocks it may eventually cause the remote // command to block. @@ -505,7 +505,7 @@ func (s *Session) stdout() { return } if s.Stdout == nil { - s.Stdout = ioutil.Discard + s.Stdout = io.Discard } s.copyFuncs = append(s.copyFuncs, func() error { _, err := io.Copy(s.Stdout, s.ch) @@ -518,7 +518,7 @@ func (s *Session) stderr() { return } if s.Stderr == nil { - s.Stderr = ioutil.Discard + s.Stderr = io.Discard } s.copyFuncs = append(s.copyFuncs, func() error { _, err := io.Copy(s.Stderr, s.ch.Stderr()) diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index 49ddc2e7de46..acf5a21bbb0e 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -238,15 +238,19 @@ var ( // (to setup server->client keys) or clientKeys (for client->server keys). func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) generateKeyMaterial(iv, d.ivTag, kex) generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } return cipherModes[algs.Cipher].create(key, iv, macKey, algs) } diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
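To illustrate the NoClientAuthCallback hook added to ServerConfig above: a minimal sketch, not part of this patch, of a server config that decides "none" authentication at runtime. The newServerConfig helper and the guest-only policy are hypothetical examples; NoClientAuth must be true or the callback is never consulted.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// newServerConfig is a hypothetical helper wiring up the new callback.
func newServerConfig(hostKey ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		// NoClientAuth must also be set for NoClientAuthCallback to be used.
		NoClientAuth: true,
		NoClientAuthCallback: func(conn ssh.ConnMetadata) (*ssh.Permissions, error) {
			// Hypothetical policy: only "guest" may connect without
			// authenticating; returning an error fails the "none" attempt.
			if conn.User() == "guest" {
				return &ssh.Permissions{}, nil
			}
			return nil, fmt.Errorf("ssh: user %q must authenticate", conn.User())
		},
	}
	cfg.AddHostKey(hostKey)
	return cfg
}
```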
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d3f88e..000000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 344bd1433450..000000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3ef4..000000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. 
-type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 5270db5db7db..000000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. 
-func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341faa..000000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. 
- // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
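// For context while reviewing this removal: code built against this shim used
// the same pattern as the standard library's context package, which replaces
// it on modern Go. A minimal sketch (illustrative only, not part of the
// vendored file; helper names are hypothetical):
//
//	func fetchWithBudget(parent context.Context, url string) error {
//		ctx, cancel := context.WithTimeout(parent, 2*time.Second)
//		defer cancel() // stops the timer and detaches ctx from parent
//		req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
//		if err != nil {
//			return err
//		}
//		resp, err := http.DefaultClient.Do(req)
//		if err != nil {
//			return err // context.DeadlineExceeded if the budget expired
//		}
//		return resp.Body.Close()
//	}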
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index c79aa73f28bb..6e071e852432 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) +// +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = +// +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -189,12 +191,13 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -267,27 +270,28 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. 
While most of the values that can be encoded diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index c936843eafa1..780968d6c19b 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -139,7 +139,6 @@ func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *d func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) - close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) @@ -147,6 +146,8 @@ func (c *dialCall) dial(ctx context.Context, addr string) { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() + + close(c.done) } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 2663e5d287ee..f2067dabc59e 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -136,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) + return fmt.Sprintf("invalid header field value for %q", string(e)) } var ( diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 96a747905241..184ac45feb70 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -23,7 +23,7 @@ const frameHeaderLen = 9 var padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 +// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2 type FrameType uint8 const ( @@ -146,7 +146,7 @@ func typeFrameParser(t FrameType) frameParser { // A FrameHeader is the 9 byte header of all HTTP/2 frames. // -// See http://http2.github.io/http2-spec/#FrameHeader +// See https://httpwg.org/specs/rfc7540.html#FrameHeader type FrameHeader struct { valid bool // caller can access []byte fields in the Frame @@ -575,7 +575,7 @@ func (fr *Framer) checkFrameOrder(f Frame) error { // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1 type DataFrame struct { FrameHeader data []byte @@ -698,7 +698,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by // endpoints communicate, such as preferences and constraints on peer // behavior. // -// See http://http2.github.io/http2-spec/#SETTINGS +// See https://httpwg.org/specs/rfc7540.html#SETTINGS type SettingsFrame struct { FrameHeader p []byte @@ -837,7 +837,7 @@ func (f *Framer) WriteSettingsAck() error { // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7 type PingFrame struct { FrameHeader Data [8]byte @@ -870,7 +870,7 @@ func (f *Framer) WritePing(ack bool, data [8]byte) error { } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.8 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8 type GoAwayFrame struct { FrameHeader LastStreamID uint32 @@ -934,7 +934,7 @@ func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p } // A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader Increment uint32 // never read with high bit set @@ -1123,7 +1123,7 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error { } // A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3 type PriorityFrame struct { FrameHeader PriorityParam @@ -1193,7 +1193,7 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { } // A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4 type RSTStreamFrame struct { FrameHeader ErrCode ErrCode @@ -1225,7 +1225,7 @@ func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { } // A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10 type ContinuationFrame struct { FrameHeader headerFragBuf []byte @@ -1266,7 +1266,7 @@ func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlock } // A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 +// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6 type PushPromiseFrame struct { FrameHeader PromiseID uint32 @@ -1532,7 +1532,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) + // Don't include the value in the error, because it may be sensitive. + invalid = headerFieldValueError(hf.Name) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go new file mode 100644 index 000000000000..aca4b2b31acd --- /dev/null +++ b/vendor/golang.org/x/net/http2/go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
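// Reviewer note on the file added below: it pairs with the not_go118.go stub
// that appears later in this diff, selected by inverse build constraints.
// (*tls.Conn).NetConn was added in Go 1.18, so on older toolchains the stub
// returns nil and callers must tolerate that. The shape of the pattern, with
// hypothetical file and function names:
//
//	// underlying_go118.go
//	//go:build go1.18
//	func underlying(tc *tls.Conn) net.Conn { return tc.NetConn() }
//
//	// underlying_not_go118.go
//	//go:build !go1.18
//	func underlying(tc *tls.Conn) net.Conn { return nil } // feature-detect at call sites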
+ +//go:build go1.18 +// +build go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return tc.NetConn() +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index 9e12941da4c3..149b3dd20e45 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -27,7 +27,14 @@ func buildCommonHeaderMaps() { "accept-language", "accept-ranges", "age", + "access-control-allow-credentials", + "access-control-allow-headers", + "access-control-allow-methods", "access-control-allow-origin", + "access-control-expose-headers", + "access-control-max-age", + "access-control-request-headers", + "access-control-request-method", "allow", "authorization", "cache-control", @@ -53,6 +60,7 @@ func buildCommonHeaderMaps() { "link", "location", "max-forwards", + "origin", "proxy-authenticate", "proxy-authorization", "range", @@ -68,6 +76,8 @@ func buildCommonHeaderMaps() { "vary", "via", "www-authenticate", + "x-forwarded-for", + "x-forwarded-proto", } commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) @@ -85,3 +95,11 @@ func lowerHeader(v string) (lower string, ascii bool) { } return asciiToLower(v) } + +func canonicalHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonCanonHeader[v]; ok { + return s + } + return http.CanonicalHeaderKey(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 97f17831fc55..46219da2b01b 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) { e.dynTab.setMaxSize(v) } +// MaxDynamicTableSize returns the current dynamic header table size. +func (e *Encoder) MaxDynamicTableSize() (v uint32) { + return e.dynTab.maxSize +} + // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table @@ -191,7 +196,7 @@ func appendTableSize(dst []byte, v uint32) []byte { // bit prefix, to dst and returns the extended buffer. // // See -// http://http2.github.io/http2-spec/compression.html#integer.representation +// https://httpwg.org/specs/rfc7541.html#integer.representation func appendVarInt(dst []byte, n byte, i uint64) []byte { k := uint64((1 << n) - 1) if i < k { diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 85f18a2b0a86..ebdfbee964ae 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -59,7 +59,7 @@ func (hf HeaderField) String() string { // Size returns the size of an entry per RFC 7541 section 4.1. func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. 
The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's @@ -158,7 +158,7 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2 table headerFieldTable size uint32 // in bytes maxSize uint32 // current maxSize @@ -307,27 +307,27 @@ func (d *Decoder) parseHeaderFieldRepr() error { case b&128 != 0: // Indexed representation. // High bit set? - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.1 return d.parseFieldIndexed() case b&192 == 64: // 6.2.1 Literal Header Field with Incremental Indexing // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1 return d.parseFieldLiteral(6, indexedTrue) case b&240 == 0: // 6.2.2 Literal Header Field without Indexing // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2 return d.parseFieldLiteral(4, indexedFalse) case b&240 == 16: // 6.2.3 Literal Header Field never Indexed // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3 return d.parseFieldLiteral(4, indexedNever) case b&224 == 32: // 6.3 Dynamic Table Size Update // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + // https://httpwg.org/specs/rfc7541.html#rfc.section.6.3 return d.parseDynamicTableSizeUpdate() } @@ -420,7 +420,7 @@ var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} // readVarInt reads an unsigned variable length integer off the // beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1. // // n must always be between 1 and 8. // diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index fe0b84ccd467..20d083a716da 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -169,25 +169,50 @@ func buildRootHuffmanNode() { // AppendHuffmanString appends s, as encoded in Huffman codes, to dst // and returns the extended buffer. func AppendHuffmanString(dst []byte, s string) []byte { - rembits := uint8(8) - + // This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array) + // So if a uint64 buffer has fewer than 32 valid bits, it can always accommodate another huffmanCode.
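// Worked example of the invariant above (bit counts made up for
// illustration): with n == 31 bits pending in x, appending a 30-bit code
// yields n == 61, still within the 64-bit buffer; the flush branch below then
// writes the top 32 bits to dst and leaves n == 29. Because no Huffman code
// in this table exceeds 30 bits, n can never reach 64 between flushes.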
+ var ( + x uint64 // buffer + n uint // number of valid bits present in x + ) for i := 0; i < len(s); i++ { - if rembits == 8 { - dst = append(dst, 0) + c := s[i] + n += uint(huffmanCodeLen[c]) + x <<= huffmanCodeLen[c] % 64 + x |= uint64(huffmanCodes[c]) + if n >= 32 { + n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift + y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32 + dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } - dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } - - if rembits < 8 { - // special EOS symbol - code := uint32(0x3fffffff) - nbits := uint8(30) - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t + // Add padding bits if necessary + if over := n % 8; over > 0 { + const ( + eosCode = 0x3fffffff + eosNBits = 30 + eosPadByte = eosCode >> (eosNBits - 8) + ) + pad := 8 - over + x = (x << pad) | (eosPadByte >> over) + n += pad // 8 now divides into n exactly } - - return dst + // n in (0, 8, 16, 24, 32) + switch n / 8 { + case 0: + return dst + case 1: + return append(dst, byte(x)) + case 2: + y := uint16(x) + return append(dst, byte(y>>8), byte(y)) + case 3: + y := uint16(x >> 8) + return append(dst, byte(y>>8), byte(y), byte(x)) + } + // case 4: + y := uint32(x) + return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) } // HuffmanEncodeLength returns the number of bytes required to encode @@ -199,35 +224,3 @@ func HuffmanEncodeLength(s string) uint64 { } return (n + 7) / 8 } - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/hpack/static_table.go b/vendor/golang.org/x/net/http2/hpack/static_table.go new file mode 100644 index 000000000000..754a1eb919e9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/static_table.go @@ -0,0 +1,188 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT.
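// The generated table below indexes entries both by name and by name/value
// pair so the encoder can emit the compact indexed form when a full match
// exists. An illustrative use of this package's public API (not part of the
// generated file):
//
//	var buf bytes.Buffer
//	enc := hpack.NewEncoder(&buf)
//	_ = enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
//	// ":method: GET" is static-table entry 2, so the full header field
//	// encodes to the single indexed byte 0x82 (high bit + index 2).
//	fmt.Printf("%x\n", buf.Bytes()) // 82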
+ +package hpack + +var staticTable = &headerFieldTable{ + evictCount: 0, + byName: map[string]uint64{ + ":authority": 1, + ":method": 3, + ":path": 5, + ":scheme": 7, + ":status": 14, + "accept-charset": 15, + "accept-encoding": 16, + "accept-language": 17, + "accept-ranges": 18, + "accept": 19, + "access-control-allow-origin": 20, + "age": 21, + "allow": 22, + "authorization": 23, + "cache-control": 24, + "content-disposition": 25, + "content-encoding": 26, + "content-language": 27, + "content-length": 28, + "content-location": 29, + "content-range": 30, + "content-type": 31, + "cookie": 32, + "date": 33, + "etag": 34, + "expect": 35, + "expires": 36, + "from": 37, + "host": 38, + "if-match": 39, + "if-modified-since": 40, + "if-none-match": 41, + "if-range": 42, + "if-unmodified-since": 43, + "last-modified": 44, + "link": 45, + "location": 46, + "max-forwards": 47, + "proxy-authenticate": 48, + "proxy-authorization": 49, + "range": 50, + "referer": 51, + "refresh": 52, + "retry-after": 53, + "server": 54, + "set-cookie": 55, + "strict-transport-security": 56, + "transfer-encoding": 57, + "user-agent": 58, + "vary": 59, + "via": 60, + "www-authenticate": 61, + }, + byNameValue: map[pairNameValue]uint64{ + {name: ":authority", value: ""}: 1, + {name: ":method", value: "GET"}: 2, + {name: ":method", value: "POST"}: 3, + {name: ":path", value: "/"}: 4, + {name: ":path", value: "/index.html"}: 5, + {name: ":scheme", value: "http"}: 6, + {name: ":scheme", value: "https"}: 7, + {name: ":status", value: "200"}: 8, + {name: ":status", value: "204"}: 9, + {name: ":status", value: "206"}: 10, + {name: ":status", value: "304"}: 11, + {name: ":status", value: "400"}: 12, + {name: ":status", value: "404"}: 13, + {name: ":status", value: "500"}: 14, + {name: "accept-charset", value: ""}: 15, + {name: "accept-encoding", value: "gzip, deflate"}: 16, + {name: "accept-language", value: ""}: 17, + {name: "accept-ranges", value: ""}: 18, + {name: "accept", value: ""}: 19, + {name: "access-control-allow-origin", value: ""}: 20, + {name: "age", value: ""}: 21, + {name: "allow", value: ""}: 22, + {name: "authorization", value: ""}: 23, + {name: "cache-control", value: ""}: 24, + {name: "content-disposition", value: ""}: 25, + {name: "content-encoding", value: ""}: 26, + {name: "content-language", value: ""}: 27, + {name: "content-length", value: ""}: 28, + {name: "content-location", value: ""}: 29, + {name: "content-range", value: ""}: 30, + {name: "content-type", value: ""}: 31, + {name: "cookie", value: ""}: 32, + {name: "date", value: ""}: 33, + {name: "etag", value: ""}: 34, + {name: "expect", value: ""}: 35, + {name: "expires", value: ""}: 36, + {name: "from", value: ""}: 37, + {name: "host", value: ""}: 38, + {name: "if-match", value: ""}: 39, + {name: "if-modified-since", value: ""}: 40, + {name: "if-none-match", value: ""}: 41, + {name: "if-range", value: ""}: 42, + {name: "if-unmodified-since", value: ""}: 43, + {name: "last-modified", value: ""}: 44, + {name: "link", value: ""}: 45, + {name: "location", value: ""}: 46, + {name: "max-forwards", value: ""}: 47, + {name: "proxy-authenticate", value: ""}: 48, + {name: "proxy-authorization", value: ""}: 49, + {name: "range", value: ""}: 50, + {name: "referer", value: ""}: 51, + {name: "refresh", value: ""}: 52, + {name: "retry-after", value: ""}: 53, + {name: "server", value: ""}: 54, + {name: "set-cookie", value: ""}: 55, + {name: "strict-transport-security", value: ""}: 56, + {name: "transfer-encoding", value: ""}: 57, + {name: "user-agent", value: 
""}: 58, + {name: "vary", value: ""}: 59, + {name: "via", value: ""}: 60, + {name: "www-authenticate", value: ""}: 61, + }, + ents: []HeaderField{ + {Name: ":authority", Value: "", Sensitive: false}, + {Name: ":method", Value: "GET", Sensitive: false}, + {Name: ":method", Value: "POST", Sensitive: false}, + {Name: ":path", Value: "/", Sensitive: false}, + {Name: ":path", Value: "/index.html", Sensitive: false}, + {Name: ":scheme", Value: "http", Sensitive: false}, + {Name: ":scheme", Value: "https", Sensitive: false}, + {Name: ":status", Value: "200", Sensitive: false}, + {Name: ":status", Value: "204", Sensitive: false}, + {Name: ":status", Value: "206", Sensitive: false}, + {Name: ":status", Value: "304", Sensitive: false}, + {Name: ":status", Value: "400", Sensitive: false}, + {Name: ":status", Value: "404", Sensitive: false}, + {Name: ":status", Value: "500", Sensitive: false}, + {Name: "accept-charset", Value: "", Sensitive: false}, + {Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false}, + {Name: "accept-language", Value: "", Sensitive: false}, + {Name: "accept-ranges", Value: "", Sensitive: false}, + {Name: "accept", Value: "", Sensitive: false}, + {Name: "access-control-allow-origin", Value: "", Sensitive: false}, + {Name: "age", Value: "", Sensitive: false}, + {Name: "allow", Value: "", Sensitive: false}, + {Name: "authorization", Value: "", Sensitive: false}, + {Name: "cache-control", Value: "", Sensitive: false}, + {Name: "content-disposition", Value: "", Sensitive: false}, + {Name: "content-encoding", Value: "", Sensitive: false}, + {Name: "content-language", Value: "", Sensitive: false}, + {Name: "content-length", Value: "", Sensitive: false}, + {Name: "content-location", Value: "", Sensitive: false}, + {Name: "content-range", Value: "", Sensitive: false}, + {Name: "content-type", Value: "", Sensitive: false}, + {Name: "cookie", Value: "", Sensitive: false}, + {Name: "date", Value: "", Sensitive: false}, + {Name: "etag", Value: "", Sensitive: false}, + {Name: "expect", Value: "", Sensitive: false}, + {Name: "expires", Value: "", Sensitive: false}, + {Name: "from", Value: "", Sensitive: false}, + {Name: "host", Value: "", Sensitive: false}, + {Name: "if-match", Value: "", Sensitive: false}, + {Name: "if-modified-since", Value: "", Sensitive: false}, + {Name: "if-none-match", Value: "", Sensitive: false}, + {Name: "if-range", Value: "", Sensitive: false}, + {Name: "if-unmodified-since", Value: "", Sensitive: false}, + {Name: "last-modified", Value: "", Sensitive: false}, + {Name: "link", Value: "", Sensitive: false}, + {Name: "location", Value: "", Sensitive: false}, + {Name: "max-forwards", Value: "", Sensitive: false}, + {Name: "proxy-authenticate", Value: "", Sensitive: false}, + {Name: "proxy-authorization", Value: "", Sensitive: false}, + {Name: "range", Value: "", Sensitive: false}, + {Name: "referer", Value: "", Sensitive: false}, + {Name: "refresh", Value: "", Sensitive: false}, + {Name: "retry-after", Value: "", Sensitive: false}, + {Name: "server", Value: "", Sensitive: false}, + {Name: "set-cookie", Value: "", Sensitive: false}, + {Name: "strict-transport-security", Value: "", Sensitive: false}, + {Name: "transfer-encoding", Value: "", Sensitive: false}, + {Name: "user-agent", Value: "", Sensitive: false}, + {Name: "vary", Value: "", Sensitive: false}, + {Name: "via", Value: "", Sensitive: false}, + {Name: "www-authenticate", Value: "", Sensitive: false}, + }, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go 
b/vendor/golang.org/x/net/http2/hpack/tables.go index a66cfbea69d9..8cbdf3f019cb 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -96,8 +96,7 @@ func (t *headerFieldTable) evictOldest(n int) { // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic // table, the return value i actually refers to the entry t.ents[t.len()-i]. // -// All tables are assumed to be a dynamic tables except for the global -// staticTable pointer. +// All tables are assumed to be a dynamic tables except for the global staticTable. // // See Section 2.3.3. func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { @@ -125,81 +124,6 @@ func (t *headerFieldTable) idToIndex(id uint64) uint64 { return k + 1 } -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = newStaticTable() -var staticTableEntries = [...]HeaderField{ - {Name: ":authority"}, - {Name: ":method", Value: "GET"}, - {Name: ":method", Value: "POST"}, - {Name: ":path", Value: "/"}, - {Name: ":path", Value: "/index.html"}, - {Name: ":scheme", Value: "http"}, - {Name: ":scheme", Value: "https"}, - {Name: ":status", Value: "200"}, - {Name: ":status", Value: "204"}, - {Name: ":status", Value: "206"}, - {Name: ":status", Value: "304"}, - {Name: ":status", Value: "400"}, - {Name: ":status", Value: "404"}, - {Name: ":status", Value: "500"}, - {Name: "accept-charset"}, - {Name: "accept-encoding", Value: "gzip, deflate"}, - {Name: "accept-language"}, - {Name: "accept-ranges"}, - {Name: "accept"}, - {Name: "access-control-allow-origin"}, - {Name: "age"}, - {Name: "allow"}, - {Name: "authorization"}, - {Name: "cache-control"}, - {Name: "content-disposition"}, - {Name: "content-encoding"}, - {Name: "content-language"}, - {Name: "content-length"}, - {Name: "content-location"}, - {Name: "content-range"}, - {Name: "content-type"}, - {Name: "cookie"}, - {Name: "date"}, - {Name: "etag"}, - {Name: "expect"}, - {Name: "expires"}, - {Name: "from"}, - {Name: "host"}, - {Name: "if-match"}, - {Name: "if-modified-since"}, - {Name: "if-none-match"}, - {Name: "if-range"}, - {Name: "if-unmodified-since"}, - {Name: "last-modified"}, - {Name: "link"}, - {Name: "location"}, - {Name: "max-forwards"}, - {Name: "proxy-authenticate"}, - {Name: "proxy-authorization"}, - {Name: "range"}, - {Name: "referer"}, - {Name: "refresh"}, - {Name: "retry-after"}, - {Name: "server"}, - {Name: "set-cookie"}, - {Name: "strict-transport-security"}, - {Name: "transfer-encoding"}, - {Name: "user-agent"}, - {Name: "vary"}, - {Name: "via"}, - {Name: "www-authenticate"}, -} - -func newStaticTable() *headerFieldTable { - t := &headerFieldTable{} - t.init() - for _, e := range staticTableEntries[:] { - t.addEntry(e) - } - return t -} - var huffmanCodes = [256]uint32{ 0x1ff8, 0x7fffd8, diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 5571ccfd2613..6f2df281872e 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,6 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. 
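// For reviewers of this vendor bump, a sketch of typical server wiring
// (illustrative only; the handler and addresses are placeholders, and the
// two table-size fields are the new knobs introduced in this diff):
//
//	srv := &http.Server{Addr: ":8443", Handler: handler}
//	err := http2.ConfigureServer(srv, &http2.Server{
//		MaxConcurrentStreams:      250,
//		MaxDecoderHeaderTableSize: 4096, // SETTINGS_HEADER_TABLE_SIZE we advertise
//		MaxEncoderHeaderTableSize: 4096, // cap honored from the peer's setting
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))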
-// package http2 // import "golang.org/x/net/http2" import ( @@ -56,14 +55,14 @@ const ( ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2 initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. NextProtoTLS = "h2" - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues initialHeaderTableSize = 4096 initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size @@ -112,7 +111,7 @@ func (st streamState) String() string { // Setting is a setting parameter: which setting it is, and its value. type Setting struct { // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues + // See https://httpwg.org/specs/rfc7540.html#SettingFormat ID SettingID // Val is the value. @@ -144,7 +143,7 @@ func (s Setting) Valid() error { } // A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings +// https://httpwg.org/specs/rfc7540.html#iana-settings type SettingID uint16 const ( @@ -176,10 +175,11 @@ func (s SettingID) String() string { // name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " +// +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// *) a non-empty string starting with '/' -// *) the string '*', for OPTIONS requests. +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go new file mode 100644 index 000000000000..eab532c96bc0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go118.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package http2 + +import ( + "crypto/tls" + "net" +) + +func tlsUnderlyingConn(tc *tls.Conn) net.Conn { + return nil +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index e644d9b2f34d..4eb7617fa0db 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -98,6 +98,19 @@ type Server struct { // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. 
If zero, the default value + // of 4096 is used. + MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a @@ -143,7 +156,7 @@ type Server struct { } func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection > initialWindowSize { + if s.MaxUploadBufferPerConnection >= initialWindowSize { return s.MaxUploadBufferPerConnection } return 1 << 20 @@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +func (s *Server) maxDecoderHeaderTableSize() uint32 { + if v := s.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (s *Server) maxEncoderHeaderTableSize() uint32 { + if v := s.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + // maxQueuedControlFrames is the maximum number of control frames like // SETTINGS, PING and RST_STREAM that will be queued for writing before // the connection is closed to prevent memory exhaustion attacks. @@ -315,6 +342,20 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler + + // UpgradeRequest is an initial request received on a connection + // undergoing an h2c upgrade. The request body must have been + // completely read from the connection before calling ServeConn, + // and the 101 Switching Protocols response written. + UpgradeRequest *http.Request + + // Settings is the decoded contents of the HTTP2-Settings header + // in an h2c upgrade request. + Settings []byte + + // SawClientPreface is set if the HTTP/2 connection preface + // has already been read from the connection. + SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -380,9 +421,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, + sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -400,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewRandomWriteScheduler() + sc.writeSched = NewPriorityWriteScheduler(nil) } // These start at the RFC-specified defaults. 
If there is a higher @@ -409,12 +450,13 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) fr := NewFramer(sc.bw, c) if s.CountError != nil { fr.countError = s.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr @@ -465,9 +507,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } + if opts.Settings != nil { + fr := &SettingsFrame{ + FrameHeader: FrameHeader{valid: true}, + p: opts.Settings, + } + if err := fr.ForeachSetting(sc.processSetting); err != nil { + sc.rejectConn(ErrCodeProtocol, "invalid settings") + return + } + opts.Settings = nil + } + if hook := testHookGetServerConn; hook != nil { hook(sc) } + + if opts.UpgradeRequest != nil { + sc.upgradeRequest(opts.UpgradeRequest) + opts.UpgradeRequest = nil + } + sc.serve() } @@ -512,6 +572,7 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool + sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? @@ -525,9 +586,9 @@ type serverConn struct { streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 - headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + canonHeaderKeysSize int // canonHeader keys size in bytes writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush @@ -588,7 +649,9 @@ type stream struct { resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -704,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { } } +// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size +// of the entries in the canonHeader cache. +// This should be larger than the size of unique, uncommon header keys likely to +// be sent by the peer, while not so high as to permit unreasonable memory usage +// if the peer sends an unbounded number of unique header keys. +const maxCachedCanonicalHeadersKeysSize = 2048 + func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() buildCommonHeaderMapsOnce() @@ -719,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of - // entries in the canonHeader cache. 
This should be larger than the number - // of unique, uncommon header keys likely to be sent by the peer, while not - // so high as to permit unreasonable memory usage if the peer sends an unbounded - // number of unique header keys. - const maxCachedCanonicalHeaders = 32 - if len(sc.canonHeader) < maxCachedCanonicalHeaders { + size := 100 + len(v)*2 // 100 bytes of map overhead + key + value + if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize { sc.canonHeader[v] = cv + sc.canonHeaderKeysSize += size } return cv } @@ -828,6 +894,7 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) @@ -914,6 +981,8 @@ func (sc *serverConn) serve() { } case *startPushRequest: sc.startPush(v) + case func(*serverConn): + v(sc) default: panic(fmt.Sprintf("unexpected type %T", v)) } @@ -974,6 +1043,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { + if sc.sawClientPreface { + return nil + } errc := make(chan error, 1) go func() { // Read the client preface @@ -1334,6 +1406,9 @@ func (sc *serverConn) startGracefulShutdownInternal() { func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { + if sc.goAwayCode == ErrCodeNo { + sc.goAwayCode = code + } return } sc.inGoAway = true @@ -1421,6 +1496,21 @@ func (sc *serverConn) processFrame(f Frame) error { sc.sawFirstSettings = true } + // Discard frames for streams initiated after the identified last + // stream sent in a GOAWAY, or all frames after sending an error. + // We still need to return connection-level flow control for DATA frames. + // RFC 9113 Section 6.8. + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) { + + if f, ok := f.(*DataFrame); ok { + if sc.inflow.available() < int32(f.Length) { + return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl)) + } + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + } + return nil + } + switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) @@ -1463,9 +1553,6 @@ func (sc *serverConn) processPing(f *PingFrame) error { // PROTOCOL_ERROR." 
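// (Cross-reference for this hunk: the per-frame-type inGoAway early returns
// that previously lived in processPing here, and in processData,
// processHeaders, and processPriority below, are deleted in this diff;
// processFrame above now drops post-GOAWAY frames in one place and still
// returns connection-level flow control for discarded DATA frames, per
// RFC 9113 Section 6.8.)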
return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol)) } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } @@ -1527,6 +1614,9 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed + if st.readDeadline != nil { + st.readDeadline.Stop() + } if st.writeDeadline != nil { st.writeDeadline.Stop() } @@ -1552,6 +1642,14 @@ func (sc *serverConn) closeStream(st *stream, err error) { p.CloseWithError(err) } + if e, ok := err.(StreamError); ok { + if e.Cause != nil { + err = e.Cause + } else { + err = errStreamClosed + } + } + st.closeErr = err st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1594,7 +1692,6 @@ func (sc *serverConn) processSetting(s Setting) error { } switch s.ID { case SettingHeaderTableSize: - sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 @@ -1648,16 +1745,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() id := f.Header().StreamID - if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { - // Discard all DATA frames if the GOAWAY is due to an - // error, or: - // - // Section 6.8: After sending a GOAWAY frame, the sender - // can discard frames for streams initiated by the - // receiver with identifiers higher than the identified - // last stream. - return nil - } data := f.Data() state, st := sc.state(id) @@ -1710,6 +1797,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + if sc.inflow.available() < int32(f.Length) { + return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) + } + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the // value of a content-length header field does not equal the sum of the @@ -1794,19 +1887,27 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onReadTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's ReadTimeout has fired. +func (st *stream) onReadTimeout() { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) +} + // onWriteTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's WriteTimeout has fired. func (st *stream) onWriteTimeout() { - st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) + st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{ + StreamID: st.id, + Code: ErrCodeInternal, + Cause: os.ErrDeadlineExceeded, + }}) } func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] 
An endpoint that receives an unexpected @@ -1909,12 +2010,35 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) + if st.body != nil { + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + } } go sc.runHandler(rw, req, handler) return nil } +func (sc *serverConn) upgradeRequest(req *http.Request) { + sc.serveG.check() + id := uint32(1) + sc.maxClientStreamID = id + st := sc.newStream(id, 0, stateHalfClosedRemote) + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + rw := sc.newResponseWriter(st, req) + + // Disable any read deadline set by the net/http package + // prior to the upgrade. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) +} + func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() @@ -1957,9 +2081,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error { } func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } @@ -2033,12 +2154,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) - } - rp.header = make(http.Header) for _, hf := range f.RegularFields() { rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) @@ -2051,6 +2166,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if err != nil { return nil, nil, err } + bodyOpen := !f.StreamEnded() if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { @@ -2145,6 +2261,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) + rw := sc.newResponseWriter(st, req) + return rw, req, nil +} + +func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2153,10 +2274,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil + return &responseWriter{rws: rws} } // Run on its own goroutine. @@ -2164,6 +2282,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler didPanic := true defer func() { rw.rws.stream.cancelCtx() + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } if didPanic { e := recover() sc.writeFrameFromHandler(FrameWriteRequest{ @@ -2275,7 +2396,7 @@ func (sc *serverConn) sendWindowUpdate(st *stream, n int) { // a larger Read than this. Very unlikely, but we handle it here // rather than elsewhere for now. 
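// (On the one-character change below: maxUint31, 2^31-1, is the largest
// increment a single WINDOW_UPDATE may carry per RFC 7540 Section 6.9. With
// the old `>=`, an n that was an exact multiple of maxUint31 ended the loop
// with a zero remainder for the send that follows the loop in this function
// (not shown in the hunk); with `>`, the final chunk is always positive, so
// no zero-increment update can be produced on that path.)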
const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { + for n > maxUint31 { sc.sendWindowUpdate32(st, maxUint31) n -= maxUint31 } @@ -2316,17 +2437,18 @@ type requestBody struct { _ incomparable stream *stream conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue + closeOnce sync.Once // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true + b.closeOnce.Do(func() { + if b.pipe != nil { + b.pipe.BreakWithError(errClosedBody) + } + }) return nil } @@ -2370,7 +2492,6 @@ type responseWriterState struct { // immutable within a request: stream *stream req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc @@ -2395,7 +2516,15 @@ type responseWriterState struct { type chunkWriter struct{ rws *responseWriterState } -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } +func (cw chunkWriter) Write(p []byte) (n int, err error) { + n, err = cw.rws.writeChunk(p) + if err == errStreamClosed { + // If writing failed because the stream has been closed, + // return the reason it was closed. + err = cw.rws.stream.closeErr + } + return n, err +} func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } @@ -2434,6 +2563,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.writeHeader(200) } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true @@ -2505,10 +2638,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - // only send trailers if they have actually been defined by the // server handler. hasNonemptyTrailers := rws.hasNonemptyTrailers() @@ -2546,8 +2675,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2588,23 +2718,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { } } +func (w *responseWriter) SetReadDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so reads after SetReadDeadline returns will fail. + st.onReadTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.readDeadline != nil { + if !st.readDeadline.Stop() { + // Deadline already exceeded, or stream has been closed.
+ return + } + } + if deadline.IsZero() { + st.readDeadline = nil + } else if st.readDeadline == nil { + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + } else { + st.readDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + +func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { + st := w.rws.stream + if !deadline.IsZero() && deadline.Before(time.Now()) { + // If we're setting a deadline in the past, reset the stream immediately + // so writes after SetWriteDeadline returns will fail. + st.onWriteTimeout() + return nil + } + w.rws.conn.sendServeMsg(func(sc *serverConn) { + if st.writeDeadline != nil { + if !st.writeDeadline.Stop() { + // Deadline already exceeded, or stream has been closed. + return + } + } + if deadline.IsZero() { + st.writeDeadline = nil + } else if st.writeDeadline == nil { + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + } else { + st.writeDeadline.Reset(deadline.Sub(time.Now())) + } + }) + return nil +} + func (w *responseWriter) Flush() { + w.FlushError() +} + +func (w *responseWriter) FlushError() error { rws := w.rws if rws == nil { panic("Header called after Handler finished") } + var err error if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } + err = rws.bw.Flush() } else { // The bufio.Writer won't call chunkWriter.Write // (writeChunk with zero bytes, so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. - rws.writeChunk(nil) + _, err = chunkWriter{rws}.Write(nil) + if err == nil { + select { + case <-rws.stream.cw: + err = rws.stream.closeErr + default: + } + } } + return err } func (w *responseWriter) CloseNotify() <-chan bool { @@ -2643,8 +2835,7 @@ func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes) - // and we might block under 200 (once we have more mature 1xx support). + // at http://httpwg.org/specs/rfc7231.html#status.codes). // But for now any three digits. 
// // We used to send "HTTP/1.1 000 0" on the wire in responses but there's @@ -2665,13 +2856,41 @@ func (w *responseWriter) WriteHeader(code int) { } func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - checkWriteHeaderCode(code) - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) + if rws.wroteHeader { + return + } + + checkWriteHeaderCode(code) + + // Handle informational headers + if code >= 100 && code <= 199 { + // Per RFC 8297 we must not clear the current header map + h := rws.handlerHeader + + _, cl := h["Content-Length"] + _, te := h["Transfer-Encoding"] + if cl || te { + h = h.Clone() + h.Del("Content-Length") + h.Del("Transfer-Encoding") + } + + if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: code, + h: h, + endStream: rws.handlerDone && !rws.hasTrailers(), + }) != nil { + rws.dirty = true } + + return + } + + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) } } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f135b0f75189..30f706e6cb81 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,7 +16,7 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "io/fs" "log" "math" mathrand "math/rand" @@ -68,13 +68,23 @@ const ( // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. + // DialTLSContext specifies an optional dial function with context for + // creating TLS connections for requests. // - // If DialTLS is nil, tls.Dial is used. + // If DialTLSContext and DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. + DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) + + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLSContext and DialTLS is nil, tls.Dial is used. + // + // Deprecated: Use DialTLSContext instead, which allows the transport + // to cancel dials as soon as they are no longer needed. + // If both are set, DialTLSContext takes priority. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with @@ -108,6 +118,28 @@ type Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the + // initial settings frame. It is the size in bytes of the largest frame + // payload that the sender is willing to receive. If 0, no setting is + // sent, and the value is provided by the peer, which should be 16384 + // according to the spec: + // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2. + // Values are bounded in the range 16k to 16M. + MaxReadFrameSize uint32 + + // MaxDecoderHeaderTableSize optionally specifies the http2 + // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It + // informs the remote endpoint of the maximum size of the header compression + // table used to decode header blocks, in octets. If zero, the default value + // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32 + + // MaxEncoderHeaderTableSize optionally specifies an upper limit for the + // header compression table used for encoding request headers. Received + // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero, + // the default value of 4096 is used. + MaxEncoderHeaderTableSize uint32 + // StrictMaxConcurrentStreams controls whether the server's // SETTINGS_MAX_CONCURRENT_STREAMS should be respected // globally. If false, new TCP connections are created to the @@ -161,6 +193,19 @@ func (t *Transport) maxHeaderListSize() uint32 { return t.MaxHeaderListSize } +func (t *Transport) maxFrameReadSize() uint32 { + if t.MaxReadFrameSize == 0 { + return 0 // use the default provided by the peer + } + if t.MaxReadFrameSize < minMaxFrameSize { + return minMaxFrameSize + } + if t.MaxReadFrameSize > maxFrameSize { + return maxFrameSize + } + return t.MaxReadFrameSize +} + func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } @@ -249,7 +294,8 @@ func (t *Transport) initConnPool() { // HTTP/2 server. type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls + tconn net.Conn // usually *tls.Conn, except specialized impls + tconnClosed bool tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -282,10 +328,11 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -335,8 +382,8 @@ type clientStream struct { readErr error // sticky read error; owned by transportResponseBody.Read reqBody io.ReadCloser - reqBodyContentLength int64 // -1 means unknown - reqBodyClosed bool // body has been closed; guarded by cc.mu + reqBodyContentLength int64 // -1 means unknown + reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done // owned by writeRequest: sentEndStream bool // sent an END_STREAM flag to the peer @@ -376,9 +423,8 @@ func (cs *clientStream) abortStreamLocked(err error) { cs.abortErr = err close(cs.abort) }) - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil { + cs.closeReqBodyLocked() } // TODO(dneil): Clean up tests where cs.cc.cond is nil. 
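Taken together, the new `Transport` knobs documented above can be set when constructing a client. A minimal sketch using golang.org/x/net/http2 directly; the sizes are illustrative and are clamped/defaulted as the field docs describe:

```go
package main

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func newClient() *http.Client {
	t := &http2.Transport{
		// Context-aware dialer: dials are abandoned as soon as the
		// request that triggered them is cancelled.
		DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
			d := tls.Dialer{Config: cfg}
			return d.DialContext(ctx, network, addr)
		},
		MaxReadFrameSize:          1 << 20,  // advertised as SETTINGS_MAX_FRAME_SIZE, clamped to the legal range
		MaxDecoderHeaderTableSize: 64 << 10, // advertised as SETTINGS_HEADER_TABLE_SIZE
		MaxEncoderHeaderTableSize: 16 << 10, // caps the table size the peer may ask us to encode with
	}
	return &http.Client{Transport: t}
}

func main() { _ = newClient() }
```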
if cs.cc.cond != nil { @@ -391,13 +437,24 @@ func (cs *clientStream) abortRequestBodyWrite() { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() - if cs.reqBody != nil && !cs.reqBodyClosed { - cs.reqBody.Close() - cs.reqBodyClosed = true + if cs.reqBody != nil && cs.reqBodyClosed == nil { + cs.closeReqBodyLocked() cc.cond.Broadcast() } } +func (cs *clientStream) closeReqBodyLocked() { + if cs.reqBodyClosed != nil { + return + } + cs.reqBodyClosed = make(chan struct{}) + reqBodyClosed := cs.reqBodyClosed + go func() { + cs.reqBody.Close() + close(reqBodyClosed) + }() +} + type stickyErrWriter struct { conn net.Conn timeout time.Duration @@ -481,6 +538,15 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } +var retryBackoffHook func(time.Duration) *time.Timer + +func backoffNewTimer(d time.Duration) *time.Timer { + if retryBackoffHook != nil { + return retryBackoffHook(d) + } + return time.NewTimer(d) +} + // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -501,14 +567,19 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { + t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) + d := time.Second * time.Duration(backoff) + timer := backoffNewTimer(d) select { - case <-time.After(time.Second * time.Duration(backoff)): + case <-timer.C: + t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): + timer.Stop() err = req.Context().Err() } } @@ -591,7 +662,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -612,24 +683,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS +func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { + if t.DialTLSContext != nil { + return t.DialTLSContext(ctx, network, addr, tlsCfg) + } else if t.DialTLS != nil { + return t.DialTLS(network, addr, tlsCfg) } - return func(network, addr string, cfg *tls.Config) (net.Conn, error) { - tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) - if err != nil { - return nil, err - } - state := tlsCn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return tlsCn, nil + + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) + if err != nil { + return nil, err + } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + 
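The retry path above replaces `time.After` with an explicit timer so the timer can be stopped when the request context is cancelled (`time.After` leaks its timer until it fires). The same pattern in isolation, as a generic sketch; `sleepBackoff` is a hypothetical name, and `retry` starts at 1 as in the transport:

```go
package main

import (
	"context"
	"math/rand"
	"time"
)

// sleepBackoff waits roughly 2^(retry-1) seconds plus up to 10% jitter,
// or returns early with ctx.Err() if the context is cancelled first.
func sleepBackoff(ctx context.Context, retry uint) error {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * rand.Float64())
	timer := time.NewTimer(time.Second * time.Duration(backoff))
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		timer.Stop() // release the timer rather than leaking it until it fires
		return ctx.Err()
	}
}

func main() {
	_ = sleepBackoff(context.Background(), 1)
}
```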
return nil, errors.New("http2: could not negotiate protocol mutually") } + return tlsCn, nil } // disableKeepAlives reports whether connections should be closed as @@ -645,6 +717,20 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } +func (t *Transport) maxDecoderHeaderTableSize() uint32 { + if v := t.MaxDecoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + +func (t *Transport) maxEncoderHeaderTableSize() uint32 { + if v := t.MaxEncoderHeaderTableSize; v > 0 { + return v + } + return initialHeaderTableSize +} + func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } @@ -685,15 +771,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) + if t.maxFrameReadSize() != 0 { + cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) + } if t.CountError != nil { cc.fr.countError = t.CountError } - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + maxHeaderTableSize := t.maxDecoderHeaderTableSize() + cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.peerMaxHeaderTableSize = initialHeaderTableSize if t.AllowHTTP { cc.nextStreamID = 3 @@ -708,9 +798,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } + if max := t.maxFrameReadSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + if maxHeaderTableSize != initialHeaderTableSize { + initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize}) + } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) @@ -732,11 +828,13 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() + cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { + cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - cc.t.connPool().MarkDead(cc) - return + } else { + cc.vlogf("http2: Transport health check success") } } @@ -907,6 +1005,24 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } +func (cc *ClientConn) closeConn() { + t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) + defer t.Stop() + cc.tconn.Close() +} + +// A tls.Conn.Close can hang for a long time if the peer is unresponsive. +// Try to shut it down more aggressively. 
+func (cc *ClientConn) forceCloseConn() { + tc, ok := cc.tconn.(*tls.Conn) + if !ok { + return + } + if nc := tlsUnderlyingConn(tc); nc != nil { + nc.Close() + } +} + func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 || cc.streamsReserved > 0 { @@ -921,7 +1037,7 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.tconn.Close() + cc.closeConn() } func (cc *ClientConn) isDoNotReuseAndIdle() bool { @@ -938,7 +1054,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan error, 1) + done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -946,7 +1062,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - done <- cc.tconn.Close() + close(done) break } if cancelled { @@ -957,8 +1073,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case err := <-done: - return err + case <-done: + cc.closeConn() + return nil case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -995,15 +1112,15 @@ func (cc *ClientConn) sendGoAway() error { // closes the client connection immediately. In-flight requests are interrupted. // err is sent to streams. -func (cc *ClientConn) closeForError(err error) error { +func (cc *ClientConn) closeForError(err error) { cc.mu.Lock() cc.closed = true for _, cs := range cc.streams { cs.abortStreamLocked(err) } - defer cc.cond.Broadcast() - defer cc.mu.Unlock() - return cc.tconn.Close() + cc.cond.Broadcast() + cc.mu.Unlock() + cc.closeConn() } // Close closes the client connection immediately. @@ -1011,16 +1128,17 @@ func (cc *ClientConn) closeForError(err error) error { // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { err := errors.New("http2: client connection force closed via ClientConn.Close") - return cc.closeForError(err) + cc.closeForError(err) + return nil } // closes the client connection immediately. In-flight requests are interrupted. 
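`Shutdown` now signals completion through a closed channel, and all teardown funnels through `closeConn`, which arms a 250ms watchdog in case a TLS close handshake hangs against an unresponsive peer. Draining a connection from caller code looks roughly like this sketch; `cc` is assumed to come from `(*http2.Transport).NewClientConn`:

```go
package main

import (
	"context"
	"time"

	"golang.org/x/net/http2"
)

// drain gives in-flight streams a grace period, then force-closes.
func drain(cc *http2.ClientConn) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := cc.Shutdown(ctx); err != nil {
		// Grace period expired or context cancelled: interrupt the
		// remaining streams immediately.
		cc.Close()
	}
}

func main() {}
```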
-func (cc *ClientConn) closeForLostPing() error { +func (cc *ClientConn) closeForLostPing() { err := errors.New("http2: client connection lost") if f := cc.t.CountError; f != nil { f("conn_close_lost_ping") } - return cc.closeForError(err) + cc.closeForError(err) } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not @@ -1030,7 +1148,7 @@ var errRequestCanceled = errors.New("net/http: request canceled") func commaSeparatedTrailers(req *http.Request) (string, error) { keys := make([]string, 0, len(req.Trailer)) for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) + k = canonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": return "", fmt.Errorf("invalid Trailer key %q", k) @@ -1398,11 +1516,19 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // and in multiple cases: server replies <=299 and >299 // while still writing request body cc.mu.Lock() + mustCloseBody := false + if cs.reqBody != nil && cs.reqBodyClosed == nil { + mustCloseBody = true + cs.reqBodyClosed = make(chan struct{}) + } bodyClosed := cs.reqBodyClosed - cs.reqBodyClosed = true cc.mu.Unlock() - if !bodyClosed && cs.reqBody != nil { + if mustCloseBody { cs.reqBody.Close() + close(bodyClosed) + } + if bodyClosed != nil { + <-bodyClosed } if err != nil && cs.sentEndStream { @@ -1559,7 +1685,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)]) + n, err := body.Read(buf) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1582,7 +1708,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { } if err != nil { cc.mu.Lock() - bodyClosed := cs.reqBodyClosed + bodyClosed := cs.reqBodyClosed != nil cc.mu.Unlock() switch { case bodyClosed: @@ -1677,7 +1803,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) if cc.closed { return 0, errClientConnClosed } - if cs.reqBodyClosed { + if cs.reqBodyClosed != nil { return 0, errStopReqBodyWrite } select { @@ -1748,7 +1874,8 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + // Don't include the value in the error, because it may be sensitive. + return nil, fmt.Errorf("invalid HTTP header value for header %q", k) } } } @@ -1861,7 +1988,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - name, ascii := asciiToLower(name) + name, ascii := lowerHeader(name) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -1914,7 +2041,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := asciiToLower(k) + lowKey, ascii := lowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -1972,13 +2099,13 @@ func (cc *ClientConn) forgetStreamID(id uint32) { // wake up RoundTrip if there is a pending request. 
cc.cond.Broadcast() - closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) } cc.closed = true - defer cc.tconn.Close() + defer cc.closeConn() } cc.mu.Unlock() @@ -2025,8 +2152,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) + cc.t.connPool().MarkDead(cc) + defer cc.closeConn() defer close(cc.readerDone) if cc.idleTimer != nil { @@ -2048,6 +2175,7 @@ func (rl *clientConnReadLoop) cleanup() { err = io.ErrUnexpectedEOF } cc.closed = true + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2246,7 +2374,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2254,7 +2382,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil + t[canonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2359,7 +2487,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) + key := canonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2641,7 +2769,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if fn := cc.t.CountError; fn != nil { fn("recv_goaway_" + f.ErrCode.stringToken()) } - } cc.setGoAway(f) return nil @@ -2706,8 +2833,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc.cond.Broadcast() cc.initialWindowSize = s.Val + case SettingHeaderTableSize: + cc.henc.SetMaxDynamicTableSize(s.Val) + cc.peerMaxHeaderTableSize = s.Val default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. cc.vlogf("Unhandled Setting: %v", s) } return nil @@ -2881,7 +3010,12 @@ func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) 
} -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) +var noBody io.ReadCloser = noBodyReader{} + +type noBodyReader struct{} + +func (noBodyReader) Close() error { return nil } +func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } type missingBody struct{} @@ -2926,7 +3060,11 @@ func (gz *gzipReader) Read(p []byte) (n int, err error) { } func (gz *gzipReader) Close() error { - return gz.body.Close() + if err := gz.body.Close(); err != nil { + return err + } + gz.zerr = fs.ErrClosed + return nil } type errorReader struct{ err error } @@ -2990,7 +3128,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 2618b2c11d22..0a242c669e2c 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode - if id := wr.StreamID(); id == 0 { + if wr.isControl() { n = &ws.root } else { + id := wr.StreamID() n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. + // DATA frame. In other case, we push wr onto the root, rather + // than creating a new priorityNode. if wr.DataSize() > 0 { panic("add DATA on non-open stream") } diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 7a8cf889b5bc..9c070a44b377 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. 
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index 3ebf6f2daa30..eae2a99f54c6 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -395,7 +395,7 @@ func New(family, title string) Trace { } func (tr *trace) Finish() { - elapsed := time.Now().Sub(tr.Start) + elapsed := time.Since(tr.Start) tr.mu.Lock() tr.Elapsed = elapsed tr.mu.Unlock() diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 9857fe53d3c9..cbee7a4e230d 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -8,22 +8,35 @@ package errgroup import ( "context" + "fmt" "sync" ) +type token struct{} + // A Group is a collection of goroutines working on subtasks that are part of // the same overall task. // -// A zero Group is valid and does not cancel on error. +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. type Group struct { cancel func() wg sync.WaitGroup + sem chan token + errOnce sync.Once err error } +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + // WithContext returns a new Group and an associated Context derived from ctx. // // The derived Context is canceled the first time a function passed to Go @@ -45,14 +58,48 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. // -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. 
+ default: + return false + } + } + g.wg.Add(1) go func() { - defer g.wg.Done() + defer g.done() if err := f(); err != nil { g.errOnce.Do(func() { @@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) { }) } }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go index 690eb8501347..8473fb7922c1 100644 --- a/vendor/golang.org/x/sync/singleflight/singleflight.go +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -52,10 +52,6 @@ type call struct { val interface{} err error - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - // These fields are read and written with the singleflight // mutex held before the WaitGroup is done, and are read but // not written after the WaitGroup is done. @@ -148,10 +144,10 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { c.err = errGoexit } - c.wg.Done() g.mu.Lock() defer g.mu.Unlock() - if !c.forgotten { + c.wg.Done() + if g.m[key] == c { delete(g.m, key) } @@ -204,9 +200,6 @@ func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { // an earlier call to complete. func (g *Group) Forget(key string) { g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } delete(g.m, key) g.mu.Unlock() } diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/sys/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go index dcbb14ef35a4..271055be0b1e 100644 --- a/vendor/golang.org/x/sys/cpu/byteorder.go +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -46,6 +46,7 @@ func hostByteOrder() byteOrder { case "386", "amd64", "amd64p32", "alpha", "arm", "arm64", + "loong64", "mipsle", "mips64le", "mips64p32le", "nios2", "ppc64le", diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index b56886f26163..83f112c4c808 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -106,8 +106,8 @@ var ARM64 struct { // ARM contains the supported CPU features of the current ARM (32-bit) platform. // All feature flags are false if: -// 1. 
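`SetLimit` and `TryGo` above give errgroup a bounded-concurrency mode: `Go` blocks while the limit is reached, `TryGo` never does. A small usage sketch; the limit and task bodies are illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	g.SetLimit(3) // must be set before any goroutines are started

	for i := 0; i < 10; i++ {
		i := i
		g.Go(func() error { // blocks while 3 tasks are already in flight
			fmt.Println("task", i)
			return nil
		})
	}

	// TryGo never blocks; it reports whether a slot was free.
	if !g.TryGo(func() error { return nil }) {
		fmt.Println("group is at its limit")
	}

	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}
```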
the current platform is not arm, or -// 2. the current operating system is not Linux. +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. var ARM struct { _ CacheLinePad HasSWP bool // SWP instruction support diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 87dd5e30215b..f3eb993bf24b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -6,7 +6,10 @@ package cpu import "runtime" -const cacheLineSize = 64 +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. +const cacheLineSize = 128 func initOptions() { options = []option{ @@ -41,13 +44,10 @@ func archInit() { switch runtime.GOOS { case "freebsd": readARM64Registers() - case "linux", "netbsd": + case "linux", "netbsd", "openbsd": doinit() default: - // Most platforms don't seem to allow reading these registers. - // - // OpenBSD: - // See https://golang.org/issue/31746 + // Many platforms don't seem to allow reading these registers. setMinimalFeatures() } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index e363c7d13197..a4605e6d12e8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -7,6 +7,7 @@ #include #include +#include // Need to wrap __get_cpuid_count because it's declared as static. int @@ -17,27 +18,21 @@ gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); } +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + // xgetbv reads the contents of an XCR (Extended Control Register) // specified in the ECX register into registers EDX:EAX. // Currently, the only supported value for XCR is 0. -// -// TODO: Replace with a better alternative: -// -// #include -// -// #pragma GCC target("xsave") -// -// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { -// unsigned long long x = _xgetbv(0); -// *eax = x & 0xffffffff; -// *edx = (x >> 32) & 0xffffffff; -// } -// -// Note that _xgetbv is defined starting with GCC 8. void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; } + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000000..0f57b05bdbe5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 +// +build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 000000000000..85b64d5ccb73 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
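Raising the arm64 `cacheLineSize` to 128 bytes keeps `cpu.CacheLinePad` effective on Apple M1-class cores, whose cache lines are 128 bytes wide. Typical use of the pad, as a sketch (the shard count is arbitrary):

```go
package main

import (
	"sync/atomic"

	"golang.org/x/sys/cpu"
)

// shard keeps each counter on its own cache line so concurrent
// increments on different shards do not false-share.
type shard struct {
	n uint64
	_ cpu.CacheLinePad
}

type shardedCounter struct {
	shards [8]shard
}

func (c *shardedCounter) inc(i int) {
	atomic.AddUint64(&c.shards[i%len(c.shards)].n, 1)
}

func main() {
	var c shardedCounter
	c.inc(3)
}
```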
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. + _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000000..054ba05d607b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f8c484f589f5..f3cde129b634 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !linux && !netbsd && arm64 -// +build !linux,!netbsd,arm64 +//go:build !linux && !netbsd && !openbsd && arm64 +// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000000..060d46b6eacc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !aix && !linux && (ppc64 || ppc64le) +// +build !aix +// +build !linux +// +build ppc64 ppc64le + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000000..dd10eb79feef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 +// +build !linux,riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go index a864f24d7589..96134157a10d 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -5,7 +5,7 @@ // Recreate a getsystemcfg syscall handler instead of // using the one provided by x/sys/unix to avoid having // the dependency between them. (See golang.org/issue/32102) -// Morever, this file will be used during the building of +// Moreover, this file will be used during the building of // gccgo's libgo and thus must not used a CGo method. //go:build aix && gccgo diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go index 78192498db01..b981cfbb4ae3 100644 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -53,7 +53,7 @@ func relError(file, path string) error { // LookPath instead returns an error. func LookPath(file string) (string, error) { path, err := exec.LookPath(file) - if err != nil { + if err != nil && !isGo119ErrDot(err) { return "", err } if filepath.Base(file) == file && !filepath.IsAbs(path) { diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000000..6ab5f50894e2 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package execabs + +func isGo119ErrDot(err error) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000000..46c5b525e7b8 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s new file mode 100644 index 000000000000..e5b9a84899ac --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
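The Go 1.19 build-tagged files above teach `execabs.LookPath` to tolerate `exec.ErrDot`, which the standard library now returns when a lookup resolves relative to the current directory; execabs then applies its own relative-path check. Usage is unchanged, as in this sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/execabs"
)

func main() {
	// Unlike a bare exec.LookPath, execabs refuses to resolve "git"
	// to ./git in the current working directory.
	path, err := execabs.LookPath("git")
	if err != nil {
		fmt.Println("git not found on PATH:", err)
		return
	}
	fmt.Println("using", path)
}
```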
+ +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// +// System call support for ppc64, BSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s new file mode 100644 index 000000000000..d560019ea29e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// System call support for RISCV64 BSD + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s new file mode 100644 index 000000000000..565357288a81 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && loong64 && gc +// +build linux +// +build loong64 +// +build gc + +#include "textflag.h" + + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + RET diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index e74e5eaa3bfe..2499f977b070 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 4362f47e2c00..b0f2bc4ae3b2 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go deleted file mode 100644 index 761db66efece..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 070f44b65104..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca325438..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP 
= 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go deleted file mode 100644 index 946dcf3fc7ec..000000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 934af313c323..15721a5104e4 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -8,7 +8,6 @@ package unix import ( - "bytes" "unsafe" ) @@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) { // Name returns the interface name associated with the Ifreq. func (ifr *Ifreq) Name() string { - // BytePtrToString requires a NULL terminator or the program may crash. If - // one is not present, just return the empty string. - if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { - return "" - } - - return BytePtrToString(&ifr.raw.Ifrn[0]) + return ByteSliceToString(ifr.raw.Ifrn[:]) } // According to netdevice(7), only AF_INET addresses are returned for numerous diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 1dadead21e6d..0d12c0851adf 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -4,9 +4,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // IoctlRetInt performs an ioctl operation specified by req on a device // associated with opened file descriptor fd, and returns a non-negative @@ -194,3 +192,42 @@ func ioctlIfreqData(fd int, req uint, value *ifreqData) error { // identical so pass *IfreqData directly. 
return ioctlPtr(fd, req, unsafe.Pointer(value)) } + +// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an +// existing KCM socket, returning a structure containing the file descriptor of +// the new socket. +func IoctlKCMClone(fd int) (*KCMClone, error) { + var info KCMClone + if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil { + return nil, err + } + + return &info, nil +} + +// IoctlKCMAttach attaches a TCP socket and associated BPF program file +// descriptor to a multiplexor. +func IoctlKCMAttach(fd int, info KCMAttach) error { + return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info)) +} + +// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor. +func IoctlKCMUnattach(fd int, info KCMUnattach) error { + return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info)) +} + +// IoctlLoopGetStatus64 gets the status of the loop device associated with the +// file descriptor fd using the LOOP_GET_STATUS64 operation. +func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { + var value LoopInfo64 + if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil { + return nil, err + } + return &value, nil +} + +// IoctlLoopSetStatus64 sets the status of the loop device associated with the +// file descriptor fd using the LOOP_SET_STATUS64 operation. +func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { + return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ee73623489b0..727cba212704 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,12 +73,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -89,25 +89,30 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +freebsd_riscv64) + mkerrors="$mkerrors -m64" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) @@ -137,33 +142,33 @@ netbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_386) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32 -openbsd" + mksyscall="go run mksyscall.go -l32 -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) + mkasm="go run mkasm.go" mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32 -openbsd -arm" + mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_arm64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" @@ -177,6 +182,24 @@ openbsd_mips64) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; +openbsd_ppc64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_riscv64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; solaris_amd64) mksyscall="go run mksyscall_solaris.go" mkerrors="$mkerrors -m64" @@ -209,11 +232,6 @@ esac if [ "$GOOSARCH" == "aix_ppc64" ]; then # aix/ppc64 script generates files instead of writing to stdin. 
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; - elif [ "$GOOS" == "darwin" ]; then - # 1.12 and later, syscalls via libSystem - echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; - # 1.13 and later, syscalls via libSystem (including syscallPtr) - echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; elif [ "$GOOS" == "illumos" ]; then # illumos code generation requires a --illumos switch echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; @@ -227,5 +245,5 @@ esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi - if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi ) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index a47b035f9af3..7456d9ddde16 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -128,6 +128,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -202,9 +203,11 @@ struct ltchars { #include #include #include +#include #include #include #include +#include #include #include #include @@ -214,6 +217,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -231,6 +235,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -292,6 +297,10 @@ struct ltchars { #define SOL_NETLINK 270 #endif +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -503,6 +512,7 @@ ccflags="$@" $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || + $2 ~ /^KCM/ || $2 ~ /^LANDLOCK_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || @@ -525,7 +535,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|PIOD|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || @@ -549,6 +559,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -571,7 +582,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || @@ -597,8 +607,10 @@ ccflags="$@" $2 ~ /^DEVLINK_/ || $2 ~ /^ETHTOOL_/ || $2 ~ /^LWTUNNEL_IP/ || + $2 ~ /^ITIMER_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || + $2 ~ /^P_/ || $2 ~/^PPPIOC/ || $2 ~ /^FAN_|FANOTIFY_/ || $2 == "HID_MAX_DESCRIPTOR_SIZE" || @@ -608,6 +620,7 @@ ccflags="$@" $2 ~ /^OTP/ || $2 ~ /^MEM/ || $2 ~ /^WG/ || + $2 ~ /^FIB_RULE_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -629,7 +642,7 @@ errors=$( signals=$( echo 
'#include <signal.h>' | $CC -x c - -E -dM $ccflags |
 	awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
-	egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
+	grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
 	sort
 )
@@ -639,7 +652,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
 	sort >_error.grep
 echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
 	awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
-	egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
+	grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
 	sort >_signal.grep
 
 echo '// mkerrors.sh' "$@"
diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
index 453a942c5db3..3865943f6e27 100644
--- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
+++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
@@ -52,6 +52,20 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
 	return msgs, nil
 }
 
+// ParseOneSocketControlMessage parses a single socket control message from b, returning the message header,
+// message data (a slice of b), and the remainder of b after that single message.
+// When there are no remaining messages, len(remainder) == 0.
+func ParseOneSocketControlMessage(b []byte) (hdr Cmsghdr, data []byte, remainder []byte, err error) {
+	h, dbuf, err := socketControlMessageHeaderAndData(b)
+	if err != nil {
+		return Cmsghdr{}, nil, nil, err
+	}
+	if i := cmsgAlignOf(int(h.Len)); i < len(b) {
+		remainder = b[i:]
+	}
+	return *h, dbuf, remainder, nil
+}
+
 func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
 	h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
 	if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go
deleted file mode 100644
index 8ba89ed8694f..000000000000
--- a/vendor/golang.org/x/sys/unix/str.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package unix
-
-func itoa(val int) string { // do it here rather than with fmt to avoid dependency
-	if val < 0 {
-		return "-" + uitoa(uint(-val))
-	}
-	return uitoa(uint(val))
-}
-
-func uitoa(val uint) string {
-	var buf [32]byte // big enough for int64
-	i := len(buf) - 1
-	for val >= 10 {
-		buf[i] = byte(val%10 + '0')
-		i--
-		val /= 10
-	}
-	buf[i] = byte(val + '0')
-	return string(buf[i:])
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 649fa87405d1..63e8c838317f 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -29,8 +29,6 @@ import (
 	"bytes"
 	"strings"
 	"unsafe"
-
-	"golang.org/x/sys/internal/unsafeheader"
 )
 
 // ByteSliceFromString returns a NUL-terminated slice of bytes
@@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string {
 		ptr = unsafe.Pointer(uintptr(ptr) + 1)
 	}
 
-	var s []byte
-	h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
-	h.Data = unsafe.Pointer(p)
-	h.Len = n
-	h.Cap = n
-
-	return string(s)
+	return string(unsafe.Slice(p, n))
 }
 
 // Single-word zero for use when we need a valid pointer to 0 bytes.
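The sockcmsg_unix.go hunk above adds `ParseOneSocketControlMessage`, which returns one parsed message plus the unconsumed remainder of the buffer, instead of the `[]SocketControlMessage` slice that `ParseSocketControlMessage` allocates. A minimal sketch of how a caller might drain a control-message buffer with it; `drainControlMessages` is a hypothetical helper name, and `unix.UnixRights` is used only to synthesize a well-formed SCM_RIGHTS buffer without opening a socket:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

// drainControlMessages walks oob one message at a time. Per the new doc
// comment, the returned remainder has length 0 once the last message in
// the buffer has been consumed.
func drainControlMessages(oob []byte) error {
	for len(oob) > 0 {
		hdr, data, rem, err := unix.ParseOneSocketControlMessage(oob)
		if err != nil {
			return err
		}
		fmt.Printf("level=%d type=%d datalen=%d\n", hdr.Level, hdr.Type, len(data))
		oob = rem
	}
	return nil
}

func main() {
	// UnixRights encodes a single SCM_RIGHTS control message carrying
	// these file descriptors, giving us a valid buffer to parse.
	oob := unix.UnixRights(int(os.Stdin.Fd()), int(os.Stdout.Fd()))
	if err := drainControlMessages(oob); err != nil {
		log.Fatal(err)
	}
}
```

In practice the `oob` buffer would come from the `oobn` bytes filled in by `unix.Recvmsg`; the loop shape is the same.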
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 4f55c8d99960..2db1b51e99f0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -215,20 +217,63 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { - // Recvmsg not implemented on AIX - sa := new(SockaddrUnix) - return -1, -1, -1, sa, ENOSYS -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) return } -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - // SendmsgN not implemented on AIX - return -1, ENOSYS +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var dummy byte + var empty bool + if len(oob) > 0 { + // send at least one normal byte + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && empty { + n = 0 + } + return n, nil } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { @@ -306,11 +351,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -378,6 +425,7 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + func Fsync(fd int) error { return fsyncRange(fd, O_SYNC, 0, 0) } @@ -458,8 +506,8 @@ func Fsync(fd int) error { //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_t) (err error) 
//sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = pread64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) //sysnb Setregid(rgid int, egid int) (err error) @@ -542,6 +590,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 0ce45232611f..eda42671f195 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -325,80 +325,62 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) - return -} - -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) 
msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -571,12 +553,7 @@ func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL } - // Darwin setattrlist can set nanosecond timestamps - err := setattrlistTimes(path, ts, 0) - if err != ENOSYS { - return err - } - err = utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) + err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) if err != ENOSYS { return err } @@ -596,10 +573,6 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { if len(ts) != 2 { return EINVAL } - err := setattrlistTimes(path, ts, flags) - if err != ENOSYS { - return err - } return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go deleted file mode 100644 index b0098607c706..000000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.12 && !go1.13 -// +build darwin,go1.12,!go1.13 - -package unix - -import ( - "unsafe" -) - -const _SYS_GETDIRENTRIES64 = 344 - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // To implement this using libSystem we'd need syscall_syscallPtr for - // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall - // back to raw syscalls for this func on Go 1.12. - var p unsafe.Pointer - if len(buf) > 0 { - p = unsafe.Pointer(&buf[0]) - } else { - p = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - return n, errnoErr(e1) - } - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go deleted file mode 100644 index 1596426b1e2e..000000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.13 -// +build darwin,go1.13 - -package unix - -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -//sys closedir(dir uintptr) (err error) -//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) - -func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) - dir = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fdopendir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // Simulate Getdirentries using fdopendir/readdir_r/closedir. - // We store the number of entries to skip in the seek - // offset of fd. See issue #31368. 
- // It's not the full required semantics, but should handle the case - // of calling Getdirentries or ReadDirent repeatedly. - // It won't handle assigning the results of lseek to *basep, or handle - // the directory being edited underfoot. - skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - return 0, err - } - - // We need to duplicate the incoming file descriptor - // because the caller expects to retain control of it, but - // fdopendir expects to take control of its argument. - // Just Dup'ing the file descriptor is not enough, as the - // result shares underlying state. Use Openat to make a really - // new file descriptor referring to the same directory. - fd2, err := Openat(fd, ".", O_RDONLY, 0) - if err != nil { - return 0, err - } - d, err := fdopendir(fd2) - if err != nil { - Close(fd2) - return 0, err - } - defer closedir(d) - - var cnt int64 - for { - var entry Dirent - var entryp *Dirent - e := readdir_r(d, &entry, &entryp) - if e != 0 { - return n, errnoErr(e) - } - if entryp == nil { - break - } - if skip > 0 { - skip-- - cnt++ - continue - } - - reclen := int(entry.Reclen) - if reclen > len(buf) { - // Not enough room. Return for now. - // The counter will let us know where we should start up again. - // Note: this strategy for suspending in the middle and - // restarting is O(n^2) in the length of the directory. Oh well. - break - } - - // Copy entry into return buffer. - var s []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - hdr.Data = unsafe.Pointer(&entry) - hdr.Cap = reclen - hdr.Len = reclen - copy(buf, s) - - buf = buf[reclen:] - n += reclen - cnt++ - } - // Set the seek offset of the input fd to record - // how many files we've already returned. - _, err = Seek(fd, cnt, 0 /* SEEK_SET */) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 0eaab91314c6..1f63382182f3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -19,6 +19,96 @@ import ( "unsafe" ) +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fdopendir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. 
+ fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -141,16 +231,6 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } -type attrList struct { - bitmapCount uint16 - _ uint16 - CommonAttr uint32 - VolAttr uint32 - DirAttr uint32 - FileAttr uint32 - Forkattr uint32 -} - //sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { @@ -282,36 +362,7 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { return flistxattr(fd, xattrPointer(dest), len(dest), 0) } -func setattrlistTimes(path string, times []Timespec, flags int) error { - _p0, err := BytePtrFromString(path) - if err != nil { - return err - } - - var attrList attrList - attrList.bitmapCount = ATTR_BIT_MAP_COUNT - attrList.CommonAttr = ATTR_CMN_MODTIME | ATTR_CMN_ACCTIME - - // order is mtime, atime: the opposite of Chtimes - attributes := [2]Timespec{times[1], times[0]} - options := 0 - if flags&AT_SYMLINK_NOFOLLOW != 0 { - options |= FSOPT_NOFOLLOW - } - return setattrlist( - _p0, - unsafe.Pointer(&attrList), - unsafe.Pointer(&attributes), - unsafe.Sizeof(attributes), - options) -} - -//sys setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { - // Darwin doesn't support SYS_UTIMENSAT - return ENOSYS -} +//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Wrapped @@ -432,6 +483,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) 
if err != nil { @@ -543,11 +601,12 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) @@ -611,7 +670,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 2e37c3167f39..61c0d0de15d5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,12 +125,14 @@ func Pipe2(p []int, flags int) (err error) { } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pread(fd int, p []byte, offset int64) (n int, err error) { + +func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } @@ -169,11 +171,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 2f650ae665cc..de7c23e0648a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -17,25 +17,12 @@ import ( "unsafe" ) -const ( - SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } - SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ - SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ - SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ - SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ - SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ - SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ -) - // See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. 
var ( osreldateOnce sync.Once osreldate uint32 ) -// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h -const _ino64First = 1200031 - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -159,46 +146,21 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( - _p0 unsafe.Pointer - bufsize uintptr - oldBuf []statfs_freebsd11_t - needsConvert bool + _p0 unsafe.Pointer + bufsize uintptr ) - if len(buf) > 0 { - if supportsABI(_ino64First) { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) - } else { - n := len(buf) - oldBuf = make([]statfs_freebsd11_t, n) - _p0 = unsafe.Pointer(&oldBuf[0]) - bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) - needsConvert = true - } - } - var sysno uintptr = SYS_GETFSSTAT - if supportsABI(_ino64First) { - sysno = SYS_GETFSSTAT_FREEBSD12 + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } - if e1 == 0 && needsConvert { - for i := range oldBuf { - buf[i].convertFrom(&oldBuf[i]) - } - } return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -250,87 +212,11 @@ func Uname(uname *Utsname) error { } func Stat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, 0) - } - err = stat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil + return Fstatat(AT_FDCWD, path, st, 0) } func Lstat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) - } - err = lstat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstat(fd int, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstat_freebsd12(fd, st) - } - err = fstat(fd, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(fd, path, st, flags) - } - err = fstatat(fd, path, &oldStat, flags) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Statfs(path string, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return statfs_freebsd12(path, st) - } - err = statfs(path, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil -} - -func Fstatfs(fd int, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, st) - } - err = fstatfs(fd, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil + return Fstatat(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) } func Getdents(fd int, buf []byte) (n int, 
err error) { @@ -338,162 +224,25 @@ func Getdents(fd int, buf []byte) (n int, err error) { } func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - if supportsABI(_ino64First) { - if basep == nil || unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return - } - - // The old syscall entries are smaller than the new. Use 1/4 of the original - // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). - oldBufLen := roundup(len(buf)/4, _dirblksiz) - oldBuf := make([]byte, oldBufLen) - n, err = getdirentries(fd, oldBuf, basep) - if err == nil && n > 0 { - n = convertFromDirents11(buf, oldBuf[:n]) + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO } return } func Mknod(path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(AT_FDCWD, path, mode, dev) - } - oldDev = int(dev) - return mknod(path, mode, oldDev) -} - -func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(fd, path, mode, dev) - } - oldDev = int(dev) - return mknodat(fd, path, mode, oldDev) -} - -// round x to the nearest multiple of y, larger or equal to x. -// -// from /usr/include/sys/param.h Macros for counting and rounding. 
-// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) -func roundup(x, y int) int { - return ((x + y - 1) / y) * y -} - -func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { - *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), - } -} - -func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { - *s = Statfs_t{ - Version: _statfsVersion, - Type: old.Type, - Flags: old.Flags, - Bsize: old.Bsize, - Iosize: old.Iosize, - Blocks: old.Blocks, - Bfree: old.Bfree, - Bavail: old.Bavail, - Files: old.Files, - Ffree: old.Ffree, - Syncwrites: old.Syncwrites, - Asyncwrites: old.Asyncwrites, - Syncreads: old.Syncreads, - Asyncreads: old.Asyncreads, - // Spare - Namemax: old.Namemax, - Owner: old.Owner, - Fsid: old.Fsid, - // Charspare - // Fstypename - // Mntfromname - // Mntonname - } - - sl := old.Fstypename[:] - n := clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Fstypename[:], old.Fstypename[:n]) - - sl = old.Mntfromname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntfromname[:], old.Mntfromname[:n]) - - sl = old.Mntonname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntonname[:], old.Mntonname[:n]) -} - -func convertFromDirents11(buf []byte, old []byte) int { - const ( - fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) - oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) - ) - - dstPos := 0 - srcPos := 0 - for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - var dstDirent Dirent - var srcDirent dirent_freebsd11 - - // If multiple direntries are written, sometimes when we reach the final one, - // we may have cap of old less than size of dirent_freebsd11. 
- copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) - - reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen > len(buf) { - break - } - - dstDirent.Fileno = uint64(srcDirent.Fileno) - dstDirent.Off = 0 - dstDirent.Reclen = uint16(reclen) - dstDirent.Type = srcDirent.Type - dstDirent.Pad0 = 0 - dstDirent.Namlen = uint16(srcDirent.Namlen) - dstDirent.Pad1 = 0 - - copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) - copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) - padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] - for i := range padding { - padding[i] = 0 - } - - dstPos += int(dstDirent.Reclen) - srcPos += int(srcDirent.Reclen) - } - - return dstPos + return Mknodat(AT_FDCWD, path, mode, dev) } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { @@ -506,31 +255,31 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ptrace(request int, pid int, addr uintptr, data int) (err error) func PtraceAttach(pid int) (err error) { - return ptrace(PTRACE_ATTACH, pid, 0, 0) + return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceCont(pid int, signal int) (err error) { - return ptrace(PTRACE_CONT, pid, 1, signal) + return ptrace(PT_CONTINUE, pid, 1, signal) } func PtraceDetach(pid int) (err error) { - return ptrace(PTRACE_DETACH, pid, 1, 0) + return ptrace(PT_DETACH, pid, 1, 0) } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } func PtraceLwpEvents(pid int, enable int) (err error) { - return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) + return ptrace(PT_LWP_EVENTS, pid, 0, enable) } func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) + return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -550,11 +299,11 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) } func PtraceSingleStep(pid int) (err error) { - return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) + return ptrace(PT_STEP, pid, 1, 0) } /* @@ -596,16 +345,12 @@ func PtraceSingleStep(pid int) (err error) { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys fstat(fd int, stat *stat_freebsd11_t) (err error) -//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) -//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) -//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) -//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) -//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) 
+//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -627,19 +372,16 @@ func PtraceSingleStep(pid int) (err error) { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys mknod(path string, mode uint32, dev int) (err error) -//sys mknodat(fd int, path string, mode uint32, dev int) (err error) -//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) @@ -663,9 +405,7 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys stat(path string, stat *stat_freebsd11_t) (err error) -//sys statfs(path string, stat *statfs_freebsd11_t) (err error) -//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 342fc32b1686..b11ede89a960 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: 
uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index a32d5aa4aed4..9ed8eec6c287 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -57,11 +57,11 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) + return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 1e36d39abe01..f8ac98247905 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a09a1537bd6f..8e932036ec37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go 
b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go new file mode 100644 index 000000000000..cbe12227896b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -0,0 +1,63 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +import ( + "syscall" + "unsafe" +) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + var writtenOut uint64 = 0 + _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) + + written = int(writtenOut) + + if e1 != 0 { + err = e1 + } + return +} + +func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8d5f294c4250..87db5a6a8ccc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -10,8 +10,6 @@ package unix import ( - "fmt" - "runtime" "unsafe" ) @@ -20,10 +18,9 @@ func bytes2iovec(bs [][]byte) []Iovec { for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { - // somehow Iovec.Base on illumos is (*int8), not (*byte) - iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + iovecs[i].Base = &b[0] } else { - iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs @@ -80,107 +77,3 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } - -//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) - -func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Len: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Len: int32(len(data)), - Buf: (*int8)(unsafe.Pointer(&data[0])), - } - } - return putmsg(fd, clp, datap, flags) -} - -//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) - -func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Maxlen: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Maxlen: int32(len(data)), - Buf: 
(*int8)(unsafe.Pointer(&data[0])), - } - } - - if err = getmsg(fd, clp, datap, &flags); err != nil { - return nil, nil, 0, err - } - - if len(cl) > 0 { - retCl = cl[:clp.Len] - } - if len(data) > 0 { - retData = data[:datap.Len] - } - return retCl, retData, flags, nil -} - -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { - return ioctlRet(fd, req, uintptr(arg)) -} - -func IoctlSetString(fd int, req uint, val string) error { - bs := make([]byte, len(val)+1) - copy(bs[:len(bs)-1], val) - err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) - runtime.KeepAlive(&bs[0]) - return err -} - -// Lifreq Helpers - -func (l *Lifreq) SetName(name string) error { - if len(name) >= len(l.Name) { - return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) - } - for i := range name { - l.Name[i] = int8(name[i]) - } - return nil -} - -func (l *Lifreq) SetLifruInt(d int) { - *(*int)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruInt() int { - return *(*int)(unsafe.Pointer(&l.Lifru[0])) -} - -func (l *Lifreq) SetLifruUint(d uint) { - *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruUint() uint { - return *(*uint)(unsafe.Pointer(&l.Lifru[0])) -} - -func IoctlLifreq(fd int, req uint, l *Lifreq) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(l))) -} - -// Strioctl Helpers - -func (s *Strioctl) SetInt(i int) { - s.Len = int32(unsafe.Sizeof(i)) - s.Dp = (*int8)(unsafe.Pointer(&i)) -} - -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { - return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index f432b0684b8c..c5a98440eca1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,7 +13,9 @@ package unix import ( "encoding/binary" + "strconv" "syscall" + "time" "unsafe" ) @@ -232,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). - return Utimes("/proc/self/fd/"+itoa(fd), tv) + return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv) } const ImplementsGetwd = true @@ -249,6 +251,13 @@ func Getwd() (wd string, err error) { if n < 1 || n > len(buf) || buf[n-1] != 0 { return "", EINVAL } + // In some cases, Linux can return a path that starts with the + // "(unreachable)" prefix, which can potentially be a valid relative + // path. To work around that, return ENOENT if path is not absolute. 
+ if buf[0] != '/' { + return "", ENOENT + } + return string(buf[0 : n-1]), nil } @@ -358,6 +367,8 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, return } +//sys Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) + func Mkfifo(path string, mode uint32) error { return Mknod(path, mode|S_IFIFO, 0) } @@ -502,24 +513,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -546,12 +557,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. // -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -622,13 +633,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. -// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. +// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. 
+// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -637,39 +648,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. // -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. -// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. 
type SockaddrALG struct { @@ -1489,19 +1500,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { - if len(p) == 0 { + if emptyIovecs(iov) { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1509,53 +1514,36 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } // receive at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } oobn = int(msg.Controllen) recvflags = int(msg.Flags) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - var err error - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { - if len(p) == 0 { + empty = emptyIovecs(iov) + if empty { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1563,19 +1551,23 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } // send at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -1838,6 +1830,9 @@ func Dup2(oldfd, newfd int) error { //sys Fremovexattr(fd int, attr string) (err error) //sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) +//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) +//sys Fsopen(fsName string, flags int) (fd int, err error) +//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) //sys Getdents(fd int, buf []byte) (n int, 
err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1868,7 +1863,9 @@ func Getpgrp() (pid int) { //sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 @@ -1896,17 +1893,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint return int(ret), nil } -// issue 1435. -// On linux Setuid and Setgid only affects the current thread, not the process. -// This does not match what most callers expect so we must return an error -// here rather than letting the caller think that the call succeeded. - func Setuid(uid int) (err error) { - return EOPNOTSUPP + return syscall.Setuid(uid) +} + +func Setgid(gid int) (err error) { + return syscall.Setgid(gid) +} + +func Setreuid(ruid, euid int) (err error) { + return syscall.Setreuid(ruid, euid) +} + +func Setregid(rgid, egid int) (err error) { + return syscall.Setregid(rgid, egid) } -func Setgid(uid int) (err error) { - return EOPNOTSUPP +func Setresuid(ruid, euid, suid int) (err error) { + return syscall.Setresuid(ruid, euid, suid) +} + +func Setresgid(rgid, egid, sgid int) (err error) { + return syscall.Setresgid(rgid, egid, sgid) } // SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. @@ -2193,7 +2201,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { gid = Getgid() } - if uint32(gid) == st.Gid || isGroupMember(gid) { + if uint32(gid) == st.Gid || isGroupMember(int(st.Gid)) { fmode = (st.Mode >> 3) & 7 } else { fmode = st.Mode & 7 @@ -2245,7 +2253,7 @@ func (fh *FileHandle) Bytes() []byte { if n == 0 { return nil } - return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n] + return unsafe.Slice((*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type))+4)), n) } // NameToHandleAt wraps the name_to_handle_at system call; it obtains @@ -2308,17 +2316,73 @@ type RemoteIovec struct { //sys PidfdOpen(pid int, flags int) (fd int, err error) = SYS_PIDFD_OPEN //sys PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) = SYS_PIDFD_GETFD +//sys PidfdSendSignal(pidfd int, sig Signal, info *Siginfo, flags int) (err error) = SYS_PIDFD_SEND_SIGNAL //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) //sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) //sys shmdt(addr uintptr) (err error) //sys shmget(key int, size int, flag int) (id int, err error) +//sys getitimer(which int, currValue *Itimerval) (err error) +//sys setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) + +// MakeItimerval creates an Itimerval from interval and value durations. 
+func MakeItimerval(interval, value time.Duration) Itimerval { + return Itimerval{ + Interval: NsecToTimeval(interval.Nanoseconds()), + Value: NsecToTimeval(value.Nanoseconds()), + } +} + +// A value which may be passed to the which parameter for Getitimer and +// Setitimer. +type ItimerWhich int + +// Possible which values for Getitimer and Setitimer. +const ( + ItimerReal ItimerWhich = ITIMER_REAL + ItimerVirtual ItimerWhich = ITIMER_VIRTUAL + ItimerProf ItimerWhich = ITIMER_PROF +) + +// Getitimer wraps getitimer(2) to return the current value of the timer +// specified by which. +func Getitimer(which ItimerWhich) (Itimerval, error) { + var it Itimerval + if err := getitimer(int(which), &it); err != nil { + return Itimerval{}, err + } + + return it, nil +} + +// Setitimer wraps setitimer(2) to arm or disarm the timer specified by which. +// It returns the previous value of the timer. +// +// If the Itimerval argument is the zero value, the timer will be disarmed. +func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { + var prev Itimerval + if err := setitimer(int(which), &it, &prev); err != nil { + return Itimerval{}, err + } + + return prev, nil +} + +//sysnb rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) = SYS_RT_SIGPROCMASK + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + if oldset != nil { + // Explicitly clear in case Sigset_t is larger than _C__NSIG. + *oldset = Sigset_t{} + } + return rtSigprocmask(how, set, oldset, _C__NSIG/8) +} + /* * Unimplemented */ // AfsSyscall -// Alarm // ArchPrctl // Brk // ClockNanosleep @@ -2334,7 +2398,6 @@ type RemoteIovec struct { // GetMempolicy // GetRobustList // GetThreadArea -// Getitimer // Getpmsg // IoCancel // IoDestroy @@ -2374,7 +2437,6 @@ type RemoteIovec struct { // RestartSyscall // RtSigaction // RtSigpending -// RtSigprocmask // RtSigqueueinfo // RtSigreturn // RtSigsuspend @@ -2412,5 +2474,4 @@ type RemoteIovec struct { // Vfork // Vhangup // Vserver -// Waitid // _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 5f757e8aa770..ff5b5899d6db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -35,16 +35,12 @@ func setTimeval(sec, usec int64) Timeval { //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err 
error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) @@ -173,14 +169,6 @@ const ( _SENDMMSG = 20 ) -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) - if e != 0 { - err = e - } - return -} - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) if e != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go new file mode 100644 index 000000000000..08086ac6a4c4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) +// +build linux +// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 + +package unix + +// SYS_ALARM is not defined on arm or riscv, but is available for other GOARCH +// values. + +//sys Alarm(seconds uint) (remaining uint, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 4299125aa7cc..9b2703532989 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -28,9 +28,10 @@ func Lstat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) } +//sys MemfdSecret(flags int) (fd int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -45,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -62,7 +59,6 @@ func Stat(path string, stat *Stat_t) (err error) { //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) 
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 79edeb9cb14a..856ad1d635cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -27,7 +27,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return newoffset, nil } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -63,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 @@ -97,8 +92,8 @@ func Utime(path string, buf *Utimbuf) error { //sys utimes(path string, times *[2]Timeval) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 862890de29bf..6422704bc52a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -22,8 +22,9 @@ import "unsafe" //sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -38,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid 
int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -66,7 +63,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return ENOSYS } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000000..59dab510e97c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,222 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func Setrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, rlim, nil) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 8932e34ad2ad..bfef09a39eb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -21,8 +21,8 @@ package unix //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -37,18 +37,13 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) 
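The `Getitimer`/`Setitimer` wrappers and the `MakeItimerval` helper added in the `syscall_linux.go` hunk above are new exported API in this revision of `golang.org/x/sys/unix`. A minimal usage sketch, assuming a Linux build against this revision (not part of the patch itself); note that an expiring `ITIMER_REAL` delivers `SIGALRM`, so the sketch disarms the timer before it can fire:

```go
// Hypothetical main package exercising the new Itimerval helpers.
package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	// Arm a one-shot 500ms real-time timer. A zero Interval means the
	// timer does not reload after it expires.
	it := unix.MakeItimerval(0, 500*time.Millisecond)
	prev, err := unix.Setitimer(unix.ItimerReal, it)
	if err != nil {
		panic(err)
	}
	fmt.Printf("previously armed: %+v\n", prev)

	// Read back the currently armed timer without modifying it.
	cur, err := unix.Getitimer(unix.ItimerReal)
	if err != nil {
		panic(err)
	}
	fmt.Printf("now armed: %+v\n", cur)

	// Disarm before expiry: SIGALRM's default action would terminate the
	// process. Per the doc comment above, the zero Itimerval disarms.
	if _, err := unix.Setitimer(unix.ItimerReal, unix.Itimerval{}); err != nil {
		panic(err)
	}
}
```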
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 7821c25d9f77..ab302509663e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -25,23 +25,18 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Getuid() (uid int) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index c5053a0f03fd..eac1cf1acc86 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -27,23 +27,18 @@ import ( //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err 
error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 25786c4216b5..4df56616b8f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -26,26 +26,21 @@ package unix //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) //sys Truncate(path string, length int64) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f9f710414f0..5f4243dea2c3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -22,8 +22,9 @@ import "unsafe" //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys MemfdSecret(flags int) (fd int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r 
*FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { @@ -37,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -65,7 +62,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return ENOSYS } -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 6aa59cb270db..d0a7d4066851 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -26,19 +26,15 @@ import ( //sys Lchown(path string, uid int, gid int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) @@ -145,15 +141,6 @@ const ( netSendMMsg = 20 ) -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (int, error) { - args := [3]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))} - fd, _, err := Syscall(SYS_SOCKETCALL, netAccept, uintptr(unsafe.Pointer(&args)), 0) - if err != 0 { - return 0, err - } - return int(fd), nil -} - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (int, error) { args := [4]uintptr{uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)} fd, _, err := Syscall(SYS_SOCKETCALL, netAccept4, uintptr(unsafe.Pointer(&args)), 0) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go 
b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index bbe8d174f8c1..f5c793be26d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -23,26 +23,21 @@ package unix //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) -//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 696fed496f68..666f0a1b33d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -163,11 +163,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return -1, ENOSYS } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -313,8 +308,8 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go 
b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 11b1d419da91..78daceb338bc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,6 +81,7 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL @@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error { } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -149,11 +151,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -func setattrlistTimes(path string, times []Timespec, flags int) error { - // used on Darwin for UtimesNano - return ENOSYS -} - //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -274,8 +271,8 @@ func Uname(uname *Utsname) error { //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go new file mode 100644 index 000000000000..e23c5394eff3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build openbsd && !mips64 +// +build openbsd,!mips64 + +package unix + +import _ "unsafe" + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall syscall.syscall +//go:linkname syscall_syscall6 syscall.syscall6 +//go:linkname syscall_syscall10 syscall.syscall10 +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 + +func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go index 30f285343ee4..1378489f8d7f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -26,6 +26,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go new file mode 100644 index 000000000000..c2796139c013 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/ppc64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go new file mode 100644 index 000000000000..23199a7ff624 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/riscv64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c813921e855..2109e569ccef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -451,77 +451,59 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr - var rsa RawSockaddrAny - msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); n == -1 { return } oobn = int(msg.Accrightslen) - // source address is only specified if the socket is unconnected - if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(fd, &rsa) - } - return -} - -func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { - _, err = SendmsgN(fd, p, oob, to, flags) return } //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { - var ptr unsafe.Pointer - var salen _Socklen - if to != nil { - ptr, salen, err = to.sockaddr() - if err != nil { - return 0, err - } - } +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + 
if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -636,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) @@ -661,8 +644,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys Pause() (err error) -//sys Pread(fd int, p []byte, offset int64) (n int, err error) -//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) @@ -755,8 +738,20 @@ type fileObjCookie struct { type EventPort struct { port int mu sync.Mutex - fds map[uintptr]interface{} + fds map[uintptr]*fileObjCookie paths map[string]*fileObjCookie + // The user cookie presents an interesting challenge from a memory management perspective. + // There are two paths by which we can discover that it is no longer in use: + // 1. The user calls port_dissociate before any events fire + // 2. An event fires and we return it to the user + // The tricky situation is if the event has fired in the kernel but + // the user hasn't requested/received it yet. + // If the user wants to port_dissociate before the event has been processed, + // we should handle things gracefully. To do so, we need to keep an extra + // reference to the cookie around until the event is processed + // thus the otherwise seemingly extraneous "cookies" map + // The key of this map is a pointer to the corresponding fCookie + cookies map[*fileObjCookie]struct{} } // PortEvent is an abstraction of the port_event C struct. @@ -780,9 +775,10 @@ func NewEventPort() (*EventPort, error) { return nil, err } e := &EventPort{ - port: port, - fds: make(map[uintptr]interface{}), - paths: make(map[string]*fileObjCookie), + port: port, + fds: make(map[uintptr]*fileObjCookie), + paths: make(map[string]*fileObjCookie), + cookies: make(map[*fileObjCookie]struct{}), } return e, nil } @@ -797,9 +793,14 @@ func NewEventPort() (*EventPort, error) { func (e *EventPort) Close() error { e.mu.Lock() defer e.mu.Unlock() + err := Close(e.port) + if err != nil { + return err + } e.fds = nil e.paths = nil - return Close(e.port) + e.cookies = nil + return nil } // PathIsWatched checks to see if path is associated with this EventPort. 
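The `cookies` map introduced above keeps each user cookie reachable from Go between `port_associate(3c)` and event delivery, so the cookie cannot be collected while the kernel still holds a pointer to it. As a hedged, Solaris-only sketch of the `EventPort` API whose internals change here (not part of the vendored diff; the watched file and cookie value are placeholder assumptions):

```go
//go:build solaris

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	port, err := unix.NewEventPort()
	if err != nil {
		panic(err)
	}
	defer port.Close()

	f, err := os.Open("/tmp/watched") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The cookie may be any Go value; with this change the EventPort also
	// records it in its internal cookies map, so it stays valid even if the
	// fd is dissociated after the kernel has already queued the event.
	if err := port.AssociateFd(f.Fd(), unix.POLLIN, "my-cookie"); err != nil {
		panic(err)
	}

	// A nil *Timespec blocks until one event is delivered; peIntToExt then
	// resolves the kernel's user pointer back through the cookies map.
	ev, err := port.GetOne(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fd=%d cookie=%v\n", ev.Fd, ev.Cookie)
}
```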
@@ -826,16 +827,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo if _, found := e.paths[path]; found { return fmt.Errorf("%v is already associated with this Event Port", path) } - fobj, err := createFileObj(path, stat) + fCookie, err := createFileObjCookie(path, stat, cookie) if err != nil { return err } - fCookie := &fileObjCookie{fobj, cookie} - _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.paths[path] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -848,11 +849,19 @@ func (e *EventPort) DissociatePath(path string) error { return fmt.Errorf("%v is not associated with this Event Port", path) } _, err := port_dissociate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(f.fobj))) - if err != nil { + // If the path is no longer associated with this event port (ENOENT) + // we should delete it from our map. We can still return ENOENT to the caller. + // But we need to save the cookie + if err != nil && err != ENOENT { return err } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.paths[path] + delete(e.cookies, fCookie) + } delete(e.paths, path) - return nil + return err } // AssociateFd wraps calls to port_associate(3c) on file descriptors. @@ -862,12 +871,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro if _, found := e.fds[fd]; found { return fmt.Errorf("%v is already associated with this Event Port", fd) } - pcookie := &cookie - _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(pcookie))) + fCookie, err := createFileObjCookie("", nil, cookie) + if err != nil { + return err + } + _, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } - e.fds[fd] = pcookie + e.fds[fd] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -880,28 +893,37 @@ func (e *EventPort) DissociateFd(fd uintptr) error { return fmt.Errorf("%v is not associated with this Event Port", fd) } _, err := port_dissociate(e.port, PORT_SOURCE_FD, fd) - if err != nil { + if err != nil && err != ENOENT { return err } + if err == nil { + // dissociate was successful, safe to delete the cookie + fCookie := e.fds[fd] + delete(e.cookies, fCookie) + } delete(e.fds, fd) - return nil + return err } -func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { - fobj := new(fileObj) - bs, err := ByteSliceFromString(name) - if err != nil { - return nil, err - } - fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) - s := stat.Sys().(*syscall.Stat_t) - fobj.Atim.Sec = s.Atim.Sec - fobj.Atim.Nsec = s.Atim.Nsec - fobj.Mtim.Sec = s.Mtim.Sec - fobj.Mtim.Nsec = s.Mtim.Nsec - fobj.Ctim.Sec = s.Ctim.Sec - fobj.Ctim.Nsec = s.Ctim.Nsec - return fobj, nil +func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) { + fCookie := new(fileObjCookie) + fCookie.cookie = cookie + if name != "" && stat != nil { + fCookie.fobj = new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fCookie.fobj.Atim.Sec = s.Atim.Sec + fCookie.fobj.Atim.Nsec = s.Atim.Nsec + fCookie.fobj.Mtim.Sec = s.Mtim.Sec + 
fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec + fCookie.fobj.Ctim.Sec = s.Ctim.Sec + fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec + } + return fCookie, nil } // GetOne wraps port_get(3c) and returns a single PortEvent. @@ -912,24 +934,52 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { return nil, err } p := new(PortEvent) - p.Events = pe.Events - p.Source = pe.Source e.mu.Lock() defer e.mu.Unlock() - switch pe.Source { + err = e.peIntToExt(pe, p) + if err != nil { + return nil, err + } + return p, nil +} + +// peIntToExt converts a cgo portEvent struct into the friendlier PortEvent +// NOTE: Always call this function while holding the e.mu mutex +func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error { + if e.cookies == nil { + return fmt.Errorf("this EventPort is already closed") + } + peExt.Events = peInt.Events + peExt.Source = peInt.Source + fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User)) + _, found := e.cookies[fCookie] + + if !found { + panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254") + } + peExt.Cookie = fCookie.cookie + delete(e.cookies, fCookie) + + switch peInt.Source { case PORT_SOURCE_FD: - p.Fd = uintptr(pe.Object) - cookie := (*interface{})(unsafe.Pointer(pe.User)) - p.Cookie = *cookie - delete(e.fds, p.Fd) + peExt.Fd = uintptr(peInt.Object) + // Only remove the fds entry if it exists and this cookie matches + if fobj, ok := e.fds[peExt.Fd]; ok { + if fobj == fCookie { + delete(e.fds, peExt.Fd) + } + } case PORT_SOURCE_FILE: - p.fobj = (*fileObj)(unsafe.Pointer(uintptr(pe.Object))) - p.Path = BytePtrToString((*byte)(unsafe.Pointer(p.fobj.Name))) - cookie := (*interface{})(unsafe.Pointer(pe.User)) - p.Cookie = *cookie - delete(e.paths, p.Path) + peExt.fobj = fCookie.fobj + peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name))) + // Only remove the paths entry if it exists and this cookie matches + if fobj, ok := e.paths[peExt.Path]; ok { + if fobj == fCookie { + delete(e.paths, peExt.Path) + } + } } - return p, nil + return nil } // Pending wraps port_getn(3c) and returns how many events are pending. @@ -953,7 +1003,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) got := uint32(min) max := uint32(len(s)) var err error - ps := make([]portEvent, max, max) + ps := make([]portEvent, max) _, err = port_getn(e.port, &ps[0], max, &got, timeout) // got will be trustworthy with ETIME, but not any other error. if err != nil && err != ETIME { @@ -961,22 +1011,122 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) } e.mu.Lock() defer e.mu.Unlock() + valid := 0 for i := 0; i < int(got); i++ { - s[i].Events = ps[i].Events - s[i].Source = ps[i].Source - switch ps[i].Source { - case PORT_SOURCE_FD: - s[i].Fd = uintptr(ps[i].Object) - cookie := (*interface{})(unsafe.Pointer(ps[i].User)) - s[i].Cookie = *cookie - delete(e.fds, s[i].Fd) - case PORT_SOURCE_FILE: - s[i].fobj = (*fileObj)(unsafe.Pointer(uintptr(ps[i].Object))) - s[i].Path = BytePtrToString((*byte)(unsafe.Pointer(s[i].fobj.Name))) - cookie := (*interface{})(unsafe.Pointer(ps[i].User)) - s[i].Cookie = *cookie - delete(e.paths, s[i].Path) + err2 := e.peIntToExt(&ps[i], &s[i]) + if err2 != nil { + if valid == 0 && err == nil { + // If err2 is the only error and there are no valid events + // to return, return it to the caller. 
+ err = err2 + } + break + } + valid = i + 1 + } + return valid, err +} + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), } } - return int(got), err + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} + +func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { + return ioctlRet(fd, req, uintptr(arg)) +} + +func IoctlSetString(fd int, req uint, val string) error { + bs := make([]byte, len(val)+1) + copy(bs[:len(bs)-1], val) + err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) + runtime.KeepAlive(&bs[0]) + return err +} + +// Lifreq Helpers + +func (l *Lifreq) SetName(name string) error { + if len(name) >= len(l.Name) { + return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) + } + for i := range name { + l.Name[i] = int8(name[i]) + } + return nil +} + +func (l *Lifreq) SetLifruInt(d int) { + *(*int)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruInt() int { + return *(*int)(unsafe.Pointer(&l.Lifru[0])) +} + +func (l *Lifreq) SetLifruUint(d uint) { + *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruUint() uint { + return *(*uint)(unsafe.Pointer(&l.Lifru[0])) +} + +func IoctlLifreq(fd int, req uint, l *Lifreq) error { + return ioctl(fd, req, uintptr(unsafe.Pointer(l))) +} + +// Strioctl Helpers + +func (s *Strioctl) SetInt(i int) { + s.Len = int32(unsafe.Sizeof(i)) + s.Dp = (*int8)(unsafe.Pointer(&i)) +} + +func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { + return ioctlRet(fd, req, uintptr(unsafe.Pointer(s))) } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index cf296a2433a9..00bafda86545 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -13,8 +13,6 @@ import ( "sync" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d } // Use unsafe to convert addr into a []byte. - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = length - hdr.Len = length + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length) // Register mapping in m and return it. 
p := &b[cap(b)-1] @@ -177,6 +171,30 @@ func Write(fd int, p []byte) (n int, err error) { return } +func Pread(fd int, p []byte, offset int64) (n int, err error) { + n, err = pread(fd, p, offset) + if raceenabled { + if n > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), n) + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } + } + return +} + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwrite(fd, p, offset) + if raceenabled && n > 0 { + raceReadRange(unsafe.Pointer(&p[0]), n) + } + return +} + // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. var SocketDisableIPv6 bool @@ -313,16 +331,107 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +// RecvmsgBuffers receives a message from a socket using the recvmsg +// system call. The flags are passed to recvmsg. Any non-control data +// read is scattered into the buffers slices. The results are: +// - n is the number of non-control data read into bufs +// - oobn is the number of control data read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) + if err == nil && rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov[:], oob, ptr, salen, flags) +} + +// SendmsgBuffers sends a message on a socket to an address using the sendmsg +// system call. The flags are passed to sendmsg. Any non-control data written +// is gathered from buffers. The function returns the number of bytes written +// to the socket. 
+func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov, oob, ptr, salen, flags) +} + func Send(s int, buf []byte, flags int) (err error) { return sendto(s, buf, flags, nil, 0) } func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { - ptr, n, err := to.sockaddr() - if err != nil { - return err + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return err + } } - return sendto(fd, p, flags, ptr, n) + return sendto(fd, p, flags, ptr, salen) } func SetsockoptByte(fd, level, opt int, value byte) (err error) { @@ -433,3 +542,13 @@ func Lutimes(path string, tv []Timeval) error { } return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } + +// emptyIovec reports whether there are no bytes in the slice of Iovec. +func emptyIovecs(iov []Iovec) bool { + for i := range iov { + if iov[i].Len > 0 { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 5898e9a52b75..b6919ca580e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,11 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc +// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris // +build gc -// +build !ppc64le -// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index f8616f454ec6..68b2f3e1cd0a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -9,8 +9,10 @@ package unix import ( "bytes" + "fmt" "runtime" "sort" + "strings" "sync" "syscall" "unsafe" @@ -55,7 +57,13 @@ func (d *Dirent) NameString() string { if d == nil { return "" } - return string(d.Name[:d.Namlen]) + s := string(d.Name[:]) + idx := strings.IndexByte(s, 0) + if idx == -1 { + return s + } else { + return s[:idx] + } } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { @@ -1230,6 +1238,14 @@ func Readdir(dir uintptr) (*Dirent, error) { return &ent, err } +func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + if int64(r0) == -1 { + err = errnoErr(Errno(e1)) + } + return +} + func Closedir(dir uintptr) error { _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) if e != 0 { @@ -1821,3 +1837,158 @@ func Unmount(name string, mtm int) (err error) { } return err } + +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := 
runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } +} + +func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { + var d Dirent + + d.Ino = uint64(dirent.Ino) + offset, err := Telldir(dir) + if err != nil { + return d, err + } + + d.Off = int64(offset) + s := string(bytes.Split(dirent.Name[:], []byte{0})[0]) + copy(d.Name[:], s) + + d.Reclen = uint16(24 + len(d.NameString())) + var st Stat_t + path = path + "/" + s + err = Lstat(path, &st) + if err != nil { + return d, err + } + + d.Type = uint8(st.Mode >> 24) + return d, err +} + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulation of Getdirentries port from the Darwin implementation. + // COMMENTS FROM DARWIN: + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // Get path from fd to avoid unavailable call (fdopendir) + path, err := fdToPath(fd) + if err != nil { + return 0, err + } + d, err := Opendir(path) + if err != nil { + return 0, err + } + defer Closedir(d) + + var cnt int64 + for { + var entryLE direntLE + var entrypLE *direntLE + e := readdir_r(d, &entryLE, &entrypLE) + if e != nil { + return n, e + } + if entrypLE == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + // Dirent on zos has a different structure + entry, e := direntLeToDirentUnix(&entryLE, d, path) + if e != nil { + return n, e + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. 
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 0bb4c8de557b..5bb41d17bc47 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -7,11 +7,7 @@ package unix -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) +import "unsafe" // SysvShmAttach attaches the Sysv shared memory segment associated with the // shared memory identifier id. @@ -34,12 +30,7 @@ func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) { } // Use unsafe to convert addr into a []byte. - // TODO: convert to unsafe.Slice once we can assume Go 1.17 - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = int(info.Segsz) - hdr.Len = int(info.Segsz) + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.Segsz)) return b, nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 25df1e37801f..663b3779de2d 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -160,13 +160,12 @@ func Lremovexattr(link string, attr string) (err error) { } func Listxattr(file string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) destsiz := len(dest) // FreeBSD won't allow you to list xattrs from multiple namespaces - s := 0 + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + stmp, e := ListxattrNS(file, nsid, dest[pos:]) /* Errors accessing system attrs are ignored so that * we can implement the Linux-like behavior of omitting errors that @@ -175,66 +174,102 @@ func Listxattr(file string, dest []byte) (sz int, err error) { * Linux will still error if we ask for user attributes on a file that * we don't have read permissions on, so don't ignore those errors */ - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Flistxattr(fd int, dest []byte) (sz int, err error) { +func ListxattrNS(file string, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := 
ExtattrListFd(fd, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := FlistxattrNS(fd, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Llistxattr(link string, dest []byte) (sz int, err error) { +func FlistxattrNS(fd int, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := LlistxattrNS(link, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) + } + + return s, nil +} + +func LlistxattrNS(link string, nsid int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err } return s, nil diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 440900112cd4..f8c2c5138748 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 
LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1179,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1189,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1196,6 +1208,60 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXMMREGS = 0x40 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXMMREGS = 0x41 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1320,10 +1386,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1414,6 +1482,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1472,22 +1541,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1496,12 +1583,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH 
= 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1541,6 +1634,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1554,7 +1648,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1787,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1936,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1998,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 64520d31226b..96310c3be1b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 
+ ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,58 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1385,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1481,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1540,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1582,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b 
TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1633,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1647,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1693,12 +1784,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1841,7 +1933,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1903,6 +1995,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 99e9a0e06e95..777b69defa04 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -362,7 +363,7 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 - DIOCGATTR = 0xc144648e + DIOCGATTR = 0xc148648e DIOCGDELETE = 0x80106488 DIOCGFLUSH = 0x20006487 DIOCGFRONTSTUFF = 0x40086486 @@ -377,7 +378,7 @@ const ( DIOCGSTRIPESIZE = 0x4008648b DIOCSKERNELDUMP = 0x804c6490 DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 - DIOCZONECMD = 0xc06c648f + DIOCZONECMD = 0xc078648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -407,7 +408,9 @@ const ( DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -417,6 +420,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -444,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 
DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -484,9 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -502,7 +508,9 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 @@ -526,15 +534,18 @@ const ( DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc @@ -554,6 +565,7 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc @@ -578,6 +590,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -585,11 +598,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -606,6 +620,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -647,6 +662,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -663,6 +679,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -719,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -799,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -837,6 +854,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -857,13 +875,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -875,6 +893,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -894,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -935,10 +955,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - 
IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -948,6 +966,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -956,6 +975,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -972,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -983,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1071,10 +1094,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1103,6 +1128,7 @@ const ( NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1159,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1169,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1176,6 +1208,53 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETVFPREGS = 0x40 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETVFPREGS = 0x41 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1257,7 +1336,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1267,15 +1345,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SEEK_CUR = 0x1 SEEK_DATA = 0x3 SEEK_END = 0x2 @@ -1299,10 +1379,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 
0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1318,8 +1400,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1350,6 +1435,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1369,6 +1455,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1377,6 +1464,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1387,13 +1475,22 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1437,10 +1534,69 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + 
TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1448,6 +1604,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1463,8 +1625,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1528,6 +1712,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1592,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1740,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1802,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 4c837711493f..c557ac2db317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ 
const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,51 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1378,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1474,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1533,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 
TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1575,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1626,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1640,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go new file mode 100644 index 000000000000..341b4d96265b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -0,0 +1,2148 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_HYPERV = 0x2b + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2b + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B1000000 = 0xf4240 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1500000 = 0x16e360 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B2000000 = 0x1e8480 + B230400 = 0x38400 + B2400 = 0x960 + B2500000 = 0x2625a0 + B28800 = 0x7080 + B300 = 0x12c + B3000000 = 0x2dc6c0 + B3500000 = 0x3567e0 + B38400 = 0x9600 + B4000000 = 0x3d0900 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B500000 = 0x7a120 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 
+ BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 
0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x5 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_COARSE = 0xc + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_COARSE = 0xa + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGKERNELDUMP = 0xc0986492 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80986491 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCSKERNELDUMP_FREEBSD12 = 0x80506490 + DIOCZONECMD = 0xc080648f + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 + DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + 
DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB_KONTRON = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 + DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x114 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + 
ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EHE_DEAD_PRIORITY = -0x1 + EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xd + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_NONE = -0xc8 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADD_SEALS = 0x13 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_GET_SEALS = 0x14 + F_ISUNIONSTACK = 0x15 + F_KINFO = 0x16 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_KNOWSEPOCH = 0x20 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_NETMASK_DEFAULT = 0xffffff00 + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + 
IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT 
= 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + 
MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0xfc000000 + MFD_HUGE_SHIFT = 0x1a + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0x300d0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EMPTYDIR = 0x2000000000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_EXTLS = 0x4000000000 + MNT_EXTLSCERT = 0x8000000000 + MNT_EXTLSCERTUSER = 0x10000000000 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOCOVER = 0x1000000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0xad8d0807e + MNT_USER = 0x8000 + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_NHGRP = 0x7 + NET_RT_NHOP = 0x6 + NFDBITS = 0x40 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + 
O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x1000000 + O_EMPTY_PATH = 0x2000000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_PATH = 0x400000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_COREDUMP = 0x1d + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_DEFAULT_FIB = 0x0 + RT_DEFAULT_WEIGHT = 0x1 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + 
RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAX_WEIGHT = 0xffffff + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_CREDS2 = 0x8 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0x8020692c + SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 + SO_SETFIB = 0x1014 + 
SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_INIT_RATE = 0x458 + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_USE_RACK_RR = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DEFER_OPTIONS = 0x470 + TCP_DELACK = 0x48 + TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 + TCP_FAST_RSM_HACK = 0x471 + TCP_FIN_IS_RST = 0x49 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_HDWR_RATE_CAP = 0x46a + TCP_HDWR_UP_ONLY = 0x46c + TCP_IDLE_REDUCE = 0x46 + TCP_INFO = 0x20 + TCP_IWND_NB = 0x2b + TCP_IWND_NSEG = 0x2c + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOGID_CNT = 0x2e + TCP_LOG_ID_LEN = 0x40 + TCP_LOG_LIMIT = 0x4a + TCP_LOG_TAG = 0x2f + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXPEAKRATE = 0x45 + 
TCP_MAXSEG = 0x2 + TCP_MAXUNACKTIME = 0x44 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NO_PRR = 0x462 + TCP_PACING_RATE_CAP = 0x46b + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_PERF_INFO = 0x4e + TCP_PROC_ACCOUNTING = 0x4c + TCP_RACK_ABC_VAL = 0x46d + TCP_RACK_CHEAT_NOT_CONF_RATE = 0x459 + TCP_RACK_DO_DETECTION = 0x449 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_FORCE_MSEG = 0x45d + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_GP_INCREASE_CA = 0x45a + TCP_RACK_GP_INCREASE_REC = 0x45c + TCP_RACK_GP_INCREASE_SS = 0x45b + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MBUF_QUEUE = 0x41a + TCP_RACK_MEASURE_CNT = 0x46f + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_NONRXT_CFG_RATE = 0x463 + TCP_RACK_NO_PUSH_AT_MAX = 0x466 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_RATE_CA = 0x45e + TCP_RACK_PACE_RATE_REC = 0x460 + TCP_RACK_PACE_RATE_SS = 0x45f + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PACE_TO_FILL = 0x467 + TCP_RACK_PACING_BETA = 0x472 + TCP_RACK_PACING_BETA_ECN = 0x473 + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROFILE = 0x469 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_RR_CONF = 0x459 + TCP_RACK_TIMER_SLOP = 0x474 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 + TCP_REC_ABC_VAL = 0x46e + TCP_REMOTE_UDP_ENCAPS_PORT = 0x47 + TCP_REUSPORT_LB_NUMA = 0x402 + TCP_REUSPORT_LB_NUMA_CURDOM = -0x1 + TCP_REUSPORT_LB_NUMA_NODOM = -0x2 + TCP_RXTLS_ENABLE = 0x29 + TCP_RXTLS_MODE = 0x2a + TCP_SHARED_CWND_ALLOWED = 0x4b + TCP_SHARED_CWND_ENABLE = 0x464 + TCP_SHARED_CWND_TIME_LIMIT = 0x468 + TCP_STATS = 0x21 + TCP_TIMELY_DYN_ADJ = 0x465 + TCP_TLS_MODE_IFNET = 0x2 + TCP_TLS_MODE_NONE = 0x0 + TCP_TLS_MODE_SW = 0x1 + TCP_TLS_MODE_TOE = 0x3 + TCP_TXTLS_ENABLE = 0x27 + TCP_TXTLS_MODE = 0x28 + TCP_USER_LOG = 0x30 + TCP_USE_CMP_ACKS = 0x4d + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + 
TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x61) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + 
EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, 
"ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", 
"filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4e5420586120..785d693eb328 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -38,7 +38,8 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2d + AF_MAX = 0x2e + AF_MCTP = 0x2d AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -139,6 +140,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + AUDIT_ARCH_M32R = 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 0x18 + 
AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + 
AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + AUDIT_STATUS_RATE_LIMIT = 0x8 + AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -183,6 +484,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -190,6 +492,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -259,6 +563,17 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_CTRLMODE_3_SAMPLES = 0x4 + CAN_CTRLMODE_BERR_REPORTING = 0x10 + CAN_CTRLMODE_CC_LEN8_DLC = 0x100 + CAN_CTRLMODE_FD = 0x20 + CAN_CTRLMODE_FD_NON_ISO = 0x80 + CAN_CTRLMODE_LISTENONLY = 0x2 + CAN_CTRLMODE_LOOPBACK = 0x1 + CAN_CTRLMODE_ONE_SHOT = 0x8 + CAN_CTRLMODE_PRESUME_ACK = 0x40 + CAN_CTRLMODE_TDC_AUTO = 0x200 + CAN_CTRLMODE_TDC_MANUAL = 0x400 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff @@ -336,6 +651,7 @@ const ( CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff + 
CAN_TERMINATION_DISABLED = 0x0 CAN_TP16 = 0x3 CAN_TP20 = 0x4 CAP_AUDIT_CONTROL = 0x1e @@ -380,9 +696,11 @@ const ( CAP_SYS_TIME = 0x19 CAP_SYS_TTY_CONFIG = 0x1a CAP_WAKE_ALARM = 0x23 + CEPH_SUPER_MAGIC = 0xc36400 CFLUSH = 0xf CGROUP2_SUPER_MAGIC = 0x63677270 CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_SUPER_MAGIC = 0xff534d42 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -502,9 +820,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-03-22)" + DM_VERSION_EXTRA = "-ioctl (2022-02-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2d + DM_VERSION_MINOR = 0x2e DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -520,6 +838,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -697,6 +1064,7 @@ const ( ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -734,6 +1102,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -771,6 +1140,7 @@ const ( EV_SYN = 0x0 EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 + EXFAT_SUPER_MAGIC = 0x2011bab0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 EXT4_SUPER_MAGIC = 0xef53 @@ -813,12 +1183,15 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc + FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 FAN_MARK_IGNORED_MASK = 0x20 @@ -841,17 +1214,27 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 + FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIB_RULE_DEV_DETACHED = 0x8 + FIB_RULE_FIND_SADDR = 0x10000 + FIB_RULE_IIF_DETACHED = 0x8 + FIB_RULE_INVERT = 0x2 + FIB_RULE_OIF_DETACHED = 0x10 + FIB_RULE_PERMANENT = 0x1 + FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 
0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" @@ -914,6 +1297,7 @@ const ( FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 + FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -1026,7 +1410,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1267,15 +1651,21 @@ const ( IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUTF8 = 0x4000 IXANY = 0x800 JFFS2_SUPER_MAGIC = 0x72b6 + KCMPROTO_CONNECTED = 0x0 + KCM_RECV_DISABLE = 0x1 KEXEC_ARCH_386 = 0x30000 KEXEC_ARCH_68K = 0x40000 KEXEC_ARCH_AARCH64 = 0xb70000 KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 @@ -1368,6 +1758,7 @@ const ( LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 LANDLOCK_ACCESS_FS_READ_DIR = 0x8 LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 @@ -1477,6 +1868,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 MOUNT_ATTR_IDMAP = 0x100000 @@ -1722,6 +2114,7 @@ const ( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -1831,6 +2224,9 @@ const ( PERF_MEM_BLK_NA = 0x1 PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_HOPS_0 = 0x1 + PERF_MEM_HOPS_1 = 0x2 + PERF_MEM_HOPS_2 = 0x3 + PERF_MEM_HOPS_3 = 0x4 PERF_MEM_HOPS_SHIFT = 0x2b PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 @@ -2034,6 +2430,13 @@ const ( PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SET_VMA = 0x53564d41 + PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 @@ -2117,6 +2520,10 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + P_ALL = 0x0 + P_PGID = 0x2 + P_PID = 0x1 + P_PIDFD = 0x3 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 @@ -2182,8 +2589,9 @@ const ( RTC_FEATURE_ALARM = 0x0 RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 - RTC_FEATURE_CNT = 0x7 + RTC_FEATURE_CNT = 0x8 RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_FEATURE_UPDATE_INTERRUPT = 0x4 @@ -2257,6 +2665,7 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 @@ -2289,8 +2698,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x77 + RTM_MAX = 0x7b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2314,11 +2724,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x1a - RTM_NR_MSGTYPES = 0x68 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB 
= 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2442,6 +2854,9 @@ const ( SIOCGSTAMPNS = 0x8907 SIOCGSTAMPNS_OLD = 0x8907 SIOCGSTAMP_OLD = 0x8906 + SIOCKCMATTACH = 0x89e0 + SIOCKCMCLONE = 0x89e2 + SIOCKCMUNATTACH = 0x89e1 SIOCOUTQNSD = 0x894b SIOCPROTOPRIVATE = 0x89e0 SIOCRTMSG = 0x890d @@ -2484,6 +2899,7 @@ const ( SMART_STATUS = 0xda SMART_WRITE_LOG_SECTOR = 0xd6 SMART_WRITE_THRESHOLDS = 0xd7 + SMB2_SUPER_MAGIC = 0xfe534d42 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 @@ -2495,6 +2911,9 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2510,6 +2929,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2519,6 +2940,7 @@ const ( SOL_RAW = 0xff SOL_RDS = 0x114 SOL_RXRPC = 0x110 + SOL_SMC = 0x11e SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a @@ -2625,7 +3047,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xa + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 234fd4a5d1ad..36c0dfc7c4cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 +// mkerrors.sh -Wall -Werror -static -I/tmp/386/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -350,6 +351,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 58619b7589b0..4ff942703b7b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 +// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
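A minimal usage sketch, not part of the vendored diff: the SO_TXREHASH and SOCK_TXREHASH_* values added in these hunks are ordinary setsockopt knobs, so they compose with the package's existing exported helpers (unix.Socket, unix.SetsockoptInt, unix.Close). The program below is illustrative only; it assumes a Linux kernel recent enough to know the option, and hedges for older kernels, which typically reject unknown SOL_SOCKET options with ENOPROTOOPT.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a TCP socket to toggle the option on.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Opt this socket out of transmit hash rethreading using the newly
	// generated constants; on kernels without SO_TXREHASH this call is
	// expected to fail with ENOPROTOOPT rather than anything fatal.
	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_TXREHASH, unix.SOCK_TXREHASH_DISABLED); err != nil {
		fmt.Println("SO_TXREHASH unavailable:", err)
	}
}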
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go package unix @@ -327,6 +327,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3a64ff59dcec..3eaa0fb78e30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/arm/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go package unix @@ -333,6 +333,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -357,6 +358,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index abe0b925789f..d7995bdc3a21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/arm64/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go package unix @@ -323,6 +323,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -347,6 +348,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 @@ -511,6 +513,7 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + ZA_MAGIC = 0x54366345 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go new file mode 100644 index 000000000000..928e24c20535 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -0,0 +1,818 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/loong64/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d + FLUSHO = 0x1000 + FPU_CTX_MAGIC = 0x46505501 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + OTPERASE = 0x400c4d19 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 
0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + 
SO_NETNS_COOKIE = 0x47 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 
0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = 
syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no 
message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file 
handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 14d7a84399de..179bffb474b4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 99e7c4ac0b45..1fba17bd75cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 496364c33cc6..b77dde31537e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 3e40830857dd..78c6c751bfa5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mipsle/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go package unix @@ -326,6 +326,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1151a7dfab33..1c0d31f0b4c2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go package unix @@ -381,6 +381,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -405,6 +406,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index ed17f249e758..959dd9bb8fcc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index d84a37c1ac23..5a873cdbc9d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go package unix @@ -385,6 +385,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +410,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 5cafba83f6b4..e336d141e1f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/riscv64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go package unix @@ -314,6 +314,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -338,6 +339,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 6d122da41c53..390c01d92a53 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/s390x/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go package unix @@ -389,6 +389,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -413,6 +414,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 6bd19e51dbb9..98a6e5f11f50 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/sparc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go package unix @@ -380,6 +380,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 + SO_RCVMARK = 0x54 SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 @@ -404,6 +405,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x42 SO_TIMESTAMPNS_OLD = 0x21 SO_TIMESTAMP_NEW = 0x46 + SO_TXREHASH = 0x53 SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x25 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go new file mode 100644 index 000000000000..8e2c51b1eec0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -0,0 +1,1905 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE 
= 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN 
= 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + 
ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 
= 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + 
IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 
+ IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + 
MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 
0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 
0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 
0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = 
syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot 
allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, 
+ {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go new file mode 100644 index 000000000000..13d403031ed6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -0,0 +1,1904 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + 
DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + 
ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + 
ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + 
IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD 
= 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + 
IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 
0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 
0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 
0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 
0x8
+	TIOCPKT_STOP = 0x4
+	TIOCREMOTE = 0x80047469
+	TIOCSBRK = 0x2000747b
+	TIOCSCTTY = 0x20007461
+	TIOCSDTR = 0x20007479
+	TIOCSETA = 0x802c7414
+	TIOCSETAF = 0x802c7416
+	TIOCSETAW = 0x802c7415
+	TIOCSETD = 0x8004741b
+	TIOCSETVERAUTH = 0x8004741c
+	TIOCSFLAGS = 0x8004745c
+	TIOCSIG = 0x8004745f
+	TIOCSPGRP = 0x80047476
+	TIOCSTART = 0x2000746e
+	TIOCSTAT = 0x20007465
+	TIOCSTOP = 0x2000746f
+	TIOCSTSTAMP = 0x8008745a
+	TIOCSWINSZ = 0x80087467
+	TIOCUCNTL = 0x80047466
+	TIOCUCNTL_CBRK = 0x7a
+	TIOCUCNTL_SBRK = 0x7b
+	TOSTOP = 0x400000
+	UTIME_NOW = -0x2
+	UTIME_OMIT = -0x1
+	VDISCARD = 0xf
+	VDSUSP = 0xb
+	VEOF = 0x0
+	VEOL = 0x1
+	VEOL2 = 0x2
+	VERASE = 0x3
+	VINTR = 0x8
+	VKILL = 0x5
+	VLNEXT = 0xe
+	VMIN = 0x10
+	VM_ANONMIN = 0x7
+	VM_LOADAVG = 0x2
+	VM_MALLOC_CONF = 0xc
+	VM_MAXID = 0xd
+	VM_MAXSLP = 0xa
+	VM_METER = 0x1
+	VM_NKMEMPAGES = 0x6
+	VM_PSSTRINGS = 0x3
+	VM_SWAPENCRYPT = 0x5
+	VM_USPACE = 0xb
+	VM_UVMEXP = 0x4
+	VM_VNODEMIN = 0x9
+	VM_VTEXTMIN = 0x8
+	VQUIT = 0x9
+	VREPRINT = 0x6
+	VSTART = 0xc
+	VSTATUS = 0x12
+	VSTOP = 0xd
+	VSUSP = 0xa
+	VTIME = 0x11
+	VWERASE = 0x4
+	WALTSIG = 0x4
+	WCONTINUED = 0x8
+	WCOREFLAG = 0x80
+	WNOHANG = 0x1
+	WUNTRACED = 0x2
+	XCASE = 0x1000000
+)
+
+// Errors
+const (
+	E2BIG = syscall.Errno(0x7)
+	EACCES = syscall.Errno(0xd)
+	EADDRINUSE = syscall.Errno(0x30)
+	EADDRNOTAVAIL = syscall.Errno(0x31)
+	EAFNOSUPPORT = syscall.Errno(0x2f)
+	EAGAIN = syscall.Errno(0x23)
+	EALREADY = syscall.Errno(0x25)
+	EAUTH = syscall.Errno(0x50)
+	EBADF = syscall.Errno(0x9)
+	EBADMSG = syscall.Errno(0x5c)
+	EBADRPC = syscall.Errno(0x48)
+	EBUSY = syscall.Errno(0x10)
+	ECANCELED = syscall.Errno(0x58)
+	ECHILD = syscall.Errno(0xa)
+	ECONNABORTED = syscall.Errno(0x35)
+	ECONNREFUSED = syscall.Errno(0x3d)
+	ECONNRESET = syscall.Errno(0x36)
+	EDEADLK = syscall.Errno(0xb)
+	EDESTADDRREQ = syscall.Errno(0x27)
+	EDOM = syscall.Errno(0x21)
+	EDQUOT = syscall.Errno(0x45)
+	EEXIST = syscall.Errno(0x11)
+	EFAULT = syscall.Errno(0xe)
+	EFBIG = syscall.Errno(0x1b)
+	EFTYPE = syscall.Errno(0x4f)
+	EHOSTDOWN = syscall.Errno(0x40)
+	EHOSTUNREACH = syscall.Errno(0x41)
+	EIDRM = syscall.Errno(0x59)
+	EILSEQ = syscall.Errno(0x54)
+	EINPROGRESS = syscall.Errno(0x24)
+	EINTR = syscall.Errno(0x4)
+	EINVAL = syscall.Errno(0x16)
+	EIO = syscall.Errno(0x5)
+	EIPSEC = syscall.Errno(0x52)
+	EISCONN = syscall.Errno(0x38)
+	EISDIR = syscall.Errno(0x15)
+	ELAST = syscall.Errno(0x5f)
+	ELOOP = syscall.Errno(0x3e)
+	EMEDIUMTYPE = syscall.Errno(0x56)
+	EMFILE = syscall.Errno(0x18)
+	EMLINK = syscall.Errno(0x1f)
+	EMSGSIZE = syscall.Errno(0x28)
+	ENAMETOOLONG = syscall.Errno(0x3f)
+	ENEEDAUTH = syscall.Errno(0x51)
+	ENETDOWN = syscall.Errno(0x32)
+	ENETRESET = syscall.Errno(0x34)
+	ENETUNREACH = syscall.Errno(0x33)
+	ENFILE = syscall.Errno(0x17)
+	ENOATTR = syscall.Errno(0x53)
+	ENOBUFS = syscall.Errno(0x37)
+	ENODEV = syscall.Errno(0x13)
+	ENOENT = syscall.Errno(0x2)
+	ENOEXEC = syscall.Errno(0x8)
+	ENOLCK = syscall.Errno(0x4d)
+	ENOMEDIUM = syscall.Errno(0x55)
+	ENOMEM = syscall.Errno(0xc)
+	ENOMSG = syscall.Errno(0x5a)
+	ENOPROTOOPT = syscall.Errno(0x2a)
+	ENOSPC = syscall.Errno(0x1c)
+	ENOSYS = syscall.Errno(0x4e)
+	ENOTBLK = syscall.Errno(0xf)
+	ENOTCONN = syscall.Errno(0x39)
+	ENOTDIR = syscall.Errno(0x14)
+	ENOTEMPTY = syscall.Errno(0x42)
+	ENOTRECOVERABLE = syscall.Errno(0x5d)
+	ENOTSOCK = syscall.Errno(0x26)
+	ENOTSUP = syscall.Errno(0x5b)
+	ENOTTY = syscall.Errno(0x19)
+	ENXIO = syscall.Errno(0x6)
+	EOPNOTSUPP = syscall.Errno(0x2d)
+	EOVERFLOW = syscall.Errno(0x57)
+	EOWNERDEAD = syscall.Errno(0x5e)
+	EPERM = syscall.Errno(0x1)
+	EPFNOSUPPORT = syscall.Errno(0x2e)
+	EPIPE = syscall.Errno(0x20)
+	EPROCLIM = syscall.Errno(0x43)
+	EPROCUNAVAIL = syscall.Errno(0x4c)
+	EPROGMISMATCH = syscall.Errno(0x4b)
+	EPROGUNAVAIL = syscall.Errno(0x4a)
+	EPROTO = syscall.Errno(0x5f)
+	EPROTONOSUPPORT = syscall.Errno(0x2b)
+	EPROTOTYPE = syscall.Errno(0x29)
+	ERANGE = syscall.Errno(0x22)
+	EREMOTE = syscall.Errno(0x47)
+	EROFS = syscall.Errno(0x1e)
+	ERPCMISMATCH = syscall.Errno(0x49)
+	ESHUTDOWN = syscall.Errno(0x3a)
+	ESOCKTNOSUPPORT = syscall.Errno(0x2c)
+	ESPIPE = syscall.Errno(0x1d)
+	ESRCH = syscall.Errno(0x3)
+	ESTALE = syscall.Errno(0x46)
+	ETIMEDOUT = syscall.Errno(0x3c)
+	ETOOMANYREFS = syscall.Errno(0x3b)
+	ETXTBSY = syscall.Errno(0x1a)
+	EUSERS = syscall.Errno(0x44)
+	EWOULDBLOCK = syscall.Errno(0x23)
+	EXDEV = syscall.Errno(0x12)
+)
+
+// Signals
+const (
+	SIGABRT = syscall.Signal(0x6)
+	SIGALRM = syscall.Signal(0xe)
+	SIGBUS = syscall.Signal(0xa)
+	SIGCHLD = syscall.Signal(0x14)
+	SIGCONT = syscall.Signal(0x13)
+	SIGEMT = syscall.Signal(0x7)
+	SIGFPE = syscall.Signal(0x8)
+	SIGHUP = syscall.Signal(0x1)
+	SIGILL = syscall.Signal(0x4)
+	SIGINFO = syscall.Signal(0x1d)
+	SIGINT = syscall.Signal(0x2)
+	SIGIO = syscall.Signal(0x17)
+	SIGIOT = syscall.Signal(0x6)
+	SIGKILL = syscall.Signal(0x9)
+	SIGPIPE = syscall.Signal(0xd)
+	SIGPROF = syscall.Signal(0x1b)
+	SIGQUIT = syscall.Signal(0x3)
+	SIGSEGV = syscall.Signal(0xb)
+	SIGSTOP = syscall.Signal(0x11)
+	SIGSYS = syscall.Signal(0xc)
+	SIGTERM = syscall.Signal(0xf)
+	SIGTHR = syscall.Signal(0x20)
+	SIGTRAP = syscall.Signal(0x5)
+	SIGTSTP = syscall.Signal(0x12)
+	SIGTTIN = syscall.Signal(0x15)
+	SIGTTOU = syscall.Signal(0x16)
+	SIGURG = syscall.Signal(0x10)
+	SIGUSR1 = syscall.Signal(0x1e)
+	SIGUSR2 = syscall.Signal(0x1f)
+	SIGVTALRM = syscall.Signal(0x1a)
+	SIGWINCH = syscall.Signal(0x1c)
+	SIGXCPU = syscall.Signal(0x18)
+	SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+	num  syscall.Errno
+	name string
+	desc string
+}{
+	{1, "EPERM", "operation not permitted"},
+	{2, "ENOENT", "no such file or directory"},
+	{3, "ESRCH", "no such process"},
+	{4, "EINTR", "interrupted system call"},
+	{5, "EIO", "input/output error"},
+	{6, "ENXIO", "device not configured"},
+	{7, "E2BIG", "argument list too long"},
+	{8, "ENOEXEC", "exec format error"},
+	{9, "EBADF", "bad file descriptor"},
+	{10, "ECHILD", "no child processes"},
+	{11, "EDEADLK", "resource deadlock avoided"},
+	{12, "ENOMEM", "cannot allocate memory"},
+	{13, "EACCES", "permission denied"},
+	{14, "EFAULT", "bad address"},
+	{15, "ENOTBLK", "block device required"},
+	{16, "EBUSY", "device busy"},
+	{17, "EEXIST", "file exists"},
+	{18, "EXDEV", "cross-device link"},
+	{19, "ENODEV", "operation not supported by device"},
+	{20, "ENOTDIR", "not a directory"},
+	{21, "EISDIR", "is a directory"},
+	{22, "EINVAL", "invalid argument"},
+	{23, "ENFILE", "too many open files in system"},
+	{24, "EMFILE", "too many open files"},
+	{25, "ENOTTY", "inappropriate ioctl for device"},
+	{26, "ETXTBSY", "text file busy"},
+	{27, "EFBIG", "file too large"},
+	{28, "ENOSPC", "no space left on device"},
+	{29, "ESPIPE", "illegal seek"},
+	{30, "EROFS", "read-only file system"},
+	{31, "EMLINK", "too many links"},
+	{32, "EPIPE", "broken pipe"},
+	{33, "EDOM", "numerical argument out of domain"},
+	{34, "ERANGE", "result too large"},
+	{35, "EAGAIN", "resource temporarily unavailable"},
+	{36, "EINPROGRESS", "operation now in progress"},
+	{37, "EALREADY", "operation already in progress"},
+	{38, "ENOTSOCK", "socket operation on non-socket"},
+	{39, "EDESTADDRREQ", "destination address required"},
+	{40, "EMSGSIZE", "message too long"},
+	{41, "EPROTOTYPE", "protocol wrong type for socket"},
+	{42, "ENOPROTOOPT", "protocol not available"},
+	{43, "EPROTONOSUPPORT", "protocol not supported"},
+	{44, "ESOCKTNOSUPPORT", "socket type not supported"},
+	{45, "EOPNOTSUPP", "operation not supported"},
+	{46, "EPFNOSUPPORT", "protocol family not supported"},
+	{47, "EAFNOSUPPORT", "address family not supported by protocol family"},
+	{48, "EADDRINUSE", "address already in use"},
+	{49, "EADDRNOTAVAIL", "can't assign requested address"},
+	{50, "ENETDOWN", "network is down"},
+	{51, "ENETUNREACH", "network is unreachable"},
+	{52, "ENETRESET", "network dropped connection on reset"},
+	{53, "ECONNABORTED", "software caused connection abort"},
+	{54, "ECONNRESET", "connection reset by peer"},
+	{55, "ENOBUFS", "no buffer space available"},
+	{56, "EISCONN", "socket is already connected"},
+	{57, "ENOTCONN", "socket is not connected"},
+	{58, "ESHUTDOWN", "can't send after socket shutdown"},
+	{59, "ETOOMANYREFS", "too many references: can't splice"},
+	{60, "ETIMEDOUT", "operation timed out"},
+	{61, "ECONNREFUSED", "connection refused"},
+	{62, "ELOOP", "too many levels of symbolic links"},
+	{63, "ENAMETOOLONG", "file name too long"},
+	{64, "EHOSTDOWN", "host is down"},
+	{65, "EHOSTUNREACH", "no route to host"},
+	{66, "ENOTEMPTY", "directory not empty"},
+	{67, "EPROCLIM", "too many processes"},
+	{68, "EUSERS", "too many users"},
+	{69, "EDQUOT", "disk quota exceeded"},
+	{70, "ESTALE", "stale NFS file handle"},
+	{71, "EREMOTE", "too many levels of remote in path"},
+	{72, "EBADRPC", "RPC struct is bad"},
+	{73, "ERPCMISMATCH", "RPC version wrong"},
+	{74, "EPROGUNAVAIL", "RPC program not available"},
+	{75, "EPROGMISMATCH", "program version wrong"},
+	{76, "EPROCUNAVAIL", "bad procedure for program"},
+	{77, "ENOLCK", "no locks available"},
+	{78, "ENOSYS", "function not implemented"},
+	{79, "EFTYPE", "inappropriate file type or format"},
+	{80, "EAUTH", "authentication error"},
+	{81, "ENEEDAUTH", "need authenticator"},
+	{82, "EIPSEC", "IPsec processing failure"},
+	{83, "ENOATTR", "attribute not found"},
+	{84, "EILSEQ", "illegal byte sequence"},
+	{85, "ENOMEDIUM", "no medium found"},
+	{86, "EMEDIUMTYPE", "wrong medium type"},
+	{87, "EOVERFLOW", "value too large to be stored in data type"},
+	{88, "ECANCELED", "operation canceled"},
+	{89, "EIDRM", "identifier removed"},
+	{90, "ENOMSG", "no message of desired type"},
+	{91, "ENOTSUP", "not supported"},
+	{92, "EBADMSG", "bad message"},
+	{93, "ENOTRECOVERABLE", "state not recoverable"},
+	{94, "EOWNERDEAD", "previous owner died"},
+	{95, "ELAST", "protocol error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+	num  syscall.Signal
+	name string
+	desc string
+}{
+	{1, "SIGHUP", "hangup"},
+	{2, "SIGINT", "interrupt"},
+	{3, "SIGQUIT", "quit"},
+	{4, "SIGILL", "illegal instruction"},
+	{5, "SIGTRAP", "trace/BPT trap"},
+	{6, "SIGABRT", "abort trap"},
+	{7, "SIGEMT", "EMT trap"},
+	{8, "SIGFPE", "floating point exception"},
+	{9, "SIGKILL", "killed"},
+	{10, "SIGBUS", "bus error"},
+	{11, "SIGSEGV", "segmentation fault"},
+	{12, "SIGSYS", "bad system call"},
+	{13, "SIGPIPE", "broken pipe"},
+	{14, "SIGALRM", "alarm clock"},
+	{15, "SIGTERM", "terminated"},
+	{16, "SIGURG", "urgent I/O condition"},
+	{17, "SIGSTOP", "suspended (signal)"},
+	{18, "SIGTSTP", "suspended"},
+	{19, "SIGCONT", "continued"},
+	{20, "SIGCHLD", "child exited"},
+	{21, "SIGTTIN", "stopped (tty input)"},
+	{22, "SIGTTOU", "stopped (tty output)"},
+	{23, "SIGIO", "I/O possible"},
+	{24, "SIGXCPU", "cputime limit exceeded"},
+	{25, "SIGXFSZ", "filesize limit exceeded"},
+	{26, "SIGVTALRM", "virtual timer expired"},
+	{27, "SIGPROF", "profiling timer expired"},
+	{28, "SIGWINCH", "window size changes"},
+	{29, "SIGINFO", "information request"},
+	{30, "SIGUSR1", "user defined signal 1"},
+	{31, "SIGUSR2", "user defined signal 2"},
+	{32, "SIGTHR", "thread AST"},
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
index 85e0cc386678..870215d2c479 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go
@@ -975,7 +975,7 @@ func Pause() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
+func pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
@@ -992,7 +992,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
index f1d4a73b0898..a89b0bfa53ca 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go
@@ -931,7 +931,7 @@ func Pause() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
+func pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
@@ -946,7 +946,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
deleted file mode 100644
index a06eb0932420..000000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-//go:build darwin && amd64 && go1.13
-// +build darwin,amd64,go1.13
-
-package unix
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
-	_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-var libc_closedir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
-	r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
-	res = Errno(r0)
-	return
-}
-
-var libc_readdir_r_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
deleted file mode 100644
index d6c3e25c018a..000000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
+++ /dev/null
@@ -1,25 +0,0 @@
-// go run mkasm_darwin.go amd64
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build go1.13
-// +build go1.13
-
-#include "textflag.h"
-
-TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
-	JMP	libc_fdopendir(SB)
-
-GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
-DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
-
-TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
-	JMP	libc_closedir(SB)
-
-GLOBL	·libc_closedir_trampoline_addr(SB), RODATA, $8
-DATA	·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
-
-TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
-	JMP	libc_readdir_r(SB)
-
-GLOBL	·libc_readdir_r_trampoline_addr(SB), RODATA, $8
-DATA	·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 0ae0ed4cb8af..c2461c496797 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1,8 +1,8 @@
-// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
+// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
 // Code generated by the command above; see README.md. DO NOT EDIT.
-//go:build darwin && amd64 && go1.12 -// +build darwin,amd64,go1.12 +//go:build darwin && amd64 +// +build darwin,amd64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -643,17 +669,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1638,6 +1669,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1698,7 +1753,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1719,7 +1774,7 @@ 
var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index eac6ca806f4d..95fe4c0eb962 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go amd64 +// go run mkasm.go darwin amd64 // Code generated by the command above; DO NOT EDIT. -//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -228,11 +243,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go deleted file mode 100644 index cec595d553a4..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build darwin && arm64 && go1.13 -// +build darwin,arm64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s deleted file mode 100644 index 357989722cfb..000000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm_darwin.go arm64 -// Code generated by the command above; DO NOT EDIT. - -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index cf71be3edb3b..26a0fdc505bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-//go:build darwin && arm64 && go1.12 -// +build darwin,arm64,go1.12 +//go:build darwin && arm64 +// +build darwin,arm64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -643,17 +669,22 @@ var libc_flistxattr_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { - _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setattrlist_trampoline_addr uintptr +var libc_utimensat_trampoline_addr uintptr -//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic libc_utimensat utimensat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1638,6 +1669,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1698,7 +1753,7 @@ var libc_pathconf_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1719,7 +1774,7 @@ 
var libc_pread_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4ebcf2175854..efa5b4c987c5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go arm64 +// go run mkasm.go darwin arm64 // Code generated by the command above; DO NOT EDIT. -//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -228,11 +243,11 @@ TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) -TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setattrlist(SB) +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) -GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 3e9bddb7b224..039c4aa06c2c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ 
func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1420,7 +1323,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1340,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index c72a462b91e1..0535d3cfdf2b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1420,7 +1323,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1340,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 530d5df90c0c..1018b5221704 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -351,22 +351,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -404,6 +388,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1250,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, uintptr(dev), uintptr(dev>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1420,7 +1323,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1340,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 71e7df9e8558..3802f4b379a5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -912,7 +912,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), 
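The `Mknodat` promotion also widens `dev` from `int` to `uint64`; on 32-bit targets the generator splits it across two syscall arguments (`uintptr(dev)`, `uintptr(dev>>32)`, with an extra zero pad on arm for even-register alignment), while 64-bit targets pass it whole. A hedged usage sketch, assuming x/sys's `Mkdev` helper to pack the major/minor numbers (device path and numbers are made up; creating device nodes requires root):

```go
//go:build freebsd

package main

import "golang.org/x/sys/unix"

func main() {
	// Mkdev packs major/minor into the uint64 dev value that
	// Mknodat now accepts directly on every architecture.
	dev := unix.Mkdev(0, 42) // hypothetical major/minor

	// Hypothetical path; S_IFCHR makes this a character device.
	err := unix.Mknodat(unix.AT_FDCWD, "/tmp/mydev",
		unix.S_IFCHR|0o600, dev)
	if err != nil {
		panic(err)
	}
}
```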
uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +922,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +937,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +947,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +967,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +984,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1205,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1250,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1265,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1420,7 +1323,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1437,7 +1340,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1753,22 +1656,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1671,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go new file mode 100644 index 000000000000..8a2db7da9f3e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -0,0 +1,1889 @@ +// go run mksyscall.go -tags freebsd,riscv64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 
uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, 
uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return 
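Every stub in this new riscv64 file has the same mksyscall shape: marshal arguments to `uintptr`, trap via `Syscall`/`Syscall6`/`RawSyscall`, and turn a nonzero errno into an error through `errnoErr` (which caches the common EAGAIN/EINVAL/ENOENT values). The prototypes come from `//sys` comments in the hand-written files named in the file header. As a rough illustration of the pattern only, here is the same dance done by hand for chroot; real callers should simply use `unix.Chroot`:

```go
//go:build freebsd

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// chroot mirrors the generated wrapper to show the mksyscall pattern:
// Go string -> NUL-terminated *byte, raw trap, errno -> error.
func chroot(path string) error {
	p, err := unix.BytePtrFromString(path)
	if err != nil {
		return err
	}
	_, _, e1 := unix.Syscall(unix.SYS_CHROOT,
		uintptr(unsafe.Pointer(p)), 0, 0)
	if e1 != 0 {
		return e1 // unix.Errno implements error
	}
	return nil
}

func main() {
	// Hypothetical target directory; this call needs root to succeed.
	if err := chroot("/var/empty"); err != nil {
		fmt.Println("chroot:", err)
	}
}
```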
+} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := 
Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var 
_p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) 
(err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if 
err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
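Every wrapper above follows the same generated template: marshal each Go argument into a `uintptr` word (strings via `BytePtrFromString`, slices via a pointer to their first element, or `&_zero` when empty), invoke `Syscall`/`Syscall6`/`RawSyscall` with the matching `SYS_*` number, and convert a nonzero errno through `errnoErr`. Application code never calls this layer directly; it goes through the exported API of `golang.org/x/sys/unix`. A minimal caller-side sketch (ordinary application code with a made-up path; not part of this diff):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Open routes through the generated Open wrapper shown above:
	// BytePtrFromString(path) -> Syscall(SYS_OPEN, ...) -> errnoErr on failure.
	fd, err := unix.Open("/tmp/example.txt", unix.O_RDONLY, 0) // hypothetical path
	if err != nil {
		// Failures surface as syscall.Errno values, directly comparable
		// against constants such as unix.ENOENT.
		if err == unix.ENOENT {
			fmt.Fprintln(os.Stderr, "file does not exist")
			return
		}
		panic(err)
	}
	defer unix.Close(fd)

	// unix.Fstat fills Stat_t via the generated SYS_FSTAT wrapper.
	var st unix.Stat_t
	if err := unix.Fstat(fd, &st); err != nil {
		panic(err)
	}
	fmt.Printf("size=%d mode=%o\n", st.Size, st.Mode)
}
```

The `errnoErr(e1)` indirection (rather than returning `e1` directly) exists so that common errno values such as `EAGAIN` come back as cached, pre-boxed `error` values, avoiding an allocation on hot error paths.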
+func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index af5cb064ec4f..b57c7050d7a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -15,25 +15,19 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" -//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" -//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 -//go:linkname procputmsg libc_putmsg -//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4, - procputmsg, - procgetmsg syscallFunc + procaccept4 syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -106,23 +100,3 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func getmsg(fd int, clptr 
*strbuf, dataptr *strbuf, flags *int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 93edda4c4939..293cf36804e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -231,6 +231,16 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + _, _, e1 := Syscall6(SYS_WAITID, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) ret = int(r0) @@ -818,6 +828,49 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) { + r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs)) + fsfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsopen(fsName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1195,6 +1248,26 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fromPathName) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(toPathName) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOVE_MOUNT, uintptr(fromDirfd), uintptr(unsafe.Pointer(_p0)), uintptr(toDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1205,6 +1278,22 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func OpenTree(dfd int, fileName string, flags uint) (r int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fileName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN_TREE, uintptr(dfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + r = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) fd = int(r0) @@ -1992,6 +2081,16 @@ func PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PidfdSendSignal(pidfd int, sig Signal, info *Siginfo, flags int) (err error) { + _, _, e1 := Syscall6(SYS_PIDFD_SEND_SIGNAL, uintptr(pidfd), uintptr(sig), uintptr(unsafe.Pointer(info)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { r0, _, e1 := Syscall(SYS_SHMAT, uintptr(id), uintptr(addr), uintptr(flag)) ret = uintptr(r0) @@ -2032,3 +2131,33 @@ func shmget(key int, size int, flag int) (id int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getitimer(which int, currValue *Itimerval) (err error) { + _, _, e1 := Syscall(SYS_GETITIMER, uintptr(which), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) { + _, _, e1 := Syscall(SYS_SETITIMER, uintptr(which), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) { + _, _, e1 := RawSyscall6(SYS_RT_SIGPROCMASK, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oldset)), uintptr(sigsetsize), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index ff90c81e7300..c81b0ad47772 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go +// go run mksyscall.go -l32 -tags linux,386 syscall_linux.go syscall_linux_386.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && 386 @@ -200,7 +200,7 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -217,7 +217,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) @@ -524,3 +484,14 @@ func utimes(path string, times *[2]Timeval) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index fa7d3dbe4e94..2206bce7f4dd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go +// go run mksyscall.go -tags linux,amd64 syscall_linux.go syscall_linux_amd64.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && amd64 @@ -215,6 +215,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { @@ -225,7 +236,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -242,7 +253,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -323,36 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -363,16 +344,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -444,17 +415,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -691,3 +651,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 654f91530f69..edf6b39f1615 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -46,17 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -423,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -549,7 +498,7 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -566,7 +515,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e893f987f91d..190609f2140d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +299,6 @@ func setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -389,17 +360,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000000..806ffd1e125e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,487 @@ +// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see 
README.md. DO NOT EDIT. + +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} 
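A pattern worth noting across this diff: the generated per-arch `Pread`/`Pwrite` wrappers are renamed to unexported `pread`/`pwrite` (here in the new loong64 file and in the 386, amd64, arm, and mips hunks), with the exported `unix.Pread`/`unix.Pwrite` entry points defined once in shared, hand-written code that delegates to them, so the public API is unchanged. A small positional-I/O sketch using that API (the file path is illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/tmp/pread-demo", unix.O_RDWR|unix.O_CREAT, 0o644) // hypothetical path
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Pwrite writes at an absolute offset without moving the file cursor.
	if _, err := unix.Pwrite(fd, []byte("hello, world"), 0); err != nil {
		panic(err)
	}

	// Pread reads at an absolute offset; on loong64 it bottoms out in the
	// generated pread wrapper that follows, which issues SYS_PREAD64.
	buf := make([]byte, 5)
	n, err := unix.Pread(fd, buf, 7)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n]) // "world"
}
```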
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 
uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd 
int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 6d1552885314..5f984cbb1ca7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -b32 -arm -tags linux,mips syscall_linux.go syscall_linux_mipsx.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && mips @@ -150,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -167,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -344,17 +304,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -702,3 +651,14 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 1e20d72df21e..46fc380a40e5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go +// go run mksyscall.go -tags linux,mips64 syscall_linux.go syscall_linux_mips64x.go syscall_linux_alarm.go // Code generated by the command 
above; see README.md. DO NOT EDIT. //go:build linux && mips64 @@ -180,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -399,17 +359,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -696,3 +645,14 @@ func stat(path string, st *stat_t) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 82b5e2d9eda4..cbd0d4dadbad 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -180,7 +180,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +197,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -399,17 +359,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index a0440c1d43be..0c13d15f07cf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go +// go run mksyscall.go -l32 -arm -tags linux,mipsle syscall_linux.go syscall_linux_mipsx.go 
syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle @@ -150,7 +150,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -167,7 +167,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -344,17 +304,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -702,3 +651,14 @@ func setrlimit(resource int, rlim *rlimit32) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 5864b9ca6490..e01432aed51f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go syscall_linux_ppc.go +// go run mksyscall.go -b32 -tags linux,ppc syscall_linux.go 
syscall_linux_ppc.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc @@ -210,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -227,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -409,17 +369,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -707,3 +656,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index beeb49e34217..13c7ee7baff6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags 
linux,ppc64 syscall_linux.go syscall_linux_ppc64x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 @@ -240,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -257,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -475,17 +435,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -753,3 +702,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 53139b82c7be..02d0c0fd61ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go +// go run mksyscall.go -tags linux,ppc64le syscall_linux.go syscall_linux_ppc64x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le @@ -240,7 +240,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -257,7 +257,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -475,17 +435,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ -753,3 +702,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, 
cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 63b393b8027d..9fee3b1d2396 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -180,7 +180,18 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -197,7 +208,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -258,36 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -298,16 +279,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -369,17 +340,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func 
accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 202add37d10a..647bbfecd6aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go +// go run mksyscall.go -tags linux,s390x syscall_linux.go syscall_linux_s390x.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x @@ -210,7 +210,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -227,7 +227,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -319,36 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -359,16 +329,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) @@ -533,3 +493,14 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 2ab268c34359..ada057f89144 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go +// go run mksyscall.go -tags linux,sparc64 syscall_linux.go syscall_linux_sparc64.go syscall_linux_alarm.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 @@ -220,7 +220,7 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -237,7 +237,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -329,36 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -369,16 +339,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -455,17 +415,6 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) @@ 
-697,3 +646,14 @@ func utimes(path string, times *[2]Timeval) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Alarm(seconds uint) (remaining uint, err error) { + r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) + remaining = uint(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 51d0c0742bfa..4af561a48d8c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1330,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index df2efb6db3fa..3b90e9448add 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1330,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index c8536c2c9f09..890f4ccd131c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1330,7 +1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 8b981bfc2eb9..c79f071fc6a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1330,7 
+1330,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1347,7 +1347,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 8f80f4ade511..2925fe0a7b73 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -libc -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && 386 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func 
accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, 
_, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var 
libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ 
-368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 
!= 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 
uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := 
RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := 
RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, 
uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1490,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err 
error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1511,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var 
libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, 
uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) 
{ @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), 
uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s new file mode 100644 index 000000000000..75eb2f5f3f72 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd 386 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + 
+GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL 
·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 3a47aca7bf70..98446d2b9540 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && amd64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 
!= 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, 
_, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { 
_p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) 
+ _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, 
e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + 
_, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1490,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1511,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func 
pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from 
string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } 
+var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 
{ err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err 
error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 
uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s new file mode 100644 index 000000000000..243a6663ce67 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd amd64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + 
+GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT 
libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT 
libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL 
·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, 
$libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 883a9b45e8e2..8da6791d1e33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -libc -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
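Every hunk in this vendor bump applies the same mechanical three-piece change, so it is worth spelling out once before the 32-bit arm variant of the same regeneration below. Previously these wrappers trapped straight into the kernel with `Syscall(SYS_*, ...)`; OpenBSD has been moving to require that system calls originate from libc, so the regenerated code (note the added `-libc` flag in the `mksyscall.go` command line above) routes each wrapper through a dynamically resolved libc symbol instead. The following is an annotated excerpt of that pattern using `mkdir` as the worked example — all three pieces appear verbatim in the generated files in this diff; it is a reading aid, not a standalone program and not new API:

```go
// Piece 1 (zsyscall_openbsd_*.go): an address slot, plus a directive
// telling the linker to resolve the symbol dynamically against libc.so.
var libc_mkdir_trampoline_addr uintptr

//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"

// Piece 2 (zsyscall_openbsd_*.s): a one-instruction trampoline whose
// address is placed in the slot above at link time.
//
//	TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
//		JMP	libc_mkdir(SB)
//	GLOBL	·libc_mkdir_trampoline_addr(SB), RODATA, $8
//	DATA	·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)

// Piece 3 (the wrapper body): syscall_syscall — which, as far as I can
// tell, the unix package linknames to the runtime's libc-call helper —
// jumps through the trampoline address rather than issuing a raw
// syscall instruction, so the kernel sees the call enter via libc.
_, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr,
	uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
```

The `zsyscall_openbsd_arm.go` diff that follows is the same rewrite for 32-bit arm: identical trampoline-address slots and `//go:cgo_import_dynamic` directives, with only the generator flags in the header changed.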
//go:build openbsd && arm @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_ftruncate_trampoline_addr, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func 
Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + 
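
Every hunk in this generated file applies the same mechanical rewrite: a direct kernel trap through a `SYS_*` number becomes an indirect call through libc, with the target address resolved at load time by a `//go:cgo_import_dynamic` directive. For orientation, the sketch below condenses one wrapper (`Chdir`) exactly as it reads after this change; it restates code already present in this file rather than adding anything new, and `syscall_syscall`, `BytePtrFromString`, and `errnoErr` are package-internal helpers that already exist in `golang.org/x/sys/unix`.

```go
// Condensed from this file: the post-change shape of one wrapper.

// Before: trap straight into the kernel by syscall number.
//   _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)

// After: call through libc via a trampoline address.
func Chdir(path string) (err error) {
	_p0, err := BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// Populated from the assembly file added later in this diff; the dynamic
// linker binds libc_chdir to the chdir symbol in libc.so.
var libc_chdir_trampoline_addr uintptr

//go:cgo_import_dynamic libc_chdir chdir "libc.so"
```

The raw variants follow the same shape, with `RawSyscall`/`RawSyscall6` becoming `syscall_rawSyscall`/`syscall_rawSyscall6`, and the nine-argument `mmap` case using `syscall_syscall9`.
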
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1490,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1511,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) 
@@ -1160,6 +1532,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := 
syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := 
syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s new file mode 100644 index 000000000000..9ad116d9fbdd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd arm +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, 
$4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + 
+GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL 
·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, 
$libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL 
·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT 
libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT 
libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index aac7fdc95e28..800aab6e3e79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := 
syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) { return } +var 
libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,16 +1490,20 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,16 +1511,20 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var 
libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s new file mode 100644 index 000000000000..4efeff9abbf4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd arm64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + 
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT 
libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL 
·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8776187462b7..016d959bc664 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1128,7 +1128,7 @@ func Pathconf(path string, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) @@ -1145,7 +1145,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go new file mode 100644 index 000000000000..c85de2d9766b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -0,0 +1,2221 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,ppc64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
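For readers skimming this vendored update, the `-libc` convention these generated files use is worth spelling out once: on OpenBSD, Go no longer issues syscall instructions directly; each wrapper instead jumps through a small assembly trampoline whose address is recorded in an 8-byte RODATA word, and whose target symbol is resolved from `libc.so` at link time via `//go:cgo_import_dynamic`. The excerpt below condenses the three pieces of that pattern, taken from this file and its `.s` counterpart above; only the explanatory comments are added, and the assembly is reproduced inside comments for readability:

```go
// Assembly side (from the corresponding zsyscall_openbsd_*.s file): a
// NOSPLIT stub that tail-jumps into the libc symbol, plus an 8-byte
// RODATA word holding the stub's address:
//
//	TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
//	        JMP     libc_getpid(SB)
//
//	GLOBL   ·libc_getpid_trampoline_addr(SB), RODATA, $8
//	DATA    ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)

// Go side: the wrapper dispatches through the trampoline address.
// syscall_rawSyscall is linknamed into the runtime, which invokes the C
// function whose address is passed as the first argument.
func Getpid() (pid int) {
	r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0)
	pid = int(r0)
	return
}

// Filled in at startup from the DATA directive above.
var libc_getpid_trampoline_addr uintptr

// Tells the linker to resolve the libc_getpid symbol against the dynamic
// symbol "getpid" exported by libc.so.
//go:cgo_import_dynamic libc_getpid getpid "libc.so"
```

Routing every call through libc rather than trapping directly is what keeps these binaries working on OpenBSD releases that require system calls to enter the kernel only from libc's text segment; the hundreds of wrappers that follow are mechanical repetitions of this one pattern.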
+ +//go:build openbsd && ppc64 +// +build openbsd,ppc64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 
0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path 
string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := 
syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrlimit_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s new file mode 100644 index 000000000000..7c9223b64187 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd ppc64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgroups(SB) + RET +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgroups(SB) + RET +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_wait4(SB) + RET +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_accept(SB) + RET +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_bind(SB) + RET +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_connect(SB) + RET +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socket(SB) + RET +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockopt(SB) + RET +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsockopt(SB) + RET +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpeername(SB) + RET +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockname(SB) + RET +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_shutdown(SB) + RET +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socketpair(SB) + RET +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvfrom(SB) + RET +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendto(SB) + RET +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvmsg(SB) + RET +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendmsg(SB) + RET +GLOBL ·libc_sendmsg_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kevent(SB) + RET +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimes(SB) + RET +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_futimes(SB) + RET +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_poll(SB) + RET +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_madvise(SB) + RET +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlock(SB) + RET +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlockall(SB) + RET +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mprotect(SB) + RET +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_msync(SB) + RET +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlock(SB) + RET +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlockall(SB) + RET +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pipe2(SB) + RET +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getdents(SB) + RET +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getcwd(SB) + RET +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ioctl(SB) + RET +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sysctl(SB) + RET +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ppoll(SB) + RET +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, 
$libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_access(SB) + RET +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_adjtime(SB) + RET +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chdir(SB) + RET +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chflags(SB) + RET +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chmod(SB) + RET +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chown(SB) + RET +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chroot(SB) + RET +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_close(SB) + RET +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup(SB) + RET +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup2(SB) + RET +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup3(SB) + RET +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_exit(SB) + RET +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_faccessat(SB) + RET +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchdir(SB) + RET +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchflags(SB) + RET +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchmod(SB) + RET +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchmodat(SB) + RET +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchown(SB) + RET +GLOBL 
·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchownat(SB) + RET +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_flock(SB) + RET +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fpathconf(SB) + RET +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstat(SB) + RET +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatat(SB) + RET +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatfs(SB) + RET +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fsync(SB) + RET +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ftruncate(SB) + RET +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getegid(SB) + RET +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_geteuid(SB) + RET +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgid(SB) + RET +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgid(SB) + RET +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgrp(SB) + RET +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpid(SB) + RET +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getppid(SB) + RET +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpriority(SB) + RET +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrlimit(SB) + RET +GLOBL 
·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrtable(SB) + RET +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrusage(SB) + RET +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsid(SB) + RET +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_gettimeofday(SB) + RET +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getuid(SB) + RET +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_issetugid(SB) + RET +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kill(SB) + RET +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kqueue(SB) + RET +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lchown(SB) + RET +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_link(SB) + RET +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_linkat(SB) + RET +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_listen(SB) + RET +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lstat(SB) + RET +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdir(SB) + RET +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdirat(SB) + RET +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifo(SB) + RET +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifoat(SB) + RET +GLOBL ·libc_mkfifoat_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknod(SB) + RET +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknodat(SB) + RET +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_nanosleep(SB) + RET +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_open(SB) + RET +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_openat(SB) + RET +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pathconf(SB) + RET +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pread(SB) + RET +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pwrite(SB) + RET +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_read(SB) + RET +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlink(SB) + RET +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlinkat(SB) + RET +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rename(SB) + RET +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_renameat(SB) + RET +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_revoke(SB) + RET +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rmdir(SB) + RET +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lseek(SB) + RET +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_select(SB) + RET +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, 
$libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setegid(SB) + RET +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_seteuid(SB) + RET +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgid(SB) + RET +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setlogin(SB) + RET +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpgid(SB) + RET +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpriority(SB) + RET +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setregid(SB) + RET +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setreuid(SB) + RET +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresgid(SB) + RET +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresuid(SB) + RET +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setrlimit(SB) + RET +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setrtable(SB) + RET +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsid(SB) + RET +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_settimeofday(SB) + RET +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setuid(SB) + RET +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_stat(SB) + RET +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_statfs(SB) + RET +GLOBL ·libc_statfs_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlink(SB) + RET +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlinkat(SB) + RET +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sync(SB) + RET +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_truncate(SB) + RET +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_umask(SB) + RET +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlink(SB) + RET +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlinkat(SB) + RET +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unmount(SB) + RET +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_write(SB) + RET +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mmap(SB) + RET +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munmap(SB) + RET +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimensat(SB) + RET +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go new file mode 100644 index 000000000000..8e3e7873f893 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -0,0 +1,2221 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,riscv64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build openbsd && riscv64 +// +build openbsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil 
{ + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 
0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path 
string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := 
syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrlimit_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s new file mode 100644 index 000000000000..7dba789271ca --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd riscv64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + 
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT 
libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL 
·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b5f926cee2a9..91f5a2bde282 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -66,6 +66,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -146,6 +147,8 @@ import ( //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" //go:cgo_import_dynamic libc_port_get port_get "libc.so" //go:cgo_import_dynamic libc_port_getn port_getn "libc.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procpipe libc_pipe //go:linkname procpipe2 libc_pipe2 @@ -202,6 +205,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -227,8 +231,8 @@ import ( //go:linkname procOpenat libc_openat //go:linkname procPathconf libc_pathconf //go:linkname procPause libc_pause -//go:linkname procPread libc_pread -//go:linkname procPwrite libc_pwrite +//go:linkname procpread 
libc_pread +//go:linkname procpwrite libc_pwrite //go:linkname procread libc_read //go:linkname procReadlink libc_readlink //go:linkname procRename libc_rename @@ -282,6 +286,8 @@ import ( //go:linkname procport_dissociate libc_port_dissociate //go:linkname procport_get libc_port_get //go:linkname procport_getn libc_port_getn +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procpipe, @@ -339,6 +345,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -364,8 +371,8 @@ var ( procOpenat, procPathconf, procPause, - procPread, - procPwrite, + procpread, + procpwrite, procread, procReadlink, procRename, @@ -418,7 +425,9 @@ var ( procport_associate, procport_dissociate, procport_get, - procport_getn syscallFunc + procport_getn, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1044,6 +1053,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1380,12 +1400,12 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { +func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -1395,12 +1415,12 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { _p0 = &p[0] } - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = e1 @@ -2051,3 +2071,23 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), 
uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go new file mode 100644 index 000000000000..e44054470b7e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -0,0 +1,281 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 
2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 
0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + 
{"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go new file mode 100644 index 000000000000..a0db82fce206 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -0,0 +1,282 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 
61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + 
{"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + 
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 59d5dfc20922..4e0d96107b9e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 342d471d2eb1..01636b838d30 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index e2e3d72c5b04..ad99bc106a86 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 61ad5ca3c19b..89dcc4274765 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); }
SYS_RFORK = 251 // { int rfork(int flags); }
- SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); }
SYS_ISSETUGID = 253 // { int issetugid(void); }
SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
- SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); }
- SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); }
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
- SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); }
- SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); }
- SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); }
SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
- SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
SYS_MODNEXT = 300 // { int modnext(int modid); }
- SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
SYS_MODFNEXT = 302 // { int modfnext(int modid); }
SYS_MODFIND = 303 // { int modfind(const char *name); }
SYS_KLDLOAD = 304 // { int kldload(const char *file); }
SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
SYS_KLDFIND = 306 // { int kldfind(const char *file); }
SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
- SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
SYS_GETSID = 310 // { int getsid(pid_t pid); }
SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
- SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
SYS_YIELD = 321 // { int yield(void); }
SYS_MLOCKALL = 324 // { int mlockall(int how); }
SYS_MUNLOCKALL = 325 // { int munlockall(void); }
- SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
@@ -226,14 +213,13 @@ const (
SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
- SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
- SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
SYS_KQUEUE = 362 // { int kqueue(void); }
- SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
@@ -251,10 +237,6 @@ const (
SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
- SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
- SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); }
- SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); }
- SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
@@ -267,14 +249,14 @@ const (
SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
- SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
- SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
- SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); }
+ SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
- SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
@@ -288,10 +270,10 @@ const (
SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); }
- SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
- SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
SYS_THR_WAKE = 443 // { int thr_wake(long id); }
SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
@@ -300,17 +282,17 @@ const (
SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
- SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
- SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
SYS_AUDITCTL = 453 // { int auditctl(char *path); }
SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
- SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
- SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
- SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);}
- SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
@@ -319,7 +301,7 @@ const (
SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
- SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
@@ -338,14 +320,12 @@ const (
SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
- SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
- SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
- SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
@@ -391,7 +371,24 @@ const (
SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
- SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); }
- SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); }
SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
+ SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
+ SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
+ SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
+ SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
+ SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
+ SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
+ SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
+ SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
+ SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
+ SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
new file mode 100644
index 000000000000..ee37aaa0c906
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go
@@ -0,0 +1,394 @@
+// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build riscv64 && freebsd
+// +build riscv64,freebsd
+
+package unix
+
+const (
+ // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int
+ SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void
+ SYS_FORK = 2 // { int fork(void); }
+ SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); }
+ SYS_CLOSE = 6 // { int close(int fd); }
+ SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); }
+ SYS_LINK = 9 // { int link(char *path, char *link); }
+ SYS_UNLINK = 10 // { int unlink(char *path); }
+ SYS_CHDIR = 12 // { int chdir(char *path); }
+ SYS_FCHDIR = 13 // { int fchdir(int fd); }
+ SYS_CHMOD = 15 // { int chmod(char *path, int mode); }
+ SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); }
+ SYS_BREAK = 17 // { caddr_t break(char *nsize); }
+ SYS_GETPID = 20 // { pid_t getpid(void); }
+ SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); }
+ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); }
+ SYS_SETUID = 23 // { int setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t getuid(void); }
+ SYS_GETEUID = 25 // { uid_t geteuid(void); }
+ SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); }
+ SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); }
+ SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); }
+ SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
+ SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); }
+ SYS_ACCESS = 33 // { int access(char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); }
+ SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); }
+ SYS_SYNC = 36 // { int sync(void); }
+ SYS_KILL = 37 // { int kill(int pid, int signum); }
+ SYS_GETPPID = 39 // { pid_t getppid(void); }
+ SYS_DUP = 41 // { int dup(u_int fd); }
+ SYS_GETEGID = 43 // { gid_t getegid(void); }
+ SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); }
+ SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); }
+ SYS_GETGID = 47 // { gid_t getgid(void); }
+ SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); }
+ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); }
+ SYS_ACCT = 51 // { int acct(char *path); }
+ SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); }
+ SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); }
+ SYS_REBOOT = 55 // { int reboot(int opt); }
+ SYS_REVOKE = 56 // { int revoke(char *path); }
+ SYS_SYMLINK = 57 // { int symlink(char *path, char *link); }
+ SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); }
+ SYS_UMASK = 60 // { int umask(int newmask); }
+ SYS_CHROOT = 61 // { int chroot(char *path); }
+ SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); }
+ SYS_VFORK = 66 // { int vfork(void); }
+ SYS_SBRK = 69 // { int sbrk(int incr); }
+ SYS_SSTK = 70 // { int sstk(int incr); }
+ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); }
+ SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); }
+ SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int getpgrp(void); }
+ SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); }
+ SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); }
+ SYS_SWAPON = 85 // { int swapon(char *name); }
+ SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); }
+ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); }
+ SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); }
+ SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); }
+ SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_FSYNC = 95 // { int fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); }
+ SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); }
+ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); }
+ SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); }
+ SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); }
+ SYS_LISTEN = 106 // { int listen(int s, int backlog); }
+ SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); }
+ SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); }
+ SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); }
+ SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); }
+ SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); }
+ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); }
+ SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); }
+ SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); }
+ SYS_SETREGID = 127 // { int setregid(int rgid, int egid); }
+ SYS_RENAME = 128 // { int rename(char *from, char *to); }
+ SYS_FLOCK = 131 // { int flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); }
+ SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); }
+ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); }
+ SYS_RMDIR = 137 // { int rmdir(char *path); }
+ SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); }
+ SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); }
+ SYS_SETSID = 147 // { int setsid(void); }
+ SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); }
+ SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
+ SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); }
+ SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); }
+ SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); }
+ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); }
+ SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); }
+ SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); }
+ SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); }
+ SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); }
+ SYS_SETFIB = 175 // { int setfib(int fibnum); }
+ SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); }
+ SYS_SETGID = 181 // { int setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { int pathconf(char *path, int name); }
+ SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); }
+ SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int
+ SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int
+ SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int
+ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); }
+ SYS_UNDELETE = 205 // { int undelete(char *path); }
+ SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); }
+ SYS_GETPGID = 207 // { int getpgid(pid_t pid); }
+ SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); }
+ SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); }
+ SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); }
+ SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); }
+ SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); }
+ SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); }
+ SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); }
+ SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); }
+ SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); }
+ SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); }
+ SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); }
+ SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); }
+ SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); }
+ SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); }
+ SYS_RFORK = 251 // { int rfork(int flags); }
+ SYS_ISSETUGID = 253 // { int issetugid(void); }
+ SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); }
+ SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); }
+ SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); }
+ SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); }
+ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); }
+ SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); }
+ SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
+ SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); }
+ SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); }
+ SYS_MODNEXT = 300 // { int modnext(int modid); }
+ SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); }
+ SYS_MODFNEXT = 302 // { int modfnext(int modid); }
+ SYS_MODFIND = 303 // { int modfind(const char *name); }
+ SYS_KLDLOAD = 304 // { int kldload(const char *file); }
+ SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); }
+ SYS_KLDFIND = 306 // { int kldfind(const char *file); }
+ SYS_KLDNEXT = 307 // { int kldnext(int fileid); }
+ SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); }
+ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); }
+ SYS_GETSID = 310 // { int getsid(pid_t pid); }
+ SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+ SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); }
+ SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); }
+ SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); }
+ SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); }
+ SYS_YIELD = 321 // { int yield(void); }
+ SYS_MLOCKALL = 324 // { int mlockall(int how); }
+ SYS_MUNLOCKALL = 325 // { int munlockall(void); }
+ SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); }
+ SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); }
+ SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); }
+ SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); }
+ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); }
+ SYS_SCHED_YIELD = 331 // { int sched_yield (void); }
+ SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); }
+ SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); }
+ SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); }
+ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); }
+ SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); }
+ SYS_JAIL = 338 // { int jail(struct jail *jail); }
+ SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); }
+ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); }
+ SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); }
+ SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); }
+ SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); }
+ SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); }
+ SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); }
+ SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); }
+ SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); }
+ SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); }
+ SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); }
+ SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+ SYS_KQUEUE = 362 // { int kqueue(void); }
+ SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); }
+ SYS___SETUGID = 374 // { int __setugid(int flag); }
+ SYS_EACCESS = 376 // { int eaccess(char *path, int amode); }
+ SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); }
+ SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); }
+ SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); }
+ SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); }
+ SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); }
+ SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); }
+ SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); }
+ SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); }
+ SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); }
+ SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); }
+ SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); }
+ SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); }
+ SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); }
+ SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); }
+ SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); }
+ SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); }
+ SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); }
+ SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); }
+ SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); }
+ SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); }
+ SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); }
+ SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); }
+ SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); }
+ SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); }
+ SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); }
+ SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); }
+ SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); }
+ SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); }
+ SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); }
+ SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); }
+ SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); }
+ SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); }
+ SYS_SWAPOFF = 424 // { int swapoff(const char *name); }
+ SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); }
+ SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); }
+ SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); }
+ SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); }
+ SYS_THR_EXIT = 431 // { void thr_exit(long *state); }
+ SYS_THR_SELF = 432 // { int thr_self(long *id); }
+ SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); }
+ SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); }
+ SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); }
+ SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); }
+ SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); }
+ SYS_THR_WAKE = 443 // { int thr_wake(long id); }
+ SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); }
+ SYS_AUDIT = 445 // { int audit(const void *record, u_int length); }
+ SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); }
+ SYS_GETAUID = 447 // { int getauid(uid_t *auid); }
+ SYS_SETAUID = 448 // { int setauid(uid_t *auid); }
+ SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); }
+ SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); }
+ SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); }
+ SYS_AUDITCTL = 453 // { int auditctl(char *path); }
+ SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); }
+ SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); }
+ SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); }
+ SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); }
+ SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); }
+ SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); }
+ SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); }
+ SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); }
+ SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); }
+ SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); }
+ SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); }
+ SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); }
+ SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); }
+ SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
+ SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); }
+ SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); }
+ SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); }
+ SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); }
+ SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); }
+ SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); }
+ SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); }
+ SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); }
+ SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); }
+ SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); }
+ SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); }
+ SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); }
+ SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); }
+ SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); }
+ SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); }
+ SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); }
+ SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); }
+ SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); }
+ SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); }
+ SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); }
+ SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); }
+ SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); }
+ SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); }
+ SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); }
+ SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); }
+ SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); }
+ SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); }
+ SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); }
+ SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); }
+ SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); }
+ SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); }
+ SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); }
+ SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); }
+ SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); }
+ SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); }
+ SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); }
+ SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); }
+ SYS_CAP_ENTER = 516 // { int cap_enter(void); }
+ SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }
+ SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); }
+ SYS_PDKILL = 519 // { int pdkill(int fd, int signum); }
+ SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); }
+ SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); }
+ SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); }
+ SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); }
+ SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); }
+ SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); }
+ SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); }
+ SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); }
+ SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); }
+ SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); }
+ SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); }
+ SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); }
+ SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); }
+ SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); }
+ SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); }
+ SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); }
+ SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); }
+ SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); }
+ SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); }
+ SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); }
+ SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); }
+ SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); }
+ SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); }
+ SYS_FDATASYNC = 550 // { int fdatasync(int fd); }
+ SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); }
+ SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); }
+ SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); }
+ SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); }
+ SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); }
+ SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); }
+ SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); }
+ SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); }
+ SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); }
+ SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); }
+ SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); }
+ SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); }
+ SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); }
+ SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); }
+ SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); }
+ SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); }
+ SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); }
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index cac1f758bf7e..c9c4ad0314f9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/386/include -m32 /tmp/386/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build 386 && linux
@@ -446,4 +446,5 @@ const (
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f327e4a0bccb..12ff3417c5fd 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/amd64/include -m64 /tmp/amd64/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build amd64 && linux
@@ -368,4 +368,5 @@ const (
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index fb06a08d4ee8..c3fb5e77ab43 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm/include /tmp/arm/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm && linux
@@ -410,4 +410,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 58285646eb79..358c847a40c5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm64/include -fsigned-char /tmp/arm64/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm64 && linux
@@ -313,4 +313,5 @@ const (
SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
new file mode 100644
index 000000000000..81c4849b1619
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -0,0 +1,311 @@
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/loong64/include /tmp/loong64/include/asm/unistd.h
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+package unix
+
+const (
+ SYS_IO_SETUP = 0
+ SYS_IO_DESTROY = 1
+ SYS_IO_SUBMIT = 2
+ SYS_IO_CANCEL = 3
+ SYS_IO_GETEVENTS = 4
+ SYS_SETXATTR = 5
+ SYS_LSETXATTR = 6
+ SYS_FSETXATTR = 7
+ SYS_GETXATTR = 8
+ SYS_LGETXATTR = 9
+ SYS_FGETXATTR = 10
+ SYS_LISTXATTR = 11
+ SYS_LLISTXATTR = 12
+ SYS_FLISTXATTR = 13
+ SYS_REMOVEXATTR = 14
+ SYS_LREMOVEXATTR = 15
+ SYS_FREMOVEXATTR = 16
+ SYS_GETCWD = 17
+ SYS_LOOKUP_DCOOKIE = 18
+ SYS_EVENTFD2 = 19
+ SYS_EPOLL_CREATE1 = 20
+ SYS_EPOLL_CTL = 21
+ SYS_EPOLL_PWAIT = 22
+ SYS_DUP = 23
+ SYS_DUP3 = 24
+ SYS_FCNTL = 25
+ SYS_INOTIFY_INIT1 = 26
+ SYS_INOTIFY_ADD_WATCH = 27
+ SYS_INOTIFY_RM_WATCH = 28
+ SYS_IOCTL = 29
+ SYS_IOPRIO_SET = 30
+ SYS_IOPRIO_GET = 31
+ SYS_FLOCK = 32
+ SYS_MKNODAT = 33
+ SYS_MKDIRAT = 34
+ SYS_UNLINKAT = 35
+ SYS_SYMLINKAT = 36
+ SYS_LINKAT = 37
+ SYS_UMOUNT2 = 39
+ SYS_MOUNT = 40
+ SYS_PIVOT_ROOT = 41
+ SYS_NFSSERVCTL = 42
+ SYS_STATFS = 43
+ SYS_FSTATFS = 44
+ SYS_TRUNCATE = 45
+ SYS_FTRUNCATE = 46
+ SYS_FALLOCATE = 47
+ SYS_FACCESSAT = 48
+ SYS_CHDIR = 49
+ SYS_FCHDIR = 50
+ SYS_CHROOT = 51
+ SYS_FCHMOD = 52
+ SYS_FCHMODAT = 53
+ SYS_FCHOWNAT = 54
+ SYS_FCHOWN = 55
+ SYS_OPENAT = 56
+ SYS_CLOSE = 57
+ SYS_VHANGUP = 58
+ SYS_PIPE2 = 59
+ SYS_QUOTACTL = 60
+ SYS_GETDENTS64 = 61
+ SYS_LSEEK = 62
+ SYS_READ = 63
+ SYS_WRITE = 64
+ SYS_READV = 65
+ SYS_WRITEV = 66
+ SYS_PREAD64 = 67
+ SYS_PWRITE64 = 68
+ SYS_PREADV = 69
+ SYS_PWRITEV = 70
+ SYS_SENDFILE = 71
+ SYS_PSELECT6 = 72
+ SYS_PPOLL = 73
+ SYS_SIGNALFD4 = 74
+ SYS_VMSPLICE = 75
+ SYS_SPLICE = 76
+ SYS_TEE = 77
+ SYS_READLINKAT = 78
+ SYS_SYNC = 81
+ SYS_FSYNC = 82
+ SYS_FDATASYNC = 83
+ SYS_SYNC_FILE_RANGE = 84
+ SYS_TIMERFD_CREATE = 85
+ SYS_TIMERFD_SETTIME = 86
+ SYS_TIMERFD_GETTIME = 87
+ SYS_UTIMENSAT = 88
+ SYS_ACCT = 89
+ SYS_CAPGET = 90
+ SYS_CAPSET = 91
+ SYS_PERSONALITY = 92
+ SYS_EXIT = 93
+ SYS_EXIT_GROUP = 94
+ SYS_WAITID = 95
+ SYS_SET_TID_ADDRESS = 96
+ SYS_UNSHARE = 97
+ SYS_FUTEX = 98
+ SYS_SET_ROBUST_LIST = 99
+ SYS_GET_ROBUST_LIST = 100
+ SYS_NANOSLEEP = 101
+ SYS_GETITIMER = 102
+ SYS_SETITIMER = 103
+ SYS_KEXEC_LOAD = 104
+ SYS_INIT_MODULE = 105
+ SYS_DELETE_MODULE = 106
+ SYS_TIMER_CREATE = 107
+ SYS_TIMER_GETTIME = 108
+ SYS_TIMER_GETOVERRUN = 109
+ SYS_TIMER_SETTIME = 110
+ SYS_TIMER_DELETE = 111
+ SYS_CLOCK_SETTIME = 112
+ SYS_CLOCK_GETTIME = 113
+ SYS_CLOCK_GETRES = 114
+ SYS_CLOCK_NANOSLEEP = 115
+ SYS_SYSLOG = 116
+ SYS_PTRACE = 117
+ SYS_SCHED_SETPARAM = 118
+ SYS_SCHED_SETSCHEDULER = 119
+ SYS_SCHED_GETSCHEDULER = 120
+ SYS_SCHED_GETPARAM = 121
+ SYS_SCHED_SETAFFINITY = 122
+ SYS_SCHED_GETAFFINITY = 123
+ SYS_SCHED_YIELD = 124
+ SYS_SCHED_GET_PRIORITY_MAX = 125
+ SYS_SCHED_GET_PRIORITY_MIN = 126
+ SYS_SCHED_RR_GET_INTERVAL = 127
+ SYS_RESTART_SYSCALL = 128
+ SYS_KILL = 129
+ SYS_TKILL = 130
+ SYS_TGKILL = 131
+ SYS_SIGALTSTACK = 132
+ SYS_RT_SIGSUSPEND = 133
+ SYS_RT_SIGACTION = 134
+ SYS_RT_SIGPROCMASK = 135
+ SYS_RT_SIGPENDING = 136
+ SYS_RT_SIGTIMEDWAIT = 137
+ SYS_RT_SIGQUEUEINFO = 138
+ SYS_RT_SIGRETURN = 139
+ SYS_SETPRIORITY = 140
+ SYS_GETPRIORITY = 141
+ SYS_REBOOT = 142
+ SYS_SETREGID = 143
+ SYS_SETGID = 144
+ SYS_SETREUID = 145
+ SYS_SETUID = 146
+ SYS_SETRESUID = 147
+ SYS_GETRESUID = 148
+ SYS_SETRESGID = 149
+ SYS_GETRESGID = 150
+ SYS_SETFSUID = 151
+ SYS_SETFSGID = 152
+ SYS_TIMES = 153
+ SYS_SETPGID = 154
+ SYS_GETPGID = 155
+ SYS_GETSID = 156
+ SYS_SETSID = 157
+ SYS_GETGROUPS = 158
+ SYS_SETGROUPS = 159
+ SYS_UNAME = 160
+ SYS_SETHOSTNAME = 161
+ SYS_SETDOMAINNAME = 162
+ SYS_GETRUSAGE = 165
+ SYS_UMASK = 166
+ SYS_PRCTL = 167
+ SYS_GETCPU = 168
+ SYS_GETTIMEOFDAY = 169
+ SYS_SETTIMEOFDAY = 170
+ SYS_ADJTIMEX = 171
+ SYS_GETPID = 172
+ SYS_GETPPID = 173
+ SYS_GETUID = 174
+ SYS_GETEUID = 175
+ SYS_GETGID = 176
+ SYS_GETEGID = 177
+ SYS_GETTID = 178
+ SYS_SYSINFO = 179
+ SYS_MQ_OPEN = 180
+ SYS_MQ_UNLINK = 181
+ SYS_MQ_TIMEDSEND = 182
+ SYS_MQ_TIMEDRECEIVE = 183
+ SYS_MQ_NOTIFY = 184
+ SYS_MQ_GETSETATTR = 185
+ SYS_MSGGET = 186
+ SYS_MSGCTL = 187
+ SYS_MSGRCV = 188
+ SYS_MSGSND = 189
+ SYS_SEMGET = 190
+ SYS_SEMCTL = 191
+ SYS_SEMTIMEDOP = 192
+ SYS_SEMOP = 193
+ SYS_SHMGET = 194
+ SYS_SHMCTL = 195
+ SYS_SHMAT = 196
+ SYS_SHMDT = 197
+ SYS_SOCKET = 198
+ SYS_SOCKETPAIR = 199
+ SYS_BIND = 200
+ SYS_LISTEN = 201
+ SYS_ACCEPT = 202
+ SYS_CONNECT = 203
+ SYS_GETSOCKNAME = 204
+ SYS_GETPEERNAME = 205
+ SYS_SENDTO = 206
+ SYS_RECVFROM = 207
+ SYS_SETSOCKOPT = 208
+ SYS_GETSOCKOPT = 209
+ SYS_SHUTDOWN = 210
+ SYS_SENDMSG = 211
+ SYS_RECVMSG = 212
+ SYS_READAHEAD = 213
+ SYS_BRK = 214
+ SYS_MUNMAP = 215
+ SYS_MREMAP = 216
+ SYS_ADD_KEY = 217
+ SYS_REQUEST_KEY = 218
+ SYS_KEYCTL = 219
+ SYS_CLONE = 220
+ SYS_EXECVE = 221
+ SYS_MMAP = 222
+ SYS_FADVISE64 = 223
+ SYS_SWAPON = 224
+ SYS_SWAPOFF = 225
+ SYS_MPROTECT = 226
+ SYS_MSYNC = 227
+ SYS_MLOCK = 228
+ SYS_MUNLOCK = 229
+ SYS_MLOCKALL = 230
+ SYS_MUNLOCKALL = 231
+ SYS_MINCORE = 232
+ SYS_MADVISE = 233
+ SYS_REMAP_FILE_PAGES = 234
+ SYS_MBIND = 235
+ SYS_GET_MEMPOLICY = 236
+ SYS_SET_MEMPOLICY = 237
+ SYS_MIGRATE_PAGES = 238
+ SYS_MOVE_PAGES = 239
+ SYS_RT_TGSIGQUEUEINFO = 240
+ SYS_PERF_EVENT_OPEN = 241
+ SYS_ACCEPT4 = 242
+ SYS_RECVMMSG = 243
+ SYS_ARCH_SPECIFIC_SYSCALL = 244
+ SYS_WAIT4 = 260
+ SYS_PRLIMIT64 = 261
+ SYS_FANOTIFY_INIT = 262
+ SYS_FANOTIFY_MARK = 263
+ SYS_NAME_TO_HANDLE_AT = 264
+ SYS_OPEN_BY_HANDLE_AT = 265
+ SYS_CLOCK_ADJTIME = 266
+ SYS_SYNCFS = 267
+ SYS_SETNS = 268
+ SYS_SENDMMSG = 269
+ SYS_PROCESS_VM_READV = 270
+ SYS_PROCESS_VM_WRITEV = 271
+ SYS_KCMP = 272
+ SYS_FINIT_MODULE = 273
+ SYS_SCHED_SETATTR = 274
+ SYS_SCHED_GETATTR = 275
+ SYS_RENAMEAT2 = 276
+ SYS_SECCOMP = 277
+ SYS_GETRANDOM = 278
+ SYS_MEMFD_CREATE = 279
+ SYS_BPF = 280
+ SYS_EXECVEAT = 281
+ SYS_USERFAULTFD = 282
+ SYS_MEMBARRIER = 283
+ SYS_MLOCK2 = 284
+ SYS_COPY_FILE_RANGE = 285
+ SYS_PREADV2 = 286
+ SYS_PWRITEV2 = 287
+ SYS_PKEY_MPROTECT = 288
+ SYS_PKEY_ALLOC = 289
+ SYS_PKEY_FREE = 290
+ SYS_STATX = 291
+ SYS_IO_PGETEVENTS = 292
+ SYS_RSEQ = 293
+ SYS_KEXEC_FILE_LOAD = 294
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
+ SYS_OPEN_TREE = 428
+ SYS_MOVE_MOUNT = 429
+ SYS_FSOPEN = 430
+ SYS_FSCONFIG = 431
+ SYS_FSMOUNT = 432
+ SYS_FSPICK = 433
+ SYS_PIDFD_OPEN = 434
+ SYS_CLONE3 = 435
+ SYS_CLOSE_RANGE = 436
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
+ SYS_PROCESS_MADVISE = 440
+ SYS_EPOLL_PWAIT2 = 441
+ SYS_MOUNT_SETATTR = 442
+ SYS_QUOTACTL_FD = 443
+ SYS_LANDLOCK_CREATE_RULESET = 444
+ SYS_LANDLOCK_ADD_RULE = 445
+ SYS_LANDLOCK_RESTRICT_SELF = 446
+ SYS_PROCESS_MRELEASE = 448
+ SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 3b0418e68944..202a57e90086 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips/include /tmp/mips/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips && linux
@@ -430,4 +430,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 4446
SYS_PROCESS_MRELEASE = 4448
SYS_FUTEX_WAITV = 4449
+ SYS_SET_MEMPOLICY_HOME_NODE = 4450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 314ebf166ab9..1fbceb52d7cf 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64/include /tmp/mips64/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64 && linux
@@ -360,4 +360,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 5446
SYS_PROCESS_MRELEASE = 5448
SYS_FUTEX_WAITV = 5449
+ SYS_SET_MEMPOLICY_HOME_NODE = 5450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index b8fbb937a333..b4ffb7a207d5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64le/include /tmp/mips64le/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64le && linux
@@ -360,4 +360,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 5446
SYS_PROCESS_MRELEASE = 5448
SYS_FUTEX_WAITV = 5449
+ SYS_SET_MEMPOLICY_HOME_NODE = 5450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index ee309b2bac96..867985f9b440 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mipsle/include /tmp/mipsle/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mipsle && linux
@@ -430,4 +430,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 4446
SYS_PROCESS_MRELEASE = 4448
SYS_FUTEX_WAITV = 4449
+ SYS_SET_MEMPOLICY_HOME_NODE = 4450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index ac3748104ed0..a8cce69ede2f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc/include /tmp/ppc/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc && linux
@@ -437,4 +437,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5aa472111041..d44c5b39d79d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64/include /tmp/ppc64/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64 && linux
@@ -409,4 +409,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 0793ac1a65be..4214dd9c03a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64le/include /tmp/ppc64le/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64le && linux
@@ -409,4 +409,5 @@ const (
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a520962e3954..3e594a8c0910 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/riscv64/include /tmp/riscv64/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build riscv64 && linux
@@ -309,6 +309,8 @@ const (
SYS_LANDLOCK_CREATE_RULESET = 444
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
+ SYS_MEMFD_SECRET = 447
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index d1738586b4f6..7ea465204b7c 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -1,4 +1,4 @@
-// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/s390x/include -fsigned-char /tmp/s390x/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build s390x && linux @@ -374,4 +374,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dfd5660f9741..92f628ef4f23 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/sparc64/include /tmp/sparc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux @@ -388,4 +388,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 817edbf95c0a..597733813e37 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index ea453614e697..16af29189940 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 467971eed661..f59b18a97795 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 32eec5ed56f1..721ef5910321 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go new file mode 100644 index 000000000000..f258cfa24ed4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -0,0 +1,218 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int 
sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go new file mode 100644 index 000000000000..07919e0eccd9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -0,0 +1,219 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +// Deprecated: Use libc wrappers instead of direct syscalls. 
+const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); 
} + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 885842c0eb40..e2a64f0991a0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23c02337db3..34aa775219f0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -366,30 +366,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - 
SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 4eec078e5249..d9c78cdcbc45 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -90,27 +90,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte -} - type Statfs_t struct { Version uint32 Type uint32 @@ -136,31 +115,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -181,14 +135,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -337,41 +283,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -380,7 +294,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -398,6 +312,17 @@ type __Siginfo struct { Value [4]byte _ [32]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr 
uintptr + Value [4]byte + _ [32]byte +} type Sigset_t struct { Val [4]uint32 @@ -432,10 +357,12 @@ type FpReg struct { Pad [64]uint8 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } @@ -444,8 +371,9 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7622904a532f..26991b165596 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,41 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -376,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -395,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -435,10 +362,12 @@ type FpReg struct { Spare [12]uint64 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } @@ -449,6 +378,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 19223ce8ecf9..f8324e7e7f49 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,7 +33,7 @@ type Timeval struct { _ [4]byte } -type Time_t int32 +type Time_t int64 type Rusage struct { Utime Timeval @@ -88,26 +88,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -133,31 +113,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -179,14 +134,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -335,41 +282,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -378,7 +293,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -386,15 +301,27 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -402,22 +329,28 @@ type Sigset_t struct { } type Reg struct { - R [13]uint32 - R_sp uint32 - R_lr uint32 - R_pc uint32 - R_cpsr uint32 + R [13]uint32 + Sp uint32 + Lr uint32 + Pc uint32 + Cpsr uint32 } type FpReg struct { - Fpr_fpsr uint32 - Fpr [8][3]uint32 + Fpsr uint32 + Fpr [8]FpExtendedPrecision +} + +type FpExtendedPrecision struct { + Exponent uint32 + Mantissa_hi uint32 + Mantissa_lo uint32 } type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } @@ -426,8 +359,11 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + _ [4]byte + Data int64 Udata *byte + _ [4]byte + Ext [4]uint64 } type FdSet struct 
{ @@ -453,7 +389,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -464,7 +400,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -532,7 +467,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -543,7 +478,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -560,7 +495,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 8e3e33f67905..4220411f341a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,39 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -374,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -393,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -413,10 +342,12 @@ type FpReg struct { _ [8]byte } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } @@ -427,6 +358,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go new file mode 100644 index 000000000000..0660fd45c7c6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -0,0 +1,638 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Time_t int64 + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface 
uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __PtraceSiginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T [7]uint64 + S [12]uint64 + A [8]uint64 + Sepc uint64 + Sstatus uint64 +} + +type FpReg struct { + X [32][2]uint64 + Fcsr uint64 +} + +type FpExtendedPrecision struct{} + +type PtraceIoDesc struct { + Op int32 + Offs uintptr + Addr uintptr + Len uint64 +} + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte + Ext [4]uint64 +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte + _ [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Metric int32 +} + +type 
IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ uint16 + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Nhidx uint64 + Filler [2]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + _ [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x100 + AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go deleted file mode 100644 index 4c485261d6df..000000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go +++ /dev/null @@ -1,42 +0,0 @@ -// cgo -godefs types_illumos.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build amd64 && illumos -// +build amd64,illumos - -package unix - -const ( - TUNNEWPPA = 0x540001 - TUNSETPPA = 0x540002 - - I_STR = 0x5308 - I_POP = 0x5303 - I_PUSH = 0x5302 - I_LINK = 0x530c - I_UNLINK = 0x530d - I_PLINK = 0x5316 - I_PUNLINK = 0x5317 - - IF_UNITSEL = -0x7ffb8cca -) - -type strbuf struct { - Maxlen int32 - Len int32 - Buf *int8 -} - -type Strioctl struct { - Cmd int32 - Timout int32 - Len int32 - Dp *int8 -} - -type Lifreq struct { - Name [32]int8 - Lifru1 [4]byte - Type uint32 - Lifru [336]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 66788f156814..ff6881167d97 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -24,6 +24,11 @@ type ItimerSpec struct { Value Timespec } +type Itimerval struct { + Interval Timeval + Value Timeval +} + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -749,6 +754,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x100 AT_EACCESS = 0x200 + + OPEN_TREE_CLONE = 0x1 + + MOVE_MOUNT_F_SYMLINKS = 0x1 + MOVE_MOUNT_F_AUTOMOUNTS = 0x2 + MOVE_MOUNT_F_EMPTY_PATH = 0x4 + MOVE_MOUNT_T_SYMLINKS = 0x10 + MOVE_MOUNT_T_AUTOMOUNTS = 0x20 + MOVE_MOUNT_T_EMPTY_PATH = 0x40 + MOVE_MOUNT_SET_GROUP = 0x100 + + FSOPEN_CLOEXEC = 0x1 + + FSPICK_CLOEXEC = 0x1 + FSPICK_SYMLINK_NOFOLLOW = 0x2 + FSPICK_NO_AUTOMOUNT = 0x4 + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ) type OpenHow struct { @@ -921,6 +945,9 @@ type PerfEventAttr struct { Aux_watermark uint32 Sample_max_stack uint16 _ uint16 + Aux_sample_size uint32 + _ uint32 + Sig_data uint64 } type PerfEventMmapPage struct { @@ -1103,7 +1130,9 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_MAX = 0xd PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1437,6 +1466,11 @@ const ( IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_PARENT_DEV_NAME = 0x38 + IFLA_PARENT_DEV_BUS_NAME = 0x39 + IFLA_GRO_MAX_SIZE = 0x3a + IFLA_TSO_MAX_SIZE = 0x3b + IFLA_TSO_MAX_SEGS = 0x3c IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -2945,7 +2979,7 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x4d + DEVLINK_CMD_MAX = 0x51 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3174,7 +3208,7 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xaa + DEVLINK_ATTR_MAX = 0xae DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3614,7 +3648,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0x9 + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3766,6 +3804,8 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const SPEED_UNKNOWN = -0x1 + type EthtoolDrvinfo struct { Cmd uint32 Driver [32]byte @@ -4065,3 +4105,1505 @@ const ( NL_POLICY_TYPE_ATTR_MASK = 0xc NL_POLICY_TYPE_ATTR_MAX = 0xc ) + +type 
CANBitTiming struct { + Bitrate uint32 + Sample_point uint32 + Tq uint32 + Prop_seg uint32 + Phase_seg1 uint32 + Phase_seg2 uint32 + Sjw uint32 + Brp uint32 +} + +type CANBitTimingConst struct { + Name [16]uint8 + Tseg1_min uint32 + Tseg1_max uint32 + Tseg2_min uint32 + Tseg2_max uint32 + Sjw_max uint32 + Brp_min uint32 + Brp_max uint32 + Brp_inc uint32 +} + +type CANClock struct { + Freq uint32 +} + +type CANBusErrorCounters struct { + Txerr uint16 + Rxerr uint16 +} + +type CANCtrlMode struct { + Mask uint32 + Flags uint32 +} + +type CANDeviceStats struct { + Bus_error uint32 + Error_warning uint32 + Error_passive uint32 + Bus_off uint32 + Arbitration_lost uint32 + Restarts uint32 +} + +const ( + CAN_STATE_ERROR_ACTIVE = 0x0 + CAN_STATE_ERROR_WARNING = 0x1 + CAN_STATE_ERROR_PASSIVE = 0x2 + CAN_STATE_BUS_OFF = 0x3 + CAN_STATE_STOPPED = 0x4 + CAN_STATE_SLEEPING = 0x5 + CAN_STATE_MAX = 0x6 +) + +const ( + IFLA_CAN_UNSPEC = 0x0 + IFLA_CAN_BITTIMING = 0x1 + IFLA_CAN_BITTIMING_CONST = 0x2 + IFLA_CAN_CLOCK = 0x3 + IFLA_CAN_STATE = 0x4 + IFLA_CAN_CTRLMODE = 0x5 + IFLA_CAN_RESTART_MS = 0x6 + IFLA_CAN_RESTART = 0x7 + IFLA_CAN_BERR_COUNTER = 0x8 + IFLA_CAN_DATA_BITTIMING = 0x9 + IFLA_CAN_DATA_BITTIMING_CONST = 0xa + IFLA_CAN_TERMINATION = 0xb + IFLA_CAN_TERMINATION_CONST = 0xc + IFLA_CAN_BITRATE_CONST = 0xd + IFLA_CAN_DATA_BITRATE_CONST = 0xe + IFLA_CAN_BITRATE_MAX = 0xf +) + +type KCMAttach struct { + Fd int32 + Bpf_fd int32 +} + +type KCMUnattach struct { + Fd int32 +} + +type KCMClone struct { + Fd int32 +} + +const ( + NL80211_AC_BE = 0x2 + NL80211_AC_BK = 0x3 + NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED = 0x0 + NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 + NL80211_AC_VI = 0x1 + NL80211_AC_VO = 0x0 + NL80211_ATTR_4ADDR = 0x53 + NL80211_ATTR_ACK = 0x5c + NL80211_ATTR_ACK_SIGNAL = 0x107 + NL80211_ATTR_ACL_POLICY = 0xa5 + NL80211_ATTR_ADMITTED_TIME = 0xd4 + NL80211_ATTR_AIRTIME_WEIGHT = 0x112 + NL80211_ATTR_AKM_SUITES = 0x4c + NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AUTH_DATA = 0x9c + NL80211_ATTR_AUTH_TYPE = 0x35 + NL80211_ATTR_BANDS = 0xef + NL80211_ATTR_BEACON_HEAD = 0xe + NL80211_ATTR_BEACON_INTERVAL = 0xc + NL80211_ATTR_BEACON_TAIL = 0xf + NL80211_ATTR_BG_SCAN_PERIOD = 0x98 + NL80211_ATTR_BSS_BASIC_RATES = 0x24 + NL80211_ATTR_BSS = 0x2f + NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_HT_OPMODE = 0x6d + NL80211_ATTR_BSSID = 0xf5 + NL80211_ATTR_BSS_SELECT = 0xe3 + NL80211_ATTR_BSS_SHORT_PREAMBLE = 0x1d + NL80211_ATTR_BSS_SHORT_SLOT_TIME = 0x1e + NL80211_ATTR_CENTER_FREQ1 = 0xa0 + NL80211_ATTR_CENTER_FREQ1_OFFSET = 0x123 + NL80211_ATTR_CENTER_FREQ2 = 0xa1 + NL80211_ATTR_CHANNEL_WIDTH = 0x9f + NL80211_ATTR_CH_SWITCH_BLOCK_TX = 0xb8 + NL80211_ATTR_CH_SWITCH_COUNT = 0xb7 + NL80211_ATTR_CIPHER_SUITE_GROUP = 0x4a + NL80211_ATTR_CIPHER_SUITES = 0x39 + NL80211_ATTR_CIPHER_SUITES_PAIRWISE = 0x49 + NL80211_ATTR_CNTDWN_OFFS_BEACON = 0xba + NL80211_ATTR_CNTDWN_OFFS_PRESP = 0xbb + NL80211_ATTR_COALESCE_RULE = 0xb6 + NL80211_ATTR_COALESCE_RULE_CONDITION = 0x2 + NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 + NL80211_ATTR_COALESCE_RULE_MAX = 0x3 + NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_CONN_FAILED_REASON = 0x9b + NL80211_ATTR_CONTROL_PORT = 0x44 + NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 + NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT = 0x67 + NL80211_ATTR_CONTROL_PORT_NO_PREAUTH = 0x11e + NL80211_ATTR_CONTROL_PORT_OVER_NL80211 = 0x108 + NL80211_ATTR_COOKIE = 0x58 + NL80211_ATTR_CQM_BEACON_LOSS_EVENT = 0x8 + NL80211_ATTR_CQM = 0x5e + NL80211_ATTR_CQM_MAX = 0x9 + NL80211_ATTR_CQM_PKT_LOSS_EVENT 
= 0x4 + NL80211_ATTR_CQM_RSSI_HYST = 0x2 + NL80211_ATTR_CQM_RSSI_LEVEL = 0x9 + NL80211_ATTR_CQM_RSSI_THOLD = 0x1 + NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT = 0x3 + NL80211_ATTR_CQM_TXE_INTVL = 0x7 + NL80211_ATTR_CQM_TXE_PKTS = 0x6 + NL80211_ATTR_CQM_TXE_RATE = 0x5 + NL80211_ATTR_CRIT_PROT_ID = 0xb3 + NL80211_ATTR_CSA_C_OFF_BEACON = 0xba + NL80211_ATTR_CSA_C_OFF_PRESP = 0xbb + NL80211_ATTR_CSA_C_OFFSETS_TX = 0xcd + NL80211_ATTR_CSA_IES = 0xb9 + NL80211_ATTR_DEVICE_AP_SME = 0x8d + NL80211_ATTR_DFS_CAC_TIME = 0x7 + NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_HE = 0x12d + NL80211_ATTR_DISABLE_HT = 0x93 + NL80211_ATTR_DISABLE_VHT = 0xaf + NL80211_ATTR_DISCONNECTED_BY_AP = 0x47 + NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e + NL80211_ATTR_DTIM_PERIOD = 0xd + NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EXT_CAPA = 0xa9 + NL80211_ATTR_EXT_CAPA_MASK = 0xaa + NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 + NL80211_ATTR_EXTERNAL_AUTH_SUPPORT = 0x105 + NL80211_ATTR_EXT_FEATURES = 0xd9 + NL80211_ATTR_FEATURE_FLAGS = 0x8f + NL80211_ATTR_FILS_CACHE_ID = 0xfd + NL80211_ATTR_FILS_DISCOVERY = 0x126 + NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM = 0xfb + NL80211_ATTR_FILS_ERP_REALM = 0xfa + NL80211_ATTR_FILS_ERP_RRK = 0xfc + NL80211_ATTR_FILS_ERP_USERNAME = 0xf9 + NL80211_ATTR_FILS_KEK = 0xf2 + NL80211_ATTR_FILS_NONCES = 0xf3 + NL80211_ATTR_FRAME = 0x33 + NL80211_ATTR_FRAME_MATCH = 0x5b + NL80211_ATTR_FRAME_TYPE = 0x65 + NL80211_ATTR_FREQ_AFTER = 0x3b + NL80211_ATTR_FREQ_BEFORE = 0x3a + NL80211_ATTR_FREQ_FIXED = 0x3c + NL80211_ATTR_FREQ_RANGE_END = 0x3 + NL80211_ATTR_FREQ_RANGE_MAX_BW = 0x4 + NL80211_ATTR_FREQ_RANGE_START = 0x2 + NL80211_ATTR_FTM_RESPONDER = 0x10e + NL80211_ATTR_FTM_RESPONDER_STATS = 0x10f + NL80211_ATTR_GENERATION = 0x2e + NL80211_ATTR_HANDLE_DFS = 0xbf + NL80211_ATTR_HE_6GHZ_CAPABILITY = 0x125 + NL80211_ATTR_HE_BSS_COLOR = 0x11b + NL80211_ATTR_HE_CAPABILITY = 0x10d + NL80211_ATTR_HE_OBSS_PD = 0x117 + NL80211_ATTR_HIDDEN_SSID = 0x7e + NL80211_ATTR_HT_CAPABILITY = 0x1f + NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_IE_ASSOC_RESP = 0x80 + NL80211_ATTR_IE = 0x2a + NL80211_ATTR_IE_PROBE_RESP = 0x7f + NL80211_ATTR_IE_RIC = 0xb2 + NL80211_ATTR_IFACE_SOCKET_OWNER = 0xcc + NL80211_ATTR_IFINDEX = 0x3 + NL80211_ATTR_IFNAME = 0x4 + NL80211_ATTR_IFTYPE_AKM_SUITES = 0x11c + NL80211_ATTR_IFTYPE = 0x5 + NL80211_ATTR_IFTYPE_EXT_CAPA = 0xe6 + NL80211_ATTR_INACTIVITY_TIMEOUT = 0x96 + NL80211_ATTR_INTERFACE_COMBINATIONS = 0x78 + NL80211_ATTR_KEY_CIPHER = 0x9 + NL80211_ATTR_KEY = 0x50 + NL80211_ATTR_KEY_DATA = 0x7 + NL80211_ATTR_KEY_DEFAULT = 0xb + NL80211_ATTR_KEY_DEFAULT_MGMT = 0x28 + NL80211_ATTR_KEY_DEFAULT_TYPES = 0x6e + NL80211_ATTR_KEY_IDX = 0x8 + NL80211_ATTR_KEYS = 0x51 + NL80211_ATTR_KEY_SEQ = 0xa + NL80211_ATTR_KEY_TYPE = 0x37 + NL80211_ATTR_LOCAL_MESH_POWER_MODE = 0xa4 + NL80211_ATTR_LOCAL_STATE_CHANGE = 0x5f + NL80211_ATTR_MAC_ACL_MAX = 0xa7 + NL80211_ATTR_MAC_ADDRS = 0xa6 + NL80211_ATTR_MAC = 0x6 + NL80211_ATTR_MAC_HINT = 0xc8 + NL80211_ATTR_MAC_MASK = 0xd7 + NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca + NL80211_ATTR_MAX = 0x137 + NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 + NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 + NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS = 0x7b + NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION = 0x6f + NL80211_ATTR_MAX_SCAN_IE_LEN = 0x38 + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 + 
NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MCAST_RATE = 0x6b + NL80211_ATTR_MDID = 0xb1 + NL80211_ATTR_MEASUREMENT_DURATION = 0xeb + NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY = 0xec + NL80211_ATTR_MESH_CONFIG = 0x23 + NL80211_ATTR_MESH_ID = 0x18 + NL80211_ATTR_MESH_PEER_AID = 0xed + NL80211_ATTR_MESH_SETUP = 0x70 + NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MNTR_FLAGS = 0x17 + NL80211_ATTR_MPATH_INFO = 0x1b + NL80211_ATTR_MPATH_NEXT_HOP = 0x1a + NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED = 0xf4 + NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR = 0xe8 + NL80211_ATTR_MU_MIMO_GROUP_DATA = 0xe7 + NL80211_ATTR_NAN_FUNC = 0xf0 + NL80211_ATTR_NAN_MASTER_PREF = 0xee + NL80211_ATTR_NAN_MATCH = 0xf1 + NL80211_ATTR_NETNS_FD = 0xdb + NL80211_ATTR_NOACK_MAP = 0x95 + NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c + NL80211_ATTR_OPER_CLASS = 0xd6 + NL80211_ATTR_OPMODE_NOTIF = 0xc2 + NL80211_ATTR_P2P_CTWINDOW = 0xa2 + NL80211_ATTR_P2P_OPPPS = 0xa3 + NL80211_ATTR_PAD = 0xe5 + NL80211_ATTR_PBSS = 0xe2 + NL80211_ATTR_PEER_AID = 0xb5 + NL80211_ATTR_PEER_MEASUREMENTS = 0x111 + NL80211_ATTR_PID = 0x52 + NL80211_ATTR_PMK = 0xfe + NL80211_ATTR_PMKID = 0x55 + NL80211_ATTR_PMK_LIFETIME = 0x11f + NL80211_ATTR_PMKR0_NAME = 0x102 + NL80211_ATTR_PMK_REAUTH_THRESHOLD = 0x120 + NL80211_ATTR_PMKSA_CANDIDATE = 0x86 + NL80211_ATTR_PORT_AUTHORIZED = 0x103 + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 + NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_PREV_BSSID = 0x4f + NL80211_ATTR_PRIVACY = 0x46 + NL80211_ATTR_PROBE_RESP = 0x91 + NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 + NL80211_ATTR_PROTOCOL_FEATURES = 0xad + NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_EVENT = 0xa8 + NL80211_ATTR_REASON_CODE = 0x36 + NL80211_ATTR_RECEIVE_MULTICAST = 0x121 + NL80211_ATTR_RECONNECT_REQUESTED = 0x12b + NL80211_ATTR_REG_ALPHA2 = 0x21 + NL80211_ATTR_REG_INDOOR = 0xdd + NL80211_ATTR_REG_INITIATOR = 0x30 + NL80211_ATTR_REG_RULE_FLAGS = 0x1 + NL80211_ATTR_REG_RULES = 0x22 + NL80211_ATTR_REG_TYPE = 0x31 + NL80211_ATTR_REKEY_DATA = 0x7a + NL80211_ATTR_REQ_IE = 0x4d + NL80211_ATTR_RESP_IE = 0x4e + NL80211_ATTR_ROAM_SUPPORT = 0x83 + NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RXMGMT_FLAGS = 0xbc + NL80211_ATTR_RX_SIGNAL_DBM = 0x97 + NL80211_ATTR_S1G_CAPABILITY = 0x128 + NL80211_ATTR_S1G_CAPABILITY_MASK = 0x129 + NL80211_ATTR_SAE_DATA = 0x9c + NL80211_ATTR_SAE_PASSWORD = 0x115 + NL80211_ATTR_SAE_PWE = 0x12a + NL80211_ATTR_SAR_SPEC = 0x12c + NL80211_ATTR_SCAN_FLAGS = 0x9e + NL80211_ATTR_SCAN_FREQ_KHZ = 0x124 + NL80211_ATTR_SCAN_FREQUENCIES = 0x2c + NL80211_ATTR_SCAN_GENERATION = 0x2e + NL80211_ATTR_SCAN_SSIDS = 0x2d + NL80211_ATTR_SCAN_START_TIME_TSF_BSSID = 0xea + NL80211_ATTR_SCAN_START_TIME_TSF = 0xe9 + NL80211_ATTR_SCAN_SUPP_RATES = 0x7d + NL80211_ATTR_SCHED_SCAN_DELAY = 0xdc + NL80211_ATTR_SCHED_SCAN_INTERVAL = 0x77 + NL80211_ATTR_SCHED_SCAN_MATCH = 0x84 + NL80211_ATTR_SCHED_SCAN_MATCH_SSID = 0x1 + NL80211_ATTR_SCHED_SCAN_MAX_REQS = 0x100 + NL80211_ATTR_SCHED_SCAN_MULTI = 0xff + NL80211_ATTR_SCHED_SCAN_PLANS = 0xe1 + NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI = 0xf6 + NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST = 0xf7 + NL80211_ATTR_SMPS_MODE = 0xd5 + NL80211_ATTR_SOCKET_OWNER = 0xcc + NL80211_ATTR_SOFTWARE_IFTYPES = 0x79 + NL80211_ATTR_SPLIT_WIPHY_DUMP = 0xae + NL80211_ATTR_SSID = 0x34 + NL80211_ATTR_STA_AID = 0x10 + NL80211_ATTR_STA_CAPABILITY = 0xab + NL80211_ATTR_STA_EXT_CAPABILITY = 0xac + NL80211_ATTR_STA_FLAGS2 = 0x43 + NL80211_ATTR_STA_FLAGS = 0x11 + NL80211_ATTR_STA_INFO = 0x15 + 
NL80211_ATTR_STA_LISTEN_INTERVAL = 0x12 + NL80211_ATTR_STA_PLINK_ACTION = 0x19 + NL80211_ATTR_STA_PLINK_STATE = 0x74 + NL80211_ATTR_STA_SUPPORTED_CHANNELS = 0xbd + NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES = 0xbe + NL80211_ATTR_STA_SUPPORTED_RATES = 0x13 + NL80211_ATTR_STA_SUPPORT_P2P_PS = 0xe4 + NL80211_ATTR_STATUS_CODE = 0x48 + NL80211_ATTR_STA_TX_POWER = 0x114 + NL80211_ATTR_STA_TX_POWER_SETTING = 0x113 + NL80211_ATTR_STA_VLAN = 0x14 + NL80211_ATTR_STA_WME = 0x81 + NL80211_ATTR_SUPPORT_10_MHZ = 0xc1 + NL80211_ATTR_SUPPORT_5_MHZ = 0xc0 + NL80211_ATTR_SUPPORT_AP_UAPSD = 0x82 + NL80211_ATTR_SUPPORTED_COMMANDS = 0x32 + NL80211_ATTR_SUPPORTED_IFTYPES = 0x20 + NL80211_ATTR_SUPPORT_IBSS_RSN = 0x68 + NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 + NL80211_ATTR_SURVEY_INFO = 0x54 + NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TDLS_ACTION = 0x88 + NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 + NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c + NL80211_ATTR_TDLS_INITIATOR = 0xcf + NL80211_ATTR_TDLS_OPERATION = 0x8a + NL80211_ATTR_TDLS_PEER_CAPABILITY = 0xcb + NL80211_ATTR_TDLS_SUPPORT = 0x8b + NL80211_ATTR_TESTDATA = 0x45 + NL80211_ATTR_TID_CONFIG = 0x11d + NL80211_ATTR_TIMED_OUT = 0x41 + NL80211_ATTR_TIMEOUT = 0x110 + NL80211_ATTR_TIMEOUT_REASON = 0xf8 + NL80211_ATTR_TSID = 0xd2 + NL80211_ATTR_TWT_RESPONDER = 0x116 + NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_NO_CCK_RATE = 0x87 + NL80211_ATTR_TXQ_LIMIT = 0x10a + NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b + NL80211_ATTR_TXQ_QUANTUM = 0x10c + NL80211_ATTR_TXQ_STATS = 0x109 + NL80211_ATTR_TX_RATES = 0x5a + NL80211_ATTR_UNSOL_BCAST_PROBE_RESP = 0x127 + NL80211_ATTR_UNSPEC = 0x0 + NL80211_ATTR_USE_MFP = 0x42 + NL80211_ATTR_USER_PRIO = 0xd3 + NL80211_ATTR_USER_REG_HINT_TYPE = 0x9a + NL80211_ATTR_USE_RRM = 0xd0 + NL80211_ATTR_VENDOR_DATA = 0xc5 + NL80211_ATTR_VENDOR_EVENTS = 0xc6 + NL80211_ATTR_VENDOR_ID = 0xc3 + NL80211_ATTR_VENDOR_SUBCMD = 0xc4 + NL80211_ATTR_VHT_CAPABILITY = 0x9d + NL80211_ATTR_VHT_CAPABILITY_MASK = 0xb0 + NL80211_ATTR_VLAN_ID = 0x11a + NL80211_ATTR_WANT_1X_4WAY_HS = 0x101 + NL80211_ATTR_WDEV = 0x99 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX = 0x72 + NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX = 0x71 + NL80211_ATTR_WIPHY_ANTENNA_RX = 0x6a + NL80211_ATTR_WIPHY_ANTENNA_TX = 0x69 + NL80211_ATTR_WIPHY_BANDS = 0x16 + NL80211_ATTR_WIPHY_CHANNEL_TYPE = 0x27 + NL80211_ATTR_WIPHY = 0x1 + NL80211_ATTR_WIPHY_COVERAGE_CLASS = 0x59 + NL80211_ATTR_WIPHY_DYN_ACK = 0xd1 + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG = 0x119 + NL80211_ATTR_WIPHY_EDMG_CHANNELS = 0x118 + NL80211_ATTR_WIPHY_FRAG_THRESHOLD = 0x3f + NL80211_ATTR_WIPHY_FREQ = 0x26 + NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 + NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e + NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d + NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 + NL80211_ATTR_WIPHY_SELF_MANAGED_REG = 0xd8 + NL80211_ATTR_WIPHY_TX_POWER_LEVEL = 0x62 + NL80211_ATTR_WIPHY_TX_POWER_SETTING = 0x61 + NL80211_ATTR_WIPHY_TXQ_PARAMS = 0x25 + NL80211_ATTR_WOWLAN_TRIGGERS = 0x75 + NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED = 0x76 + NL80211_ATTR_WPA_VERSIONS = 0x4b + NL80211_AUTHTYPE_AUTOMATIC = 0x8 + NL80211_AUTHTYPE_FILS_PK = 0x7 + NL80211_AUTHTYPE_FILS_SK = 0x5 + NL80211_AUTHTYPE_FILS_SK_PFS = 0x6 + NL80211_AUTHTYPE_FT = 0x2 + NL80211_AUTHTYPE_MAX = 0x7 + NL80211_AUTHTYPE_NETWORK_EAP = 0x3 + NL80211_AUTHTYPE_OPEN_SYSTEM = 0x0 + NL80211_AUTHTYPE_SAE = 0x4 + NL80211_AUTHTYPE_SHARED_KEY = 0x1 + NL80211_BAND_2GHZ = 0x0 + NL80211_BAND_5GHZ = 0x1 + NL80211_BAND_60GHZ = 0x2 + NL80211_BAND_6GHZ = 
0x3 + NL80211_BAND_ATTR_EDMG_BW_CONFIG = 0xb + NL80211_BAND_ATTR_EDMG_CHANNELS = 0xa + NL80211_BAND_ATTR_FREQS = 0x1 + NL80211_BAND_ATTR_HT_AMPDU_DENSITY = 0x6 + NL80211_BAND_ATTR_HT_AMPDU_FACTOR = 0x5 + NL80211_BAND_ATTR_HT_CAPA = 0x4 + NL80211_BAND_ATTR_HT_MCS_SET = 0x3 + NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 + NL80211_BAND_ATTR_MAX = 0xb + NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_VHT_CAPA = 0x8 + NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 + NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 + NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_S1GHZ = 0x4 + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 + NL80211_BITRATE_ATTR_MAX = 0x2 + NL80211_BITRATE_ATTR_RATE = 0x1 + NL80211_BSS_BEACON_IES = 0xb + NL80211_BSS_BEACON_INTERVAL = 0x4 + NL80211_BSS_BEACON_TSF = 0xd + NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CAPABILITY = 0x5 + NL80211_BSS_CHAIN_SIGNAL = 0x13 + NL80211_BSS_CHAN_WIDTH_10 = 0x1 + NL80211_BSS_CHAN_WIDTH_1 = 0x3 + NL80211_BSS_CHAN_WIDTH_20 = 0x0 + NL80211_BSS_CHAN_WIDTH_2 = 0x4 + NL80211_BSS_CHAN_WIDTH_5 = 0x2 + NL80211_BSS_CHAN_WIDTH = 0xc + NL80211_BSS_FREQUENCY = 0x2 + NL80211_BSS_FREQUENCY_OFFSET = 0x14 + NL80211_BSS_INFORMATION_ELEMENTS = 0x6 + NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf + NL80211_BSS_MAX = 0x14 + NL80211_BSS_PAD = 0x10 + NL80211_BSS_PARENT_BSSID = 0x12 + NL80211_BSS_PARENT_TSF = 0x11 + NL80211_BSS_PRESP_DATA = 0xe + NL80211_BSS_SEEN_MS_AGO = 0xa + NL80211_BSS_SELECT_ATTR_BAND_PREF = 0x2 + NL80211_BSS_SELECT_ATTR_MAX = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 0x3 + NL80211_BSS_SELECT_ATTR_RSSI = 0x1 + NL80211_BSS_SIGNAL_MBM = 0x7 + NL80211_BSS_SIGNAL_UNSPEC = 0x8 + NL80211_BSS_STATUS_ASSOCIATED = 0x1 + NL80211_BSS_STATUS_AUTHENTICATED = 0x0 + NL80211_BSS_STATUS = 0x9 + NL80211_BSS_STATUS_IBSS_JOINED = 0x2 + NL80211_BSS_TSF = 0x3 + NL80211_CHAN_HT20 = 0x1 + NL80211_CHAN_HT40MINUS = 0x2 + NL80211_CHAN_HT40PLUS = 0x3 + NL80211_CHAN_NO_HT = 0x0 + NL80211_CHAN_WIDTH_10 = 0x7 + NL80211_CHAN_WIDTH_160 = 0x5 + NL80211_CHAN_WIDTH_16 = 0xc + NL80211_CHAN_WIDTH_1 = 0x8 + NL80211_CHAN_WIDTH_20 = 0x1 + NL80211_CHAN_WIDTH_20_NOHT = 0x0 + NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_40 = 0x2 + NL80211_CHAN_WIDTH_4 = 0xa + NL80211_CHAN_WIDTH_5 = 0x6 + NL80211_CHAN_WIDTH_80 = 0x3 + NL80211_CHAN_WIDTH_80P80 = 0x4 + NL80211_CHAN_WIDTH_8 = 0xb + NL80211_CMD_ABORT_SCAN = 0x72 + NL80211_CMD_ACTION = 0x3b + NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_NAN_FUNCTION = 0x75 + NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOCIATE = 0x26 + NL80211_CMD_AUTHENTICATE = 0x25 + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 + NL80211_CMD_CHANGE_NAN_CONFIG = 0x77 + NL80211_CMD_CHANNEL_SWITCH = 0x66 + NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_CONNECT = 0x2e + NL80211_CMD_CONN_FAILED = 0x5b + NL80211_CMD_CONTROL_PORT_FRAME = 0x81 + NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS = 0x8b + NL80211_CMD_CRIT_PROTOCOL_START = 0x62 + NL80211_CMD_CRIT_PROTOCOL_STOP = 0x63 + NL80211_CMD_DEAUTHENTICATE = 0x27 + NL80211_CMD_DEL_BEACON = 0x10 + NL80211_CMD_DEL_INTERFACE = 0x8 + NL80211_CMD_DEL_KEY = 0xc + NL80211_CMD_DEL_MPATH = 0x18 + NL80211_CMD_DEL_NAN_FUNCTION = 0x76 + NL80211_CMD_DEL_PMK = 0x7c + NL80211_CMD_DEL_PMKSA = 0x35 + NL80211_CMD_DEL_STATION = 0x14 + NL80211_CMD_DEL_TX_TS = 0x6a + NL80211_CMD_DEL_WIPHY = 0x4 + 
NL80211_CMD_DISASSOCIATE = 0x28 + NL80211_CMD_DISCONNECT = 0x30 + NL80211_CMD_EXTERNAL_AUTH = 0x7f + NL80211_CMD_FLUSH_PMKSA = 0x36 + NL80211_CMD_FRAME = 0x3b + NL80211_CMD_FRAME_TX_STATUS = 0x3c + NL80211_CMD_FRAME_WAIT_CANCEL = 0x43 + NL80211_CMD_FT_EVENT = 0x61 + NL80211_CMD_GET_BEACON = 0xd + NL80211_CMD_GET_COALESCE = 0x64 + NL80211_CMD_GET_FTM_RESPONDER_STATS = 0x82 + NL80211_CMD_GET_INTERFACE = 0x5 + NL80211_CMD_GET_KEY = 0x9 + NL80211_CMD_GET_MESH_CONFIG = 0x1c + NL80211_CMD_GET_MESH_PARAMS = 0x1c + NL80211_CMD_GET_MPATH = 0x15 + NL80211_CMD_GET_MPP = 0x6b + NL80211_CMD_GET_POWER_SAVE = 0x3e + NL80211_CMD_GET_PROTOCOL_FEATURES = 0x5f + NL80211_CMD_GET_REG = 0x1f + NL80211_CMD_GET_SCAN = 0x20 + NL80211_CMD_GET_STATION = 0x11 + NL80211_CMD_GET_SURVEY = 0x32 + NL80211_CMD_GET_WIPHY = 0x1 + NL80211_CMD_GET_WOWLAN = 0x49 + NL80211_CMD_JOIN_IBSS = 0x2b + NL80211_CMD_JOIN_MESH = 0x44 + NL80211_CMD_JOIN_OCB = 0x6c + NL80211_CMD_LEAVE_IBSS = 0x2c + NL80211_CMD_LEAVE_MESH = 0x45 + NL80211_CMD_LEAVE_OCB = 0x6d + NL80211_CMD_MAX = 0x93 + NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_NAN_MATCH = 0x78 + NL80211_CMD_NEW_BEACON = 0xf + NL80211_CMD_NEW_INTERFACE = 0x7 + NL80211_CMD_NEW_KEY = 0xb + NL80211_CMD_NEW_MPATH = 0x17 + NL80211_CMD_NEW_PEER_CANDIDATE = 0x48 + NL80211_CMD_NEW_SCAN_RESULTS = 0x22 + NL80211_CMD_NEW_STATION = 0x13 + NL80211_CMD_NEW_SURVEY_RESULTS = 0x33 + NL80211_CMD_NEW_WIPHY = 0x3 + NL80211_CMD_NOTIFY_CQM = 0x40 + NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 + NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 + NL80211_CMD_PEER_MEASUREMENT_START = 0x83 + NL80211_CMD_PMKSA_CANDIDATE = 0x50 + NL80211_CMD_PORT_AUTHORIZED = 0x7d + NL80211_CMD_PROBE_CLIENT = 0x54 + NL80211_CMD_PROBE_MESH_LINK = 0x88 + NL80211_CMD_RADAR_DETECT = 0x5e + NL80211_CMD_REG_BEACON_HINT = 0x2a + NL80211_CMD_REG_CHANGE = 0x24 + NL80211_CMD_REGISTER_ACTION = 0x3a + NL80211_CMD_REGISTER_BEACONS = 0x55 + NL80211_CMD_REGISTER_FRAME = 0x3a + NL80211_CMD_RELOAD_REGDB = 0x7e + NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REQ_SET_REG = 0x1b + NL80211_CMD_ROAM = 0x2f + NL80211_CMD_SCAN_ABORTED = 0x23 + NL80211_CMD_SCHED_SCAN_RESULTS = 0x4d + NL80211_CMD_SCHED_SCAN_STOPPED = 0x4e + NL80211_CMD_SET_BEACON = 0xe + NL80211_CMD_SET_BSS = 0x19 + NL80211_CMD_SET_CHANNEL = 0x41 + NL80211_CMD_SET_COALESCE = 0x65 + NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_INTERFACE = 0x6 + NL80211_CMD_SET_KEY = 0xa + NL80211_CMD_SET_MAC_ACL = 0x5d + NL80211_CMD_SET_MCAST_RATE = 0x5c + NL80211_CMD_SET_MESH_CONFIG = 0x1d + NL80211_CMD_SET_MESH_PARAMS = 0x1d + NL80211_CMD_SET_MGMT_EXTRA_IE = 0x1e + NL80211_CMD_SET_MPATH = 0x16 + NL80211_CMD_SET_MULTICAST_TO_UNICAST = 0x79 + NL80211_CMD_SET_NOACK_MAP = 0x57 + NL80211_CMD_SET_PMK = 0x7b + NL80211_CMD_SET_PMKSA = 0x34 + NL80211_CMD_SET_POWER_SAVE = 0x3d + NL80211_CMD_SET_QOS_MAP = 0x68 + NL80211_CMD_SET_REG = 0x1a + NL80211_CMD_SET_REKEY_OFFLOAD = 0x4f + NL80211_CMD_SET_SAR_SPECS = 0x8c + NL80211_CMD_SET_STATION = 0x12 + NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 + NL80211_CMD_SET_WDS_PEER = 0x42 + NL80211_CMD_SET_WIPHY = 0x2 + NL80211_CMD_SET_WIPHY_NETNS = 0x31 + NL80211_CMD_SET_WOWLAN = 0x4a + NL80211_CMD_STA_OPMODE_CHANGED = 0x80 + NL80211_CMD_START_AP = 0xf + NL80211_CMD_START_NAN = 0x73 + NL80211_CMD_START_P2P_DEVICE = 0x59 + NL80211_CMD_START_SCHED_SCAN = 0x4b + NL80211_CMD_STOP_AP = 0x10 + NL80211_CMD_STOP_NAN = 0x74 + NL80211_CMD_STOP_P2P_DEVICE = 0x5a + NL80211_CMD_STOP_SCHED_SCAN = 0x4c + 
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 0x70 + NL80211_CMD_TDLS_CHANNEL_SWITCH = 0x6f + NL80211_CMD_TDLS_MGMT = 0x52 + NL80211_CMD_TDLS_OPER = 0x51 + NL80211_CMD_TESTMODE = 0x2d + NL80211_CMD_TRIGGER_SCAN = 0x21 + NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 0x56 + NL80211_CMD_UNEXPECTED_FRAME = 0x53 + NL80211_CMD_UNPROT_BEACON = 0x8a + NL80211_CMD_UNPROT_DEAUTHENTICATE = 0x46 + NL80211_CMD_UNPROT_DISASSOCIATE = 0x47 + NL80211_CMD_UNSPEC = 0x0 + NL80211_CMD_UPDATE_CONNECT_PARAMS = 0x7a + NL80211_CMD_UPDATE_FT_IES = 0x60 + NL80211_CMD_UPDATE_OWE_INFO = 0x87 + NL80211_CMD_VENDOR = 0x67 + NL80211_CMD_WIPHY_REG_CHANGE = 0x71 + NL80211_COALESCE_CONDITION_MATCH = 0x0 + NL80211_COALESCE_CONDITION_NO_MATCH = 0x1 + NL80211_CONN_FAIL_BLOCKED_CLIENT = 0x1 + NL80211_CONN_FAIL_MAX_CLIENTS = 0x0 + NL80211_CQM_RSSI_BEACON_LOSS_EVENT = 0x2 + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH = 0x1 + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW = 0x0 + NL80211_CQM_TXE_MAX_INTVL = 0x708 + NL80211_CRIT_PROTO_APIPA = 0x3 + NL80211_CRIT_PROTO_DHCP = 0x1 + NL80211_CRIT_PROTO_EAPOL = 0x2 + NL80211_CRIT_PROTO_MAX_DURATION = 0x1388 + NL80211_CRIT_PROTO_UNSPEC = 0x0 + NL80211_DFS_AVAILABLE = 0x2 + NL80211_DFS_ETSI = 0x2 + NL80211_DFS_FCC = 0x1 + NL80211_DFS_JP = 0x3 + NL80211_DFS_UNAVAILABLE = 0x1 + NL80211_DFS_UNSET = 0x0 + NL80211_DFS_USABLE = 0x0 + NL80211_EDMG_BW_CONFIG_MAX = 0xf + NL80211_EDMG_BW_CONFIG_MIN = 0x4 + NL80211_EDMG_CHANNELS_MAX = 0x3c + NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EXTERNAL_AUTH_ABORT = 0x1 + NL80211_EXTERNAL_AUTH_START = 0x0 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 0x10 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 0xf + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 0x12 + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 + NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e + NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 + NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 + NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 0x1a + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 0x30 + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b + NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 + NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe + NL80211_EXT_FEATURE_FILS_STA = 0x9 + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 0x18 + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 0x17 + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 0x16 + NL80211_EXT_FEATURE_MFP_OPTIONAL = 0x15 + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 0xa + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 0xb + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 0x2d + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 0x2 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + 
NL80211_EXT_FEATURE_RRM = 0x1 + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 + NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 0x2f + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 0x1e + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 0x1d + NL80211_EXT_FEATURE_SCAN_START_TIME = 0x3 + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc + NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_RTT = 0x38 + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 + NL80211_EXT_FEATURE_TXQS = 0x1c + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 + NL80211_EXT_FEATURE_VHT_IBSS = 0x0 + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 0x27 + NL80211_FEATURE_ACKTO_ESTIMATION = 0x800000 + NL80211_FEATURE_ACTIVE_MONITOR = 0x20000 + NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 0x4000 + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 0x40000 + NL80211_FEATURE_AP_SCAN = 0x100 + NL80211_FEATURE_CELL_BASE_REG_HINTS = 0x8 + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 0x80000 + NL80211_FEATURE_DYNAMIC_SMPS = 0x2000000 + NL80211_FEATURE_FULL_AP_CLIENT_STATE = 0x8000 + NL80211_FEATURE_HT_IBSS = 0x2 + NL80211_FEATURE_INACTIVITY_TIMER = 0x4 + NL80211_FEATURE_LOW_PRIORITY_SCAN = 0x40 + NL80211_FEATURE_MAC_ON_CREATE = 0x8000000 + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 0x80000000 + NL80211_FEATURE_NEED_OBSS_SCAN = 0x400 + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 0x10 + NL80211_FEATURE_P2P_GO_CTWIN = 0x800 + NL80211_FEATURE_P2P_GO_OPPPS = 0x1000 + NL80211_FEATURE_QUIET = 0x200000 + NL80211_FEATURE_SAE = 0x20 + NL80211_FEATURE_SCAN_FLUSH = 0x80 + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 0x20000000 + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 0x40000000 + NL80211_FEATURE_SK_TX_STATUS = 0x1 + NL80211_FEATURE_STATIC_SMPS = 0x1000000 + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 0x4000000 + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 0x10000000 + NL80211_FEATURE_TX_POWER_INSERTION = 0x400000 + NL80211_FEATURE_USERSPACE_MPM = 0x10000 + NL80211_FEATURE_VIF_TXPOWER = 0x200 + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 0x100000 + NL80211_FILS_DISCOVERY_ATTR_INT_MAX = 0x2 + NL80211_FILS_DISCOVERY_ATTR_INT_MIN = 0x1 + NL80211_FILS_DISCOVERY_ATTR_MAX = 0x3 + NL80211_FILS_DISCOVERY_ATTR_TMPL = 0x3 + NL80211_FILS_DISCOVERY_TMPL_MIN_LEN = 0x2a + NL80211_FREQUENCY_ATTR_16MHZ = 0x19 + NL80211_FREQUENCY_ATTR_1MHZ = 0x15 + NL80211_FREQUENCY_ATTR_2MHZ = 0x16 + NL80211_FREQUENCY_ATTR_4MHZ = 0x17 + NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 + NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 + NL80211_FREQUENCY_ATTR_DISABLED = 0x2 + NL80211_FREQUENCY_ATTR_FREQ = 0x1 + NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe + NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf + NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 + NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 + NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc + NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_HE = 0x13 + NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 + NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa + NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 + NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_OFFSET = 0x14 + NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_RADAR = 0x5 + NL80211_FREQUENCY_ATTR_WMM = 0x12 + NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 + NL80211_FTM_RESP_ATTR_ENABLED = 0x1 + NL80211_FTM_RESP_ATTR_LCI = 
0x2 + NL80211_FTM_RESP_ATTR_MAX = 0x3 + NL80211_FTM_STATS_ASAP_NUM = 0x4 + NL80211_FTM_STATS_FAILED_NUM = 0x3 + NL80211_FTM_STATS_MAX = 0xa + NL80211_FTM_STATS_NON_ASAP_NUM = 0x5 + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM = 0x9 + NL80211_FTM_STATS_PAD = 0xa + NL80211_FTM_STATS_PARTIAL_NUM = 0x2 + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM = 0x8 + NL80211_FTM_STATS_SUCCESS_NUM = 0x1 + NL80211_FTM_STATS_TOTAL_DURATION_MSEC = 0x6 + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM = 0x7 + NL80211_GENL_NAME = "nl80211" + NL80211_HE_BSS_COLOR_ATTR_COLOR = 0x1 + NL80211_HE_BSS_COLOR_ATTR_DISABLED = 0x2 + NL80211_HE_BSS_COLOR_ATTR_MAX = 0x3 + NL80211_HE_BSS_COLOR_ATTR_PARTIAL = 0x3 + NL80211_HE_MAX_CAPABILITY_LEN = 0x36 + NL80211_HE_MIN_CAPABILITY_LEN = 0x10 + NL80211_HE_NSS_MAX = 0x8 + NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP = 0x4 + NL80211_HE_OBSS_PD_ATTR_MAX = 0x6 + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET = 0x2 + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET = 0x1 + NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET = 0x3 + NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP = 0x5 + NL80211_HE_OBSS_PD_ATTR_SR_CTRL = 0x6 + NL80211_HIDDEN_SSID_NOT_IN_USE = 0x0 + NL80211_HIDDEN_SSID_ZERO_CONTENTS = 0x2 + NL80211_HIDDEN_SSID_ZERO_LEN = 0x1 + NL80211_HT_CAPABILITY_LEN = 0x1a + NL80211_IFACE_COMB_BI_MIN_GCD = 0x7 + NL80211_IFACE_COMB_LIMITS = 0x1 + NL80211_IFACE_COMB_MAXNUM = 0x2 + NL80211_IFACE_COMB_NUM_CHANNELS = 0x4 + NL80211_IFACE_COMB_RADAR_DETECT_REGIONS = 0x6 + NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS = 0x5 + NL80211_IFACE_COMB_STA_AP_BI_MATCH = 0x3 + NL80211_IFACE_COMB_UNSPEC = 0x0 + NL80211_IFACE_LIMIT_MAX = 0x1 + NL80211_IFACE_LIMIT_TYPES = 0x2 + NL80211_IFACE_LIMIT_UNSPEC = 0x0 + NL80211_IFTYPE_ADHOC = 0x1 + NL80211_IFTYPE_AKM_ATTR_IFTYPES = 0x1 + NL80211_IFTYPE_AKM_ATTR_MAX = 0x2 + NL80211_IFTYPE_AKM_ATTR_SUITES = 0x2 + NL80211_IFTYPE_AP = 0x3 + NL80211_IFTYPE_AP_VLAN = 0x4 + NL80211_IFTYPE_MAX = 0xc + NL80211_IFTYPE_MESH_POINT = 0x7 + NL80211_IFTYPE_MONITOR = 0x6 + NL80211_IFTYPE_NAN = 0xc + NL80211_IFTYPE_OCB = 0xb + NL80211_IFTYPE_P2P_CLIENT = 0x8 + NL80211_IFTYPE_P2P_DEVICE = 0xa + NL80211_IFTYPE_P2P_GO = 0x9 + NL80211_IFTYPE_STATION = 0x2 + NL80211_IFTYPE_UNSPECIFIED = 0x0 + NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN = 0x18 + NL80211_KCK_LEN = 0x10 + NL80211_KEK_EXT_LEN = 0x20 + NL80211_KEK_LEN = 0x10 + NL80211_KEY_CIPHER = 0x3 + NL80211_KEY_DATA = 0x1 + NL80211_KEY_DEFAULT_BEACON = 0xa + NL80211_KEY_DEFAULT = 0x5 + NL80211_KEY_DEFAULT_MGMT = 0x6 + NL80211_KEY_DEFAULT_TYPE_MULTICAST = 0x2 + NL80211_KEY_DEFAULT_TYPES = 0x8 + NL80211_KEY_DEFAULT_TYPE_UNICAST = 0x1 + NL80211_KEY_IDX = 0x2 + NL80211_KEY_MAX = 0xa + NL80211_KEY_MODE = 0x9 + NL80211_KEY_NO_TX = 0x1 + NL80211_KEY_RX_TX = 0x0 + NL80211_KEY_SEQ = 0x4 + NL80211_KEY_SET_TX = 0x2 + NL80211_KEY_TYPE = 0x7 + NL80211_KEYTYPE_GROUP = 0x0 + NL80211_KEYTYPE_PAIRWISE = 0x1 + NL80211_KEYTYPE_PEERKEY = 0x2 + NL80211_MAX_NR_AKM_SUITES = 0x2 + NL80211_MAX_NR_CIPHER_SUITES = 0x5 + NL80211_MAX_SUPP_HT_RATES = 0x4d + NL80211_MAX_SUPP_RATES = 0x20 + NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MESHCONF_ATTR_MAX = 0x1f + NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 + NL80211_MESHCONF_AWAKE_WINDOW = 0x1b + NL80211_MESHCONF_CONFIRM_TIMEOUT = 0x2 + NL80211_MESHCONF_CONNECTED_TO_AS = 0x1f + NL80211_MESHCONF_CONNECTED_TO_GATE = 0x1d + NL80211_MESHCONF_ELEMENT_TTL = 0xf + NL80211_MESHCONF_FORWARDING = 0x13 + NL80211_MESHCONF_GATE_ANNOUNCEMENTS = 0x11 + NL80211_MESHCONF_HOLDING_TIMEOUT = 0x3 + NL80211_MESHCONF_HT_OPMODE = 0x16 + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT = 0xb + 
NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL = 0x19 + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES = 0x8 + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME = 0xd + NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT = 0x17 + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL = 0x12 + NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL = 0xc + NL80211_MESHCONF_HWMP_RANN_INTERVAL = 0x10 + NL80211_MESHCONF_HWMP_ROOT_INTERVAL = 0x18 + NL80211_MESHCONF_HWMP_ROOTMODE = 0xe + NL80211_MESHCONF_MAX_PEER_LINKS = 0x4 + NL80211_MESHCONF_MAX_RETRIES = 0x5 + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT = 0xa + NL80211_MESHCONF_NOLEARN = 0x1e + NL80211_MESHCONF_PATH_REFRESH_TIME = 0x9 + NL80211_MESHCONF_PLINK_TIMEOUT = 0x1c + NL80211_MESHCONF_POWER_MODE = 0x1a + NL80211_MESHCONF_RETRY_TIMEOUT = 0x1 + NL80211_MESHCONF_RSSI_THRESHOLD = 0x14 + NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR = 0x15 + NL80211_MESHCONF_TTL = 0x6 + NL80211_MESH_POWER_ACTIVE = 0x1 + NL80211_MESH_POWER_DEEP_SLEEP = 0x3 + NL80211_MESH_POWER_LIGHT_SLEEP = 0x2 + NL80211_MESH_POWER_MAX = 0x3 + NL80211_MESH_POWER_UNKNOWN = 0x0 + NL80211_MESH_SETUP_ATTR_MAX = 0x8 + NL80211_MESH_SETUP_AUTH_PROTOCOL = 0x8 + NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC = 0x2 + NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL = 0x1 + NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC = 0x6 + NL80211_MESH_SETUP_IE = 0x3 + NL80211_MESH_SETUP_USERSPACE_AMPE = 0x5 + NL80211_MESH_SETUP_USERSPACE_AUTH = 0x4 + NL80211_MESH_SETUP_USERSPACE_MPM = 0x7 + NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE = 0x3 + NL80211_MFP_NO = 0x0 + NL80211_MFP_OPTIONAL = 0x2 + NL80211_MFP_REQUIRED = 0x1 + NL80211_MIN_REMAIN_ON_CHANNEL_TIME = 0xa + NL80211_MNTR_FLAG_ACTIVE = 0x6 + NL80211_MNTR_FLAG_CONTROL = 0x3 + NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 + NL80211_MNTR_FLAG_FCSFAIL = 0x1 + NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_OTHER_BSS = 0x4 + NL80211_MNTR_FLAG_PLCPFAIL = 0x2 + NL80211_MPATH_FLAG_ACTIVE = 0x1 + NL80211_MPATH_FLAG_FIXED = 0x8 + NL80211_MPATH_FLAG_RESOLVED = 0x10 + NL80211_MPATH_FLAG_RESOLVING = 0x2 + NL80211_MPATH_FLAG_SN_VALID = 0x4 + NL80211_MPATH_INFO_DISCOVERY_RETRIES = 0x7 + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT = 0x6 + NL80211_MPATH_INFO_EXPTIME = 0x4 + NL80211_MPATH_INFO_FLAGS = 0x5 + NL80211_MPATH_INFO_FRAME_QLEN = 0x1 + NL80211_MPATH_INFO_HOP_COUNT = 0x8 + NL80211_MPATH_INFO_MAX = 0x9 + NL80211_MPATH_INFO_METRIC = 0x3 + NL80211_MPATH_INFO_PATH_CHANGE = 0x9 + NL80211_MPATH_INFO_SN = 0x2 + NL80211_MULTICAST_GROUP_CONFIG = "config" + NL80211_MULTICAST_GROUP_MLME = "mlme" + NL80211_MULTICAST_GROUP_NAN = "nan" + NL80211_MULTICAST_GROUP_REG = "regulatory" + NL80211_MULTICAST_GROUP_SCAN = "scan" + NL80211_MULTICAST_GROUP_TESTMODE = "testmode" + NL80211_MULTICAST_GROUP_VENDOR = "vendor" + NL80211_NAN_FUNC_ATTR_MAX = 0x10 + NL80211_NAN_FUNC_CLOSE_RANGE = 0x9 + NL80211_NAN_FUNC_FOLLOW_UP = 0x2 + NL80211_NAN_FUNC_FOLLOW_UP_DEST = 0x8 + NL80211_NAN_FUNC_FOLLOW_UP_ID = 0x6 + NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID = 0x7 + NL80211_NAN_FUNC_INSTANCE_ID = 0xf + NL80211_NAN_FUNC_MAX_TYPE = 0x2 + NL80211_NAN_FUNC_PUBLISH_BCAST = 0x4 + NL80211_NAN_FUNC_PUBLISH = 0x0 + NL80211_NAN_FUNC_PUBLISH_TYPE = 0x3 + NL80211_NAN_FUNC_RX_MATCH_FILTER = 0xd + NL80211_NAN_FUNC_SERVICE_ID = 0x2 + NL80211_NAN_FUNC_SERVICE_ID_LEN = 0x6 + NL80211_NAN_FUNC_SERVICE_INFO = 0xb + NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN = 0xff + NL80211_NAN_FUNC_SRF = 0xc + NL80211_NAN_FUNC_SRF_MAX_LEN = 0xff + NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE = 0x5 + NL80211_NAN_FUNC_SUBSCRIBE = 0x1 + NL80211_NAN_FUNC_TERM_REASON = 0x10 + NL80211_NAN_FUNC_TERM_REASON_ERROR = 0x2 + NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED = 
0x1 + NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST = 0x0 + NL80211_NAN_FUNC_TTL = 0xa + NL80211_NAN_FUNC_TX_MATCH_FILTER = 0xe + NL80211_NAN_FUNC_TYPE = 0x1 + NL80211_NAN_MATCH_ATTR_MAX = 0x2 + NL80211_NAN_MATCH_FUNC_LOCAL = 0x1 + NL80211_NAN_MATCH_FUNC_PEER = 0x2 + NL80211_NAN_SOLICITED_PUBLISH = 0x1 + NL80211_NAN_SRF_ATTR_MAX = 0x4 + NL80211_NAN_SRF_BF = 0x2 + NL80211_NAN_SRF_BF_IDX = 0x3 + NL80211_NAN_SRF_INCLUDE = 0x1 + NL80211_NAN_SRF_MAC_ADDRS = 0x4 + NL80211_NAN_UNSOLICITED_PUBLISH = 0x2 + NL80211_NUM_ACS = 0x4 + NL80211_P2P_PS_SUPPORTED = 0x1 + NL80211_P2P_PS_UNSUPPORTED = 0x0 + NL80211_PKTPAT_MASK = 0x1 + NL80211_PKTPAT_OFFSET = 0x3 + NL80211_PKTPAT_PATTERN = 0x2 + NL80211_PLINK_ACTION_BLOCK = 0x2 + NL80211_PLINK_ACTION_NO_ACTION = 0x0 + NL80211_PLINK_ACTION_OPEN = 0x1 + NL80211_PLINK_BLOCKED = 0x6 + NL80211_PLINK_CNF_RCVD = 0x3 + NL80211_PLINK_ESTAB = 0x4 + NL80211_PLINK_HOLDING = 0x5 + NL80211_PLINK_LISTEN = 0x0 + NL80211_PLINK_OPN_RCVD = 0x2 + NL80211_PLINK_OPN_SNT = 0x1 + NL80211_PMKSA_CANDIDATE_BSSID = 0x2 + NL80211_PMKSA_CANDIDATE_INDEX = 0x1 + NL80211_PMKSA_CANDIDATE_PREAUTH = 0x3 + NL80211_PMSR_ATTR_MAX = 0x5 + NL80211_PMSR_ATTR_MAX_PEERS = 0x1 + NL80211_PMSR_ATTR_PEERS = 0x5 + NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR = 0x3 + NL80211_PMSR_ATTR_REPORT_AP_TSF = 0x2 + NL80211_PMSR_ATTR_TYPE_CAPA = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS = 0x6 + NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT = 0x7 + NL80211_PMSR_FTM_CAPA_ATTR_MAX = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP = 0x2 + NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES = 0x5 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC = 0x4 + NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI = 0x3 + NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED = 0x9 + NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS = 0x7 + NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP = 0x5 + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE = 0x1 + NL80211_PMSR_FTM_FAILURE_PEER_BUSY = 0x6 + NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE = 0x4 + NL80211_PMSR_FTM_FAILURE_REJECTED = 0x2 + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 + NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 + NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 + NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 + NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK = 0xc + NL80211_PMSR_FTM_REQ_ATTR_MAX = 0xd + NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED = 0xb + NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP = 0x3 + NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES = 0x7 + NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE = 0x2 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC = 0x9 + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI = 0x8 + NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED = 0xa + NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION = 0x7 + NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX = 0x2 + NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME = 0x5 + NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC = 0x14 + NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG = 0x10 + NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD = 0x12 + NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE = 0x11 + NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON = 0x1 + NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST = 0x8 + NL80211_PMSR_FTM_RESP_ATTR_LCI = 0x13 + NL80211_PMSR_FTM_RESP_ATTR_MAX = 0x15 + NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP = 0x6 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS = 0x3 + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES = 0x4 + NL80211_PMSR_FTM_RESP_ATTR_PAD = 0x15 + 
NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG = 0x9 + NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD = 0xa + NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG = 0xd + NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD = 0xf + NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE = 0xe + NL80211_PMSR_FTM_RESP_ATTR_RX_RATE = 0xc + NL80211_PMSR_FTM_RESP_ATTR_TX_RATE = 0xb + NL80211_PMSR_PEER_ATTR_ADDR = 0x1 + NL80211_PMSR_PEER_ATTR_CHAN = 0x2 + NL80211_PMSR_PEER_ATTR_MAX = 0x4 + NL80211_PMSR_PEER_ATTR_REQ = 0x3 + NL80211_PMSR_PEER_ATTR_RESP = 0x4 + NL80211_PMSR_REQ_ATTR_DATA = 0x1 + NL80211_PMSR_REQ_ATTR_GET_AP_TSF = 0x2 + NL80211_PMSR_REQ_ATTR_MAX = 0x2 + NL80211_PMSR_RESP_ATTR_AP_TSF = 0x4 + NL80211_PMSR_RESP_ATTR_DATA = 0x1 + NL80211_PMSR_RESP_ATTR_FINAL = 0x5 + NL80211_PMSR_RESP_ATTR_HOST_TIME = 0x3 + NL80211_PMSR_RESP_ATTR_MAX = 0x6 + NL80211_PMSR_RESP_ATTR_PAD = 0x6 + NL80211_PMSR_RESP_ATTR_STATUS = 0x2 + NL80211_PMSR_STATUS_FAILURE = 0x3 + NL80211_PMSR_STATUS_REFUSED = 0x1 + NL80211_PMSR_STATUS_SUCCESS = 0x0 + NL80211_PMSR_STATUS_TIMEOUT = 0x2 + NL80211_PMSR_TYPE_FTM = 0x1 + NL80211_PMSR_TYPE_INVALID = 0x0 + NL80211_PMSR_TYPE_MAX = 0x1 + NL80211_PREAMBLE_DMG = 0x3 + NL80211_PREAMBLE_HE = 0x4 + NL80211_PREAMBLE_HT = 0x1 + NL80211_PREAMBLE_LEGACY = 0x0 + NL80211_PREAMBLE_VHT = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 0x8 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 0x4 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 0x2 + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 0x1 + NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 0x1 + NL80211_PS_DISABLED = 0x0 + NL80211_PS_ENABLED = 0x1 + NL80211_RADAR_CAC_ABORTED = 0x2 + NL80211_RADAR_CAC_FINISHED = 0x1 + NL80211_RADAR_CAC_STARTED = 0x5 + NL80211_RADAR_DETECTED = 0x0 + NL80211_RADAR_NOP_FINISHED = 0x3 + NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 + NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb + NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc + NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 + NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_BITRATE32 = 0x5 + NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_HE_1XLTF = 0x0 + NL80211_RATE_INFO_HE_2XLTF = 0x1 + NL80211_RATE_INFO_HE_4XLTF = 0x2 + NL80211_RATE_INFO_HE_DCM = 0x10 + NL80211_RATE_INFO_HE_GI_0_8 = 0x0 + NL80211_RATE_INFO_HE_GI_1_6 = 0x1 + NL80211_RATE_INFO_HE_GI_3_2 = 0x2 + NL80211_RATE_INFO_HE_GI = 0xf + NL80211_RATE_INFO_HE_MCS = 0xd + NL80211_RATE_INFO_HE_NSS = 0xe + NL80211_RATE_INFO_HE_RU_ALLOC_106 = 0x2 + NL80211_RATE_INFO_HE_RU_ALLOC_242 = 0x3 + NL80211_RATE_INFO_HE_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_HE_RU_ALLOC_2x996 = 0x6 + NL80211_RATE_INFO_HE_RU_ALLOC_484 = 0x4 + NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 + NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 + NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_SHORT_GI = 0x4 + NL80211_RATE_INFO_VHT_MCS = 0x6 + NL80211_RATE_INFO_VHT_NSS = 0x7 + NL80211_REGDOM_SET_BY_CORE = 0x0 + NL80211_REGDOM_SET_BY_COUNTRY_IE = 0x3 + NL80211_REGDOM_SET_BY_DRIVER = 0x2 + NL80211_REGDOM_SET_BY_USER = 0x1 + NL80211_REGDOM_TYPE_COUNTRY = 0x0 + NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 + NL80211_REGDOM_TYPE_INTERSECTION = 0x3 + NL80211_REGDOM_TYPE_WORLD = 0x1 + NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REKEY_DATA_AKM = 0x4 + NL80211_REKEY_DATA_KCK = 0x2 + NL80211_REKEY_DATA_KEK = 0x1 + NL80211_REKEY_DATA_REPLAY_CTR = 0x3 + NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_AUTO_BW = 0x800 + NL80211_RRF_DFS = 0x10 + NL80211_RRF_GO_CONCURRENT = 0x1000 + NL80211_RRF_IR_CONCURRENT = 0x1000 + NL80211_RRF_NO_160MHZ = 
0x10000 + NL80211_RRF_NO_80MHZ = 0x8000 + NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_HE = 0x20000 + NL80211_RRF_NO_HT40 = 0x6000 + NL80211_RRF_NO_HT40MINUS = 0x2000 + NL80211_RRF_NO_HT40PLUS = 0x4000 + NL80211_RRF_NO_IBSS = 0x80 + NL80211_RRF_NO_INDOOR = 0x4 + NL80211_RRF_NO_IR_ALL = 0x180 + NL80211_RRF_NO_IR = 0x80 + NL80211_RRF_NO_OFDM = 0x1 + NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PTMP_ONLY = 0x40 + NL80211_RRF_PTP_ONLY = 0x20 + NL80211_RXMGMT_FLAG_ANSWERED = 0x1 + NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 0x2 + NL80211_SAE_PWE_BOTH = 0x3 + NL80211_SAE_PWE_HASH_TO_ELEMENT = 0x2 + NL80211_SAE_PWE_HUNT_AND_PECK = 0x1 + NL80211_SAE_PWE_UNSPECIFIED = 0x0 + NL80211_SAR_ATTR_MAX = 0x2 + NL80211_SAR_ATTR_SPECS = 0x2 + NL80211_SAR_ATTR_SPECS_END_FREQ = 0x4 + NL80211_SAR_ATTR_SPECS_MAX = 0x4 + NL80211_SAR_ATTR_SPECS_POWER = 0x1 + NL80211_SAR_ATTR_SPECS_RANGE_INDEX = 0x2 + NL80211_SAR_ATTR_SPECS_START_FREQ = 0x3 + NL80211_SAR_ATTR_TYPE = 0x1 + NL80211_SAR_TYPE_POWER = 0x0 + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 0x20 + NL80211_SCAN_FLAG_AP = 0x4 + NL80211_SCAN_FLAG_COLOCATED_6GHZ = 0x4000 + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 0x10 + NL80211_SCAN_FLAG_FLUSH = 0x2 + NL80211_SCAN_FLAG_FREQ_KHZ = 0x2000 + NL80211_SCAN_FLAG_HIGH_ACCURACY = 0x400 + NL80211_SCAN_FLAG_LOW_POWER = 0x200 + NL80211_SCAN_FLAG_LOW_PRIORITY = 0x1 + NL80211_SCAN_FLAG_LOW_SPAN = 0x100 + NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 0x1000 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x80 + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 0x40 + NL80211_SCAN_FLAG_RANDOM_ADDR = 0x8 + NL80211_SCAN_FLAG_RANDOM_SN = 0x800 + NL80211_SCAN_RSSI_THOLD_OFF = -0x12c + NL80211_SCHED_SCAN_MATCH_ATTR_BSSID = 0x5 + NL80211_SCHED_SCAN_MATCH_ATTR_MAX = 0x6 + NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI = 0x3 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST = 0x4 + NL80211_SCHED_SCAN_MATCH_ATTR_RSSI = 0x2 + NL80211_SCHED_SCAN_MATCH_ATTR_SSID = 0x1 + NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI = 0x6 + NL80211_SCHED_SCAN_PLAN_INTERVAL = 0x1 + NL80211_SCHED_SCAN_PLAN_ITERATIONS = 0x2 + NL80211_SCHED_SCAN_PLAN_MAX = 0x2 + NL80211_SMPS_DYNAMIC = 0x2 + NL80211_SMPS_MAX = 0x2 + NL80211_SMPS_OFF = 0x0 + NL80211_SMPS_STATIC = 0x1 + NL80211_STA_BSS_PARAM_BEACON_INTERVAL = 0x5 + NL80211_STA_BSS_PARAM_CTS_PROT = 0x1 + NL80211_STA_BSS_PARAM_DTIM_PERIOD = 0x4 + NL80211_STA_BSS_PARAM_MAX = 0x5 + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE = 0x2 + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME = 0x3 + NL80211_STA_FLAG_ASSOCIATED = 0x7 + NL80211_STA_FLAG_AUTHENTICATED = 0x5 + NL80211_STA_FLAG_AUTHORIZED = 0x1 + NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX_OLD_API = 0x6 + NL80211_STA_FLAG_MFP = 0x4 + NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_TDLS_PEER = 0x6 + NL80211_STA_FLAG_WME = 0x3 + NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_ACK_SIGNAL = 0x22 + NL80211_STA_INFO_AIRTIME_LINK_METRIC = 0x29 + NL80211_STA_INFO_AIRTIME_WEIGHT = 0x28 + NL80211_STA_INFO_ASSOC_AT_BOOTTIME = 0x2a + NL80211_STA_INFO_BEACON_LOSS = 0x12 + NL80211_STA_INFO_BEACON_RX = 0x1d + NL80211_STA_INFO_BEACON_SIGNAL_AVG = 0x1e + NL80211_STA_INFO_BSS_PARAM = 0xf + NL80211_STA_INFO_CHAIN_SIGNAL_AVG = 0x1a + NL80211_STA_INFO_CHAIN_SIGNAL = 0x19 + NL80211_STA_INFO_CONNECTED_TIME = 0x10 + NL80211_STA_INFO_CONNECTED_TO_AS = 0x2b + NL80211_STA_INFO_CONNECTED_TO_GATE = 0x26 + NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG = 0x23 + NL80211_STA_INFO_EXPECTED_THROUGHPUT = 0x1b + NL80211_STA_INFO_FCS_ERROR_COUNT = 0x25 + NL80211_STA_INFO_INACTIVE_TIME = 0x1 + 
NL80211_STA_INFO_LLID = 0x4 + NL80211_STA_INFO_LOCAL_PM = 0x14 + NL80211_STA_INFO_MAX = 0x2b + NL80211_STA_INFO_NONPEER_PM = 0x16 + NL80211_STA_INFO_PAD = 0x21 + NL80211_STA_INFO_PEER_PM = 0x15 + NL80211_STA_INFO_PLID = 0x5 + NL80211_STA_INFO_PLINK_STATE = 0x6 + NL80211_STA_INFO_RX_BITRATE = 0xe + NL80211_STA_INFO_RX_BYTES64 = 0x17 + NL80211_STA_INFO_RX_BYTES = 0x2 + NL80211_STA_INFO_RX_DROP_MISC = 0x1c + NL80211_STA_INFO_RX_DURATION = 0x20 + NL80211_STA_INFO_RX_MPDUS = 0x24 + NL80211_STA_INFO_RX_PACKETS = 0x9 + NL80211_STA_INFO_SIGNAL_AVG = 0xd + NL80211_STA_INFO_SIGNAL = 0x7 + NL80211_STA_INFO_STA_FLAGS = 0x11 + NL80211_STA_INFO_TID_STATS = 0x1f + NL80211_STA_INFO_T_OFFSET = 0x13 + NL80211_STA_INFO_TX_BITRATE = 0x8 + NL80211_STA_INFO_TX_BYTES64 = 0x18 + NL80211_STA_INFO_TX_BYTES = 0x3 + NL80211_STA_INFO_TX_DURATION = 0x27 + NL80211_STA_INFO_TX_FAILED = 0xc + NL80211_STA_INFO_TX_PACKETS = 0xa + NL80211_STA_INFO_TX_RETRIES = 0xb + NL80211_STA_WME_MAX = 0x2 + NL80211_STA_WME_MAX_SP = 0x2 + NL80211_STA_WME_UAPSD_QUEUES = 0x1 + NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_CHANNEL_TIME = 0x4 + NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_CHANNEL_TIME_RX = 0x7 + NL80211_SURVEY_INFO_CHANNEL_TIME_TX = 0x8 + NL80211_SURVEY_INFO_FREQUENCY = 0x1 + NL80211_SURVEY_INFO_FREQUENCY_OFFSET = 0xc + NL80211_SURVEY_INFO_IN_USE = 0x3 + NL80211_SURVEY_INFO_MAX = 0xc + NL80211_SURVEY_INFO_NOISE = 0x2 + NL80211_SURVEY_INFO_PAD = 0xa + NL80211_SURVEY_INFO_TIME_BSS_RX = 0xb + NL80211_SURVEY_INFO_TIME_BUSY = 0x5 + NL80211_SURVEY_INFO_TIME = 0x4 + NL80211_SURVEY_INFO_TIME_EXT_BUSY = 0x6 + NL80211_SURVEY_INFO_TIME_RX = 0x7 + NL80211_SURVEY_INFO_TIME_SCAN = 0x9 + NL80211_SURVEY_INFO_TIME_TX = 0x8 + NL80211_TDLS_DISABLE_LINK = 0x4 + NL80211_TDLS_DISCOVERY_REQ = 0x0 + NL80211_TDLS_ENABLE_LINK = 0x3 + NL80211_TDLS_PEER_HE = 0x8 + NL80211_TDLS_PEER_HT = 0x1 + NL80211_TDLS_PEER_VHT = 0x2 + NL80211_TDLS_PEER_WMM = 0x4 + NL80211_TDLS_SETUP = 0x1 + NL80211_TDLS_TEARDOWN = 0x2 + NL80211_TID_CONFIG_ATTR_AMPDU_CTRL = 0x9 + NL80211_TID_CONFIG_ATTR_AMSDU_CTRL = 0xb + NL80211_TID_CONFIG_ATTR_MAX = 0xd + NL80211_TID_CONFIG_ATTR_NOACK = 0x6 + NL80211_TID_CONFIG_ATTR_OVERRIDE = 0x4 + NL80211_TID_CONFIG_ATTR_PAD = 0x1 + NL80211_TID_CONFIG_ATTR_PEER_SUPP = 0x3 + NL80211_TID_CONFIG_ATTR_RETRY_LONG = 0x8 + NL80211_TID_CONFIG_ATTR_RETRY_SHORT = 0x7 + NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL = 0xa + NL80211_TID_CONFIG_ATTR_TIDS = 0x5 + NL80211_TID_CONFIG_ATTR_TX_RATE = 0xd + NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE = 0xc + NL80211_TID_CONFIG_ATTR_VIF_SUPP = 0x2 + NL80211_TID_CONFIG_DISABLE = 0x1 + NL80211_TID_CONFIG_ENABLE = 0x0 + NL80211_TID_STATS_MAX = 0x6 + NL80211_TID_STATS_PAD = 0x5 + NL80211_TID_STATS_RX_MSDU = 0x1 + NL80211_TID_STATS_TX_MSDU = 0x2 + NL80211_TID_STATS_TX_MSDU_FAILED = 0x4 + NL80211_TID_STATS_TX_MSDU_RETRIES = 0x3 + NL80211_TID_STATS_TXQ_STATS = 0x6 + NL80211_TIMEOUT_ASSOC = 0x3 + NL80211_TIMEOUT_AUTH = 0x2 + NL80211_TIMEOUT_SCAN = 0x1 + NL80211_TIMEOUT_UNSPECIFIED = 0x0 + NL80211_TKIP_DATA_OFFSET_ENCR_KEY = 0x0 + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY = 0x18 + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY = 0x10 + NL80211_TX_POWER_AUTOMATIC = 0x0 + NL80211_TX_POWER_FIXED = 0x2 + NL80211_TX_POWER_LIMITED = 0x1 + NL80211_TXQ_ATTR_AC = 0x1 + NL80211_TXQ_ATTR_AIFS = 0x5 + NL80211_TXQ_ATTR_CWMAX = 0x4 + NL80211_TXQ_ATTR_CWMIN = 0x3 + NL80211_TXQ_ATTR_MAX = 0x5 + NL80211_TXQ_ATTR_QUEUE = 0x1 + NL80211_TXQ_ATTR_TXOP = 0x2 + NL80211_TXQ_Q_BE = 0x2 + NL80211_TXQ_Q_BK = 0x3 + NL80211_TXQ_Q_VI = 0x1 + 
NL80211_TXQ_Q_VO = 0x0 + NL80211_TXQ_STATS_BACKLOG_BYTES = 0x1 + NL80211_TXQ_STATS_BACKLOG_PACKETS = 0x2 + NL80211_TXQ_STATS_COLLISIONS = 0x8 + NL80211_TXQ_STATS_DROPS = 0x4 + NL80211_TXQ_STATS_ECN_MARKS = 0x5 + NL80211_TXQ_STATS_FLOWS = 0x3 + NL80211_TXQ_STATS_MAX = 0xb + NL80211_TXQ_STATS_MAX_FLOWS = 0xb + NL80211_TXQ_STATS_OVERLIMIT = 0x6 + NL80211_TXQ_STATS_OVERMEMORY = 0x7 + NL80211_TXQ_STATS_TX_BYTES = 0x9 + NL80211_TXQ_STATS_TX_PACKETS = 0xa + NL80211_TX_RATE_AUTOMATIC = 0x0 + NL80211_TXRATE_DEFAULT_GI = 0x0 + NL80211_TX_RATE_FIXED = 0x2 + NL80211_TXRATE_FORCE_LGI = 0x2 + NL80211_TXRATE_FORCE_SGI = 0x1 + NL80211_TXRATE_GI = 0x4 + NL80211_TXRATE_HE = 0x5 + NL80211_TXRATE_HE_GI = 0x6 + NL80211_TXRATE_HE_LTF = 0x7 + NL80211_TXRATE_HT = 0x2 + NL80211_TXRATE_LEGACY = 0x1 + NL80211_TX_RATE_LIMITED = 0x1 + NL80211_TXRATE_MAX = 0x7 + NL80211_TXRATE_MCS = 0x2 + NL80211_TXRATE_VHT = 0x3 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT = 0x1 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX = 0x2 + NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL = 0x2 + NL80211_USER_REG_HINT_CELL_BASE = 0x1 + NL80211_USER_REG_HINT_INDOOR = 0x2 + NL80211_USER_REG_HINT_USER = 0x0 + NL80211_VENDOR_ID_IS_LINUX = 0x80000000 + NL80211_VHT_CAPABILITY_LEN = 0xc + NL80211_VHT_NSS_MAX = 0x8 + NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WMMR_AIFSN = 0x3 + NL80211_WMMR_CW_MAX = 0x2 + NL80211_WMMR_CW_MIN = 0x1 + NL80211_WMMR_MAX = 0x4 + NL80211_WMMR_TXOP = 0x4 + NL80211_WOWLAN_PKTPAT_MASK = 0x1 + NL80211_WOWLAN_PKTPAT_OFFSET = 0x3 + NL80211_WOWLAN_PKTPAT_PATTERN = 0x2 + NL80211_WOWLAN_TCP_DATA_INTERVAL = 0x9 + NL80211_WOWLAN_TCP_DATA_PAYLOAD = 0x6 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ = 0x7 + NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN = 0x8 + NL80211_WOWLAN_TCP_DST_IPV4 = 0x2 + NL80211_WOWLAN_TCP_DST_MAC = 0x3 + NL80211_WOWLAN_TCP_DST_PORT = 0x5 + NL80211_WOWLAN_TCP_SRC_IPV4 = 0x1 + NL80211_WOWLAN_TCP_SRC_PORT = 0x4 + NL80211_WOWLAN_TCP_WAKE_MASK = 0xb + NL80211_WOWLAN_TCP_WAKE_PAYLOAD = 0xa + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE = 0x8 + NL80211_WOWLAN_TRIG_ANY = 0x1 + NL80211_WOWLAN_TRIG_DISCONNECT = 0x2 + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST = 0x7 + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE = 0x6 + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED = 0x5 + NL80211_WOWLAN_TRIG_MAGIC_PKT = 0x3 + NL80211_WOWLAN_TRIG_NET_DETECT = 0x12 + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS = 0x13 + NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 + NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 + NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa + NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc + NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN = 0xd + NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST = 0x10 + NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH = 0xf + NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS = 0x11 + NL80211_WPA_VERSION_1 = 0x1 + NL80211_WPA_VERSION_2 = 0x2 + NL80211_WPA_VERSION_3 = 0x4 +) + +const ( + FRA_UNSPEC = 0x0 + FRA_DST = 0x1 + FRA_SRC = 0x2 + FRA_IIFNAME = 0x3 + FRA_GOTO = 0x4 + FRA_UNUSED2 = 0x5 + FRA_PRIORITY = 0x6 + FRA_UNUSED3 = 0x7 + FRA_UNUSED4 = 0x8 + FRA_UNUSED5 = 0x9 + FRA_FWMARK = 0xa + FRA_FLOW = 0xb + FRA_TUN_ID = 0xc + FRA_SUPPRESS_IFGROUP = 0xd + FRA_SUPPRESS_PREFIXLEN = 0xe + FRA_TABLE = 0xf + FRA_FWMASK = 0x10 + FRA_OIFNAME = 0x11 + FRA_PAD = 0x12 + FRA_L3MDEV = 0x13 + FRA_UID_RANGE = 0x14 + FRA_PROTOCOL = 0x15 + FRA_IP_PROTO = 0x16 + FRA_SPORT_RANGE = 0x17 + FRA_DPORT_RANGE = 0x18 + FR_ACT_UNSPEC = 0x0 + FR_ACT_TO_TBL = 0x1 + FR_ACT_GOTO = 0x2 + FR_ACT_NOP = 0x3 + FR_ACT_RES3 = 0x4 + FR_ACT_RES4 = 0x5 + 
FR_ACT_BLACKHOLE = 0x6 + FR_ACT_UNREACHABLE = 0x7 + FR_ACT_PROHIBIT = 0x8 +) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index bea2549455ea..89c516a29acf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/386/cgo -- -Wall -Werror -static -I/tmp/386/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -240,6 +240,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -250,6 +254,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -311,6 +328,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index b8c8f2894335..62b4fb269963 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/amd64/cgo -- -Wall -Werror -static -I/tmp/amd64/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -255,6 +255,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -265,6 +269,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -324,6 +342,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 4db44301632b..e86b35893ece 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm/cgo -- -Wall -Werror -static -I/tmp/arm/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
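The per-architecture hunks above (386, amd64, and the ports that follow) introduce SIG_BLOCK, SIG_UNBLOCK, and SIG_SETMASK alongside a Siginfo type. The values are not portable — most ports use 0/1/2, mips uses 1/2/3, sparc64 uses 1/2/4 — which is why they are generated into each ztypes file rather than defined once. A minimal, Linux-only sketch of the intended use, assuming the vendored revision also exports unix.PthreadSigmask (the helper these constants were added to support); the bit arithmetic is illustrative and only handles signal numbers that fit in the first word of Sigset_t:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Block SIGUSR1 on the calling thread with the newly generated
	// SIG_BLOCK constant. Sigset_t and the SIG_* values come from the
	// per-GOARCH ztypes files, so the same source picks up the right
	// numbers on mips and sparc64 as well.
	var set, old unix.Sigset_t
	set.Val[0] |= 1 << (uint(unix.SIGUSR1) - 1) // illustrative; first word only

	if err := unix.PthreadSigmask(unix.SIG_BLOCK, &set, &old); err != nil {
		fmt.Println("sigmask:", err)
		return
	}

	// ... do work with SIGUSR1 held off ...

	// Restore the previous mask with SIG_SETMASK.
	_ = unix.PthreadSigmask(unix.SIG_SETMASK, &old, nil)
}
```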
//go:build arm && linux @@ -231,6 +231,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -241,6 +245,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -302,6 +319,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3ebcad8a8873..6c6be4c911d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm64/cgo -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -234,6 +234,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -244,6 +248,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -303,6 +321,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000000..4982ea355a28 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,691 @@ +// cgo -godefs -objdir=/tmp/loong64/cgo -- -Wall -Werror -static -I/tmp/loong64/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
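OPEN_TREE_CLOEXEC is likewise generated per architecture because its value is that of O_CLOEXEC, which differs on sparc64 (0x400000, visible further down) from the other Linux ports (0x80000). A hedged sketch of how it combines with unix.OpenTree to detach a mount subtree into a close-on-exec file descriptor; /mnt/data is a placeholder path, and the call needs CAP_SYS_ADMIN:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Clone the subtree at /mnt/data (placeholder path) into a detached
	// mount fd. OPEN_TREE_CLOEXEC marks the fd close-on-exec; its value
	// is O_CLOEXEC, hence the per-architecture definitions above.
	fd, err := unix.OpenTree(unix.AT_FDCWD, "/mnt/data",
		unix.OPEN_TREE_CLONE|unix.OPEN_TREE_CLOEXEC|unix.AT_RECURSIVE)
	if err != nil {
		fmt.Println("open_tree:", err) // typically EPERM without CAP_SYS_ADMIN
		return
	}
	defer unix.Close(fd)
	fmt.Println("detached mount fd:", fd)
}
```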
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + 
Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type HDGeometry struct { + Heads 
uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + 
Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 3eb33e48ab53..173141a67032 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips/cgo -- -Wall -Werror -static -I/tmp/mips/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -246,6 +250,19 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -307,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 79a94467252f..93ae4c51673d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64/cgo -- -Wall -Werror -static -I/tmp/mips64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -247,6 +251,20 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -306,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 8f4b107cad36..4e4e510ca519 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64le/cgo -- -Wall -Werror -static -I/tmp/mips64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -237,6 +237,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -247,6 +251,20 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -306,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index e4eb2179811f..3f5ba013d995 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mipsle/cgo -- -Wall -Werror -static -I/tmp/mipsle/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux @@ -236,6 +236,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -246,6 +250,19 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + +type Siginfo struct { + Signo int32 + Code int32 + Errno int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -307,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index d5b21f0f7da5..71dfe7cdb47a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc/cgo -- -Wall -Werror -static -I/tmp/ppc/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux @@ -243,6 +243,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -253,6 +257,19 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ [116]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -314,6 +331,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 5188d142b9f5..3a2b7f0a666e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64/cgo -- -Wall -Werror -static -I/tmp/ppc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -254,6 +258,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -313,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index de4dd4c736e8..a52d62756328 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64le/cgo -- -Wall -Werror -static -I/tmp/ppc64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -244,6 +244,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -254,6 +258,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -313,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dccbf9b06040..dfc007d8a691 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/riscv64/cgo -- -Wall -Werror -static -I/tmp/riscv64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux @@ -262,6 +262,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -272,6 +276,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -331,6 +349,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 6358806106f0..b53cb9103d30 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/s390x/cgo -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -210,8 +210,8 @@ type PtraceFpregs struct { } type PtracePer struct { - _ [0]uint64 - _ [32]byte + Control_regs [3]uint64 + _ [8]byte Starting_addr uint64 Ending_addr uint64 Perc_atmid uint16 @@ -257,6 +257,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + const ( POLLRDHUP = 0x2000 ) @@ -267,6 +271,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -326,6 +344,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 765edc13ff25..fe0aa3547280 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/sparc64/cgo -- -Wall -Werror -static -I/tmp/sparc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -239,6 +239,10 @@ type EpollEvent struct { Pad int32 } +const ( + OPEN_TREE_CLOEXEC = 0x400000 +) + const ( POLLRDHUP = 0x800 ) @@ -249,6 +253,20 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x4 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + type Termios struct { Iflag uint32 Oflag uint32 @@ -308,6 +326,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index baf5fe650444..2ed718ca06a7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte Pad_cgo_0 [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e21ae8ecfa6f..b4fb97ebe650 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -96,10 +96,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index f190651cd964..2c4675040ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 84747c582cfc..ddee04514708 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ac5c8b6370b1..eb13d4e8bfc2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - 
F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go new file mode 100644 index 000000000000..d6724c0102c8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} 
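The OpenBSD Statfs_t hunks above (and the two new ppc64 and riscv64 files) change F_fstypename, F_mntonname, F_mntfromname, and F_mntfromspec from [N]int8 to [N]byte. The practical effect is that the NUL-terminated names convert to Go strings without an element-by-element copy. A short OpenBSD-only sketch using unix.ByteSliceToString, which stops at the first NUL:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/", &st); err != nil {
		fmt.Println("statfs:", err)
		return
	}
	// With the fields declared as [N]byte rather than [N]int8, the
	// NUL-terminated names convert directly; ByteSliceToString stops
	// at the first NUL byte.
	fmt.Printf("%s mounted on %s\n",
		unix.ByteSliceToString(st.F_fstypename[:]),
		unix.ByteSliceToString(st.F_mntonname[:]))
}
```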
+ +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + 
+type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go new file mode 100644 index 000000000000..ddfd27a434a1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type 
Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + 
Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ad4aad279686..0400747c67d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -178,7 +178,7 @@ type Linger struct { } type Iovec struct { - Base *int8 + Base *byte Len uint64 } @@ -480,3 +480,38 @@ const ( MOUNTEDOVER = 0x40000000 FILE_EXCEPTION = 0x60000070 ) + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_LINK = 0x530c + I_UNLINK = 0x530d + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca +) + +type strbuf struct { + Maxlen int32 + Len int32 + Buf *int8 +} + +type Strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type Lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 4ab638cb94c7..aec1efcb306a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -339,7 +339,7 @@ type Statfs_t struct { Flags uint64 } -type Dirent struct { +type direntLE struct { Reclen uint16 Namlen uint16 Ino uint32 @@ -347,6 +347,15 @@ type Dirent struct { Name [256]byte } +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + type FdSet struct { Bits [64]int32 } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 855698bb2828..75980fd44ad7 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -15,11 +15,11 @@ import ( // in http://msdn.microsoft.com/en-us/library/ms880421. // This function returns "" (2 double quotes) if s is empty. // Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. 
+// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { return "\"\"" diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 906325e095a8..6c8d97b6a590 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -20,7 +20,6 @@ // log.Fatal(err) // } // fmt.Printf("Windows system root is %q\n", s) -// package registry import ( diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go index 14027da3f3f1..f8126482fa5a 100644 --- a/vendor/golang.org/x/sys/windows/setupapi_windows.go +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -296,7 +296,7 @@ const ( // Flag to indicate that the sorting from the INF file should be used. DI_INF_IS_SORTED DI_FLAGS = 0x00008000 - // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 // Flag that prevents ConfigMgr from removing/re-enumerating devices during device diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 72074d582f10..8732cdb957f3 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -30,8 +30,6 @@ import ( "strings" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -83,13 +81,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index cf44e6933790..a49853e9d3af 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,6 +10,7 @@ import ( errorspkg "errors" "fmt" "runtime" + "strings" "sync" "syscall" "time" @@ -86,10 +87,8 @@ func StringToUTF16(s string) []uint16 { // s, with a terminating NUL added. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). 
func UTF16FromString(s string) ([]uint16, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL } return utf16.Encode([]rune(s + "\x00")), nil } @@ -139,13 +138,7 @@ func UTF16PtrToString(p *uint16) string { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - var s []uint16 - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(utf16.Decode(s)) + return string(utf16.Decode(unsafe.Slice(p, n))) } func Getpagesize() int { return 4096 } @@ -186,8 +179,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState -//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = ReadFile +//sys writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = WriteFile //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) //sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] //sys CloseHandle(handle Handle) (err error) @@ -365,6 +358,16 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) +//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows +//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows +//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW +//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow +//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow +//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow +//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode +//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible +//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -418,6 +421,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation //sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW //sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW +//sys QueryWorkingSetEx(process 
Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -439,6 +443,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable //sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable +// Desktop Window Manager API (Dwmapi) +//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute +//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -549,12 +557,6 @@ func Read(fd Handle, p []byte) (n int, err error) { } return 0, e } - if raceenabled { - if done > 0 { - raceWriteRange(unsafe.Pointer(&p[0]), int(done)) - } - raceAcquire(unsafe.Pointer(&ioSync)) - } return int(done), nil } @@ -567,12 +569,31 @@ func Write(fd Handle, p []byte) (n int, err error) { if e != nil { return 0, e } - if raceenabled && done > 0 { - raceReadRange(unsafe.Pointer(&p[0]), int(done)) - } return int(done), nil } +func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + err := readFile(fd, p, done, overlapped) + if raceenabled { + if *done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(*done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return err +} + +func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + err := writeFile(fd, p, done, overlapped) + if raceenabled && *done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(*done)) + } + return err +} + var ioSync int64 func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { @@ -611,7 +632,6 @@ var ( func getStdHandle(stdhandle uint32) (fd Handle) { r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) return r } @@ -736,7 +756,7 @@ func Utimes(path string, tv []Timeval) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(tv[0].Nanoseconds()) w := NsecToFiletime(tv[1].Nanoseconds()) return SetFileTime(h, nil, &a, &w) @@ -756,7 +776,7 @@ func UtimesNano(path string, ts []Timespec) (err error) { if e != nil { return e } - defer Close(h) + defer CloseHandle(h) a := NsecToFiletime(TimespecToNsec(ts[0])) w := NsecToFiletime(TimespecToNsec(ts[1])) return SetFileTime(h, nil, &a, &w) @@ -850,6 +870,7 @@ const socket_error = uintptr(^uint32(0)) //sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. 
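The `getBestInterfaceEx` binding added above is surfaced through the typed `GetBestInterfaceEx` wrapper in the next hunk. A minimal, hypothetical caller might look like this (the destination `8.8.8.8` is purely illustrative):

```go
//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Ask the IP helper API which interface would be used to reach dst.
	dst := &windows.SockaddrInet4{Addr: [4]byte{8, 8, 8, 8}}

	var ifIndex uint32
	if err := windows.GetBestInterfaceEx(dst, &ifIndex); err != nil {
		log.Fatalf("GetBestInterfaceEx: %v", err)
	}
	fmt.Println("best interface index:", ifIndex)
}
```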
@@ -959,6 +980,32 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) {
 	return unsafe.Pointer(&sa.raw), sl, nil
 }
 
+type RawSockaddrBth struct {
+	AddressFamily  [2]byte
+	BtAddr         [8]byte
+	ServiceClassId [16]byte
+	Port           [4]byte
+}
+
+type SockaddrBth struct {
+	BtAddr         uint64
+	ServiceClassId GUID
+	Port           uint32
+
+	raw RawSockaddrBth
+}
+
+func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) {
+	family := AF_BTH
+	sa.raw = RawSockaddrBth{
+		AddressFamily:  *(*[2]byte)(unsafe.Pointer(&family)),
+		BtAddr:         *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)),
+		Port:           *(*[4]byte)(unsafe.Pointer(&sa.Port)),
+		ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)),
+	}
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
 func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_UNIX:
@@ -1034,6 +1081,14 @@ func Connect(fd Handle, sa Sockaddr) (err error) {
 	return connect(fd, ptr, n)
 }
 
+func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) {
+	ptr, _, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return getBestInterfaceEx(ptr, pdwBestIfIndex)
+}
+
 func Getsockname(fd Handle) (sa Sockaddr, err error) {
 	var rsa RawSockaddrAny
 	l := int32(unsafe.Sizeof(rsa))
@@ -1061,9 +1116,13 @@ func Shutdown(fd Handle, how int) (err error) {
 }
 
 func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) {
-	rsa, l, err := to.sockaddr()
-	if err != nil {
-		return err
+	var rsa unsafe.Pointer
+	var l int32
+	if to != nil {
+		rsa, l, err = to.sockaddr()
+		if err != nil {
+			return err
+		}
 	}
 	return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine)
 }
@@ -1687,3 +1746,71 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) {
 	h.Cap = int(size)
 	return
 }
+
+// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page.
+type PSAPI_WORKING_SET_EX_BLOCK uint64
+
+// Valid returns the validity of this page.
+// If this bit is 1, the subsequent members are valid; otherwise they should be ignored.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool {
+	return (b & 1) == 1
+}
+
+// ShareCount is the number of processes that share this page. The maximum value of this member is 7.
+func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 {
+	return b.intField(1, 3)
+}
+
+// Win32Protection is the memory protection attributes of the page. For a list of values, see
+// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants
+func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 {
+	return b.intField(4, 11)
+}
+
+// Shared returns the shared status of this page.
+// If this bit is 1, the page can be shared.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool {
+	return (b & (1 << 15)) != 0
+}
+
+// Node is the NUMA node. The maximum value of this member is 63.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 {
+	return b.intField(16, 6)
+}
+
+// Locked returns the locked status of this page.
+// If this bit is 1, the virtual page is locked in physical memory.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool {
+	return (b & (1 << 22)) != 0
+}
+
+// LargePage returns the large page status of this page.
+// If this bit is 1, the page is a large page.
+func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool {
+	return (b & (1 << 23)) != 0
+}
+
+// Bad returns the bad status of this page.
+// If this bit is 1, the page has been reported as bad.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool {
+	return (b & (1 << 31)) != 0
+}
+
+// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union.
+func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 {
+	var mask PSAPI_WORKING_SET_EX_BLOCK
+	for pos := start; pos < start+length; pos++ {
+		mask |= (1 << pos)
+	}
+
+	masked := b & mask
+	return uint64(masked >> start)
+}
+
+// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process.
+type PSAPI_WORKING_SET_EX_INFORMATION struct {
+	// The virtual address.
+	VirtualAddress Pointer
+	// A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress.
+	VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index e19471c6a852..0c4add974106 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -160,6 +160,10 @@ const (
 
 	MAX_COMPUTERNAME_LENGTH = 15
 
+	MAX_DHCPV6_DUID_LENGTH = 130
+
+	MAX_DNS_SUFFIX_STRING_LENGTH = 256
+
 	TIME_ZONE_ID_UNKNOWN  = 0
 	TIME_ZONE_ID_STANDARD = 1
 
@@ -2000,27 +2004,62 @@ type IpAdapterPrefix struct {
 }
 
 type IpAdapterAddresses struct {
-	Length                uint32
-	IfIndex               uint32
-	Next                  *IpAdapterAddresses
-	AdapterName           *byte
-	FirstUnicastAddress   *IpAdapterUnicastAddress
-	FirstAnycastAddress   *IpAdapterAnycastAddress
-	FirstMulticastAddress *IpAdapterMulticastAddress
-	FirstDnsServerAddress *IpAdapterDnsServerAdapter
-	DnsSuffix             *uint16
-	Description           *uint16
-	FriendlyName          *uint16
-	PhysicalAddress       [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
-	PhysicalAddressLength uint32
-	Flags                 uint32
-	Mtu                   uint32
-	IfType                uint32
-	OperStatus            uint32
-	Ipv6IfIndex           uint32
-	ZoneIndices           [16]uint32
-	FirstPrefix           *IpAdapterPrefix
-	/* more fields might be present here.
*/ + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + Ipv4Metric uint32 + Ipv6Metric uint32 + Luid uint64 + Dhcpv4Server SocketAddress + CompartmentId uint32 + NetworkGuid GUID + ConnectionType uint32 + TunnelType uint32 + Dhcpv6Server SocketAddress + Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte + Dhcpv6ClientDuidLength uint32 + Dhcpv6Iaid uint32 + FirstDnsSuffix *IpAdapterDNSSuffix +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterDNSSuffix struct { + Next *IpAdapterDNSSuffix + String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 } const ( @@ -3174,3 +3213,48 @@ type ModuleInfo struct { } const ALL_PROCESSOR_GROUPS = 0xFFFF + +type Rect struct { + Left int32 + Top int32 + Right int32 + Bottom int32 +} + +type GUIThreadInfo struct { + Size uint32 + Flags uint32 + Active HWND + Focus HWND + Capture HWND + MenuOwner HWND + MoveSize HWND + CaretHandle HWND + CaretRect Rect +} + +const ( + DWMWA_NCRENDERING_ENABLED = 1 + DWMWA_NCRENDERING_POLICY = 2 + DWMWA_TRANSITIONS_FORCEDISABLED = 3 + DWMWA_ALLOW_NCPAINT = 4 + DWMWA_CAPTION_BUTTON_BOUNDS = 5 + DWMWA_NONCLIENT_RTL_LAYOUT = 6 + DWMWA_FORCE_ICONIC_REPRESENTATION = 7 + DWMWA_FLIP3D_POLICY = 8 + DWMWA_EXTENDED_FRAME_BOUNDS = 9 + DWMWA_HAS_ICONIC_BITMAP = 10 + DWMWA_DISALLOW_PEEK = 11 + DWMWA_EXCLUDED_FROM_PEEK = 12 + DWMWA_CLOAK = 13 + DWMWA_CLOAKED = 14 + DWMWA_FREEZE_REPRESENTATION = 15 + DWMWA_PASSIVE_UPDATE_MODE = 16 + DWMWA_USE_HOSTBACKDROPBRUSH = 17 + DWMWA_USE_IMMERSIVE_DARK_MODE = 20 + DWMWA_WINDOW_CORNER_PREFERENCE = 33 + DWMWA_BORDER_COLOR = 34 + DWMWA_CAPTION_COLOR = 35 + DWMWA_TEXT_COLOR = 36 + DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9ea1a44f04de..ac60052e44a7 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -40,6 +40,7 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") + moddwmapi = NewLazySystemDLL("dwmapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modmswsock = NewLazySystemDLL("mswsock.dll") @@ -175,8 +176,11 @@ var ( procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procGetAdaptersAddresses = 
modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -248,6 +252,7 @@ var ( procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -407,6 +412,7 @@ var ( procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") + procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") @@ -442,9 +448,18 @@ var ( procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procEnumChildWindows = moduser32.NewProc("EnumChildWindows") + procEnumWindows = moduser32.NewProc("EnumWindows") procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetClassNameW = moduser32.NewProc("GetClassNameW") + procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") + procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") + procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procIsWindow = moduser32.NewProc("IsWindow") + procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") procMessageBoxW = moduser32.NewProc("MessageBoxW") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") @@ -1523,6 +1538,22 @@ func DnsRecordListFree(rl *DNSRecord, freetype uint32) { return } +func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), 
uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1539,6 +1570,14 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { return } +func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetIfEntry(pIfRow *MibIfRow) (errcode error) { r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) if r0 != 0 { @@ -2142,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -2761,7 +2806,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree return } -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { +func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] @@ -3203,7 +3248,7 @@ func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, return } -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { +func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] @@ -3495,6 +3540,14 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb return } +func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret != nil { @@ -3784,6 +3837,19 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui return } +func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { + syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + return +} + +func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitWindowsEx(flags uint32, reason uint32) (err error) { r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { @@ -3792,6 +3858,35 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { return } +func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { + r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + copied = int32(r0) + if copied == 0 { + err = errnoErr(e1) + } + return +} + +func GetDesktopWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetForegroundWindow() (hwnd HWND) { + r0, _, _ := 
syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -3807,6 +3902,24 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { return } +func IsWindow(hwnd HWND) (isWindow bool) { + r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + isWindow = r0 != 0 + return +} + +func IsWindowUnicode(hwnd HWND) (isUnicode bool) { + r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + isUnicode = r0 != 0 + return +} + +func IsWindowVisible(hwnd HWND) (isVisible bool) { + r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + isVisible = r0 != 0 + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS deleted file mode 100644 index 15167cd746c5..000000000000 --- a/vendor/golang.org/x/text/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS deleted file mode 100644 index 1c4577e96806..000000000000 --- a/vendor/golang.org/x/text/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index e4c0811016c2..9d2ae547b5ed 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -193,14 +193,14 @@ func (p *paragraph) run() { // // At the end of this function: // -// - The member variable matchingPDI is set to point to the index of the -// matching PDI character for each isolate initiator character. If there is -// no matching PDI, it is set to the length of the input text. For other -// characters, it is set to -1. -// - The member variable matchingIsolateInitiator is set to point to the -// index of the matching isolate initiator character for each PDI character. -// If there is no matching isolate initiator, or the character is not a PDI, -// it is set to -1. +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. 
func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) @@ -435,7 +435,7 @@ func maxLevel(a, b level) level { } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, -// either L or R, for each isolating run sequence. +// either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) @@ -495,9 +495,9 @@ func (s *isolatingRunSequence) resolveWeakTypes() { if t == NSM { s.types[i] = precedingCharacterType } else { - if t.in(LRI, RLI, FSI, PDI) { - precedingCharacterType = ON - } + // if t.in(LRI, RLI, FSI, PDI) { + // precedingCharacterType = ON + // } precedingCharacterType = t } } @@ -905,7 +905,7 @@ func (p *paragraph) getLevels(linebreaks []int) []level { // Lines are concatenated from left to right. So for example, the fifth // character from the left on the third line is // -// getReordering(linebreaks)[linebreaks[1] + 4] +// getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b72e0..6a796e2214c6 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index 526c7033ac46..d69ccb4f9761 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -110,10 +110,11 @@ func (p Properties) BoundaryAfter() bool { } // We pack quick check data in 4 bits: -// 5: Combines forward (0 == false, 1 == true) -// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) -// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. -// 1..0: Number of trailing non-starters. +// +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. // // When all 4 bits are zero, the character is inert, meaning it is never // influenced by normalization. diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index 95efcf26e81d..4747ad07a839 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -18,16 +18,17 @@ import ( // A Form denotes a canonical representation of Unicode code points. 
 // The Unicode-defined normalization and equivalence forms are:
 //
-//   NFC   Unicode Normalization Form C
-//   NFD   Unicode Normalization Form D
-//   NFKC  Unicode Normalization Form KC
-//   NFKD  Unicode Normalization Form KD
+//	NFC   Unicode Normalization Form C
+//	NFD   Unicode Normalization Form D
+//	NFKC  Unicode Normalization Form KC
+//	NFKD  Unicode Normalization Form KD
 //
 // For a Form f, this documentation uses the notation f(x) to mean
 // the bytes or string x converted to the given form.
 // A position n in x is called a boundary if conversion to the form can
 // proceed independently on both sides:
-//   f(x) == append(f(x[0:n]), f(x[n:])...)
+//
+//	f(x) == append(f(x[0:n]), f(x[n:])...)
 //
 // References: https://unicode.org/reports/tr15/ and
 // https://unicode.org/notes/tn5/.
diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
index 96a130d30e9e..9115ef257e83 100644
--- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
+++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go
@@ -7315,7 +7315,7 @@ const recompMapPacked = "" +
 	"\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C
 	"\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D
 	"\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E
-	"\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F
+	"\x00v\x03#\x00\x00\x1e\x7f" + // 0x00760323: 0x00001E7F
 	"\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80
 	"\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81
 	"\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82
@@ -7342,7 +7342,7 @@ const recompMapPacked = "" +
 	"\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97
 	"\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98
 	"\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99
-	"\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B
+	"\x01\x7f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B
 	"\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0
 	"\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1
 	"\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
deleted file mode 100644
index 15167cd746c5..000000000000
--- a/vendor/golang.org/x/time/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
deleted file mode 100644
index 1c4577e96806..000000000000
--- a/vendor/golang.org/x/time/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 0cfcc8463c29..8f7c29f156aa 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int {
 	return lim.burst
 }
 
+// TokensAt returns the number of tokens available at time t.
+func (lim *Limiter) TokensAt(t time.Time) float64 {
+	lim.mu.Lock()
+	_, _, tokens := lim.advance(t) // does not mutate lim
+	lim.mu.Unlock()
+	return tokens
+}
+
+// Tokens returns the number of tokens available now.
+func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, _, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! 
Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, _, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,20 +331,32 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
-func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() + defer lim.mu.Unlock() if lim.limit == Inf { - lim.mu.Unlock() return Reservation{ ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, + } + } else if lim.limit == 0 { + var ok bool + if lim.burst >= n { + ok = true + lim.burst -= n + } + return Reservation{ + ok: ok, + lim: lim, + tokens: lim.burst, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, last, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -339,44 +378,46 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) + r.timeToAct = t.Add(waitDuration) } // Update state if ok { - lim.last = now + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct } else { lim.last = last } - lim.mu.Unlock() return r } // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, last, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { + if limit <= 0 { + return InfDuration + } seconds := tokens / float64(limit) return time.Duration(float64(time.Second) * seconds) } @@ -384,5 +425,8 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { + if limit <= 0 { + return 0 + } return d.Seconds() * float64(limit) } diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76888..52338d004ce3 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. 
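The x/time/rate changes above are more than renames: `reserveN` gains an explicit zero-limit branch (a `Limit` of 0 spends down the configured burst and then refuses), and `TokensAt` gives lock-protected, non-mutating inspection of the token count. A small sketch of the new semantics, assuming the updated package:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Zero limit: tokens never refill, so only the initial burst is usable.
	lim := rate.NewLimiter(0, 2)
	fmt.Println(lim.Allow(), lim.Allow(), lim.Allow()) // true true false

	// TokensAt reports the token count at an arbitrary instant without
	// mutating limiter state; the result is capped at the burst size.
	lim2 := rate.NewLimiter(1, 5)
	fmt.Println(lim2.TokensAt(time.Now().Add(time.Minute))) // 5
}
```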
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c90..f4f9408f3852 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. @@ -243,7 +244,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -370,56 +371,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. -} - -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. 
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	if cse.numTransientFailure > 0 {
-		return connectivity.TransientFailure
-	}
-	if cse.numIdle > 0 {
-		return connectivity.Idle
-	}
-	return connectivity.TransientFailure
-}
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 000000000000..a87b6809af38
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transient failure state.
+	numIdle             uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records a state change happening in subConn and, based on
+// that, evaluates what the aggregated state should be.
+//
+// - If at least one SubConn is in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn is in Connecting, the aggregated state is Connecting;
+// - Else if at least one SubConn is in Idle, the aggregated state is Idle;
+// - Else if at least one SubConn is in TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f85802..f7031ad2251b 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea61746823..b1c23eaae0db 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. 
+type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. 
 func (ccb *ccBalancerWrapper) watcher() {
 	for {
 		select {
-		case t := <-ccb.updateCh.Get():
+		case u := <-ccb.updateCh.Get():
 			ccb.updateCh.Load()
 			if ccb.closed.HasFired() {
 				break
 			}
-			switch u := t.(type) {
+			switch update := u.(type) {
+			case *ccStateUpdate:
+				ccb.handleClientConnStateChange(update.ccs)
 			case *scStateUpdate:
-				ccb.balancerMu.Lock()
-				ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
-				ccb.balancerMu.Unlock()
-			case *acBalancerWrapper:
-				ccb.mu.Lock()
-				if ccb.subConns != nil {
-					delete(ccb.subConns, u)
-					ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
-				}
-				ccb.mu.Unlock()
-			case exitIdle:
-				if ccb.cc.GetState() == connectivity.Idle {
-					if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
-						// We already checked that the balancer implements
-						// ExitIdle before pushing the event to updateCh, but
-						// check conditionally again as defensive programming.
-						ccb.balancerMu.Lock()
-						ei.ExitIdle()
-						ccb.balancerMu.Unlock()
-					}
-				}
+				ccb.handleSubConnStateChange(update)
+			case *exitIdleUpdate:
+				ccb.handleExitIdle()
+			case *resolverErrorUpdate:
+				ccb.handleResolverError(update.err)
+			case *switchToUpdate:
+				ccb.handleSwitchTo(update.name)
+			case *subConnUpdate:
+				ccb.handleRemoveSubConn(update.acbw)
 			default:
-				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
+				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
 			}
 		case <-ccb.closed.Done():
 		}

 		if ccb.closed.HasFired() {
-			ccb.balancerMu.Lock()
-			ccb.balancer.Close()
-			ccb.balancerMu.Unlock()
-			ccb.mu.Lock()
-			scs := ccb.subConns
-			ccb.subConns = nil
-			ccb.mu.Unlock()
-			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
-			ccb.done.Fire()
-			// Fire done before removing the addr conns. We can safely unblock
-			// ccb.close and allow the removeAddrConns to happen
-			// asynchronously.
-			for acbw := range scs {
-				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-			}
+			ccb.handleClose()
 			return
 		}
 	}
 }

-func (ccb *ccBalancerWrapper) close() {
-	ccb.closed.Fire()
-	<-ccb.done.Done()
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+//
+// Unlike other methods invoked by grpc to push updates to the underlying
+// balancer, this method cannot simply push the update onto the update channel
+// and return. It needs to return the error returned by the underlying balancer
+// back to grpc which propagates that to the resolver.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+	var res interface{}
+	select {
+	case res = <-ccb.resultCh.Get():
+		ccb.resultCh.Load()
+	case <-ccb.closed.Done():
+		// Return early if the balancer wrapper is closed while we are waiting for
+		// the underlying balancer to process a ClientConnState update.
+		return nil
+	}
+	// If the returned error is nil, attempting to type assert to error leads to
+	// panic. So, this needs to be handled separately.
+	if res == nil {
+		return nil
+	}
+	return res.(error)
 }

-func (ccb *ccBalancerWrapper) exitIdle() bool {
-	if !ccb.hasExitIdle {
-		return false
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
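updateClientConnState is the one update that must report a result back to its caller. A hedged sketch of that enqueue-and-wait shape, with plain buffered channels standing in for grpc's internal buffer.Unbounded and grpcsync.Event:

```go
package main

import "fmt"

type wrapper struct {
	updateCh chan string   // stands in for buffer.Unbounded
	resultCh chan error    // watcher pushes outcomes here
	closed   chan struct{} // stands in for grpcsync.Event
}

// watcher is the single goroutine that touches balancer state, so no mutex is
// needed; it reports each outcome synchronously on resultCh.
func (w *wrapper) watcher() {
	for {
		select {
		case u := <-w.updateCh:
			w.resultCh <- fmt.Errorf("processed %q", u)
		case <-w.closed:
			return
		}
	}
}

// update enqueues work and blocks for the result, giving up silently if the
// wrapper is closed mid-flight — the same early return as in the diff.
func (w *wrapper) update(u string) error {
	w.updateCh <- u
	select {
	case err := <-w.resultCh:
		return err
	case <-w.closed:
		return nil
	}
}

func main() {
	w := &wrapper{
		updateCh: make(chan string, 1),
		resultCh: make(chan error, 1),
		closed:   make(chan struct{}),
	}
	go w.watcher()
	fmt.Println(w.update("client conn state")) // processed "client conn state"
	close(w.closed)
}
```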
+// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". 
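The lookup-with-fallback described here can be exercised against the public registry. A small sketch: the real code falls back to grpc's internal pick_first builder, which is not exported, so this sketch falls back to the registered round_robin builder instead; only `resolveBuilder` and the fake policy name are made up.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/roundrobin" // init() registers "round_robin"
)

// resolveBuilder mirrors the registry lookup in handleSwitchTo: an
// unregistered name falls back to a default builder.
func resolveBuilder(name string) balancer.Builder {
	if b := balancer.Get(name); b != nil {
		return b
	}
	// Unknown policy: fall back to a known-good builder, mirroring the
	// wrapper's warning-and-fallback path.
	return balancer.Get(roundrobin.Name)
}

func main() {
	fmt.Println(resolveBuilder("round_robin").Name())    // round_robin
	fmt.Println(resolveBuilder("no_such_policy").Name()) // round_robin (fallback)
}
```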
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. +func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. 
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000000..a220c47c59a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc87073..779b03bca1c3 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. 
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). 
+ authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -536,14 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. 
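applyFailingLB, invoked on the next line and defined further down in this file's diff, amounts to installing an always-erroring picker. A standalone sketch of that behavior using the same base.NewErrPicker helper; the error text here is illustrative.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Every Pick on an err-picker fails with the same status, so RPCs surface
	// the service-config problem instead of hanging.
	err := status.Errorf(codes.Unavailable, "error parsing service config: %v", "boom")
	p := base.NewErrPicker(err)
	_, pickErr := p.Pick(balancer.PickInfo{})
	fmt.Println(pickErr) // rpc error: code = Unavailable desc = error parsing service config: boom
}
```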
+ cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. 
- cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -764,23 +712,26 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -853,16 +804,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. 
 //
+// If the addresses are the same as the old list, it does nothing and returns
+// true.
+//
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
 // If ac is Ready, it checks whether current connected address of ac is in the
 // new addrs list.
 //  - If true, it updates ac.addrs and returns true. The ac will keep using
@@ -879,6 +845,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 		return true
 	}

+	if equalAddresses(ac.addrs, addrs) {
+		return true
+	}
+
 	if ac.state == connectivity.Connecting {
 		return false
 	}
@@ -959,14 +929,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
 }

 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
 		Ctx:            ctx,
 		FullMethodName: method,
 	})
-	if err != nil {
-		return nil, nil, toRPCErr(err)
-	}
-	return t, done, nil
 }

 func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
@@ -991,35 +957,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}

-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var newBalancerName string
-		if cc.sc != nil && cc.sc.lbConfig != nil {
-			newBalancerName = cc.sc.lbConfig.name
-		} else {
-			var isGRPCLB bool
-			for _, a := range addrs {
-				if a.Type == resolver.GRPCLB {
-					isGRPCLB = true
-					break
-				}
-			}
-			if isGRPCLB {
-				newBalancerName = grpclbName
-			} else if cc.sc != nil && cc.sc.LB != nil {
-				newBalancerName = *cc.sc.LB
-			} else {
-				newBalancerName = PickFirstBalancerName
+	var newBalancerName string
+	if cc.sc != nil && cc.sc.lbConfig != nil {
+		newBalancerName = cc.sc.lbConfig.name
+	} else {
+		var isGRPCLB bool
+		for _, a := range addrs {
+			if a.Type == resolver.GRPCLB {
+				isGRPCLB = true
+				break
 			}
 		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+		if isGRPCLB {
+			newBalancerName = grpclbName
+		} else if cc.sc != nil && cc.sc.LB != nil {
+			newBalancerName = *cc.sc.LB
+		} else {
+			newBalancerName = PickFirstBalancerName
+		}
 	}
+	cc.balancerWrapper.switchTo(newBalancerName)
 }

 func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
@@ -1070,11 +1027,11 @@ func (cc *ClientConn) Close() error {
 	rWrapper := cc.resolverWrapper
 	cc.resolverWrapper = nil
 	bWrapper := cc.balancerWrapper
-	cc.balancerWrapper = nil
 	cc.mu.Unlock()

+	// The order of closing matters here since the balancer wrapper assumes the
+	// picker is closed before it is closed.
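Stepping back to the new equalAddresses helper above: resolver.Address.Equal compares exported fields, and the loop is positional, so a reordered list still counts as a change. A quick self-contained check (the addresses are made up):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// equalAddresses reproduces the helper from the diff for illustration.
func equalAddresses(a, b []resolver.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if !v.Equal(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	x := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	y := []resolver.Address{{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}}
	fmt.Println(equalAddresses(x, x)) // true
	fmt.Println(equalAddresses(x, y)) // false: same set, different order
}
```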
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1042,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1087,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1284,6 +1241,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1294,7 +1252,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1312,14 +1269,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) return err } @@ -1332,7 +1288,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1453,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4fbed12565fd..82bee1443bfe 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -70,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. 
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e940..9372dc322e80 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,32 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + extraDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,19 +55,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,10 +82,12 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -195,25 +206,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -286,7 +278,7 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. 
// Implies WithBlock() // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -304,8 +296,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -315,7 +307,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -346,7 +338,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -402,7 +394,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -414,7 +420,7 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -494,11 +500,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -539,9 +545,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently enabled by default, but may be disabled by -// setting the environment variable "GRPC_GO_RETRY" to "off". 
func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -559,7 +562,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -606,7 +609,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d08..18e530fc9024 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 69f525d1baeb..a332dfd7b54e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf0a9..bb96ef57be89 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. 
Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000000..08666f62a7cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. 
+ balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. 
bw
+// must not be referenced via balancerCurrent or balancerPending in gsb when
+// called. gsb.mu must not be held. Does not panic with a nil receiver.
+func (bw *balancerWrapper) Close() {
+	// bw is nil when SwitchTo had no pending balancer to close.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() is
+	// impossible to be called concurrently with the write in SwitchTo(). The
+	// callsites of Close() for this balancer in Graceful Switch Balancer will
+	// never be called until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		bw.gsb.cc.RemoveSubConn(sc)
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, you can forward the pending balancer's cached State up to
+		// ClientConn and swap the pending into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the Client Conn. Ignoring
+		// state + picker from the current would cause undefined behavior/cause the
+		// system to behave incorrectly from the current LB policy's perspective.
+		// Also, the current LB is still being used by grpc to choose SubConns per
+		// RPC, and thus should use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		bw.gsb.cc.RemoveSubConn(sc)
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
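The pending-balancer branch of UpdateState above swaps as soon as the pending policy is usable or the current one cannot serve anyway. That final condition, restated as a toy predicate over connectivity states (`shouldSwap` is an illustrative name, not gRPC's):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// shouldSwap restates the condition at the end of UpdateState for a state
// update coming from the pending balancer.
func shouldSwap(pending, current connectivity.State) bool {
	return pending != connectivity.Connecting || current != connectivity.Ready
}

func main() {
	fmt.Println(shouldSwap(connectivity.Connecting, connectivity.Ready)) // false: keep serving on current
	fmt.Println(shouldSwap(connectivity.Ready, connectivity.Ready))      // true: pending is usable
	fmt.Println(shouldSwap(connectivity.Connecting, connectivity.Idle))  // true: current cannot serve anyway
}
```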
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb213..809d73ccafb0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,35 +31,42 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. 
The map fields need to be filled in @@ -88,83 +107,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
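The `GetMethodLogger` lookup that follows resolves a method name against these maps with a fixed precedence: an exact `/service/method` rule wins, then the blacklist, then a `service/*` rule, then the `*` default. A standalone sketch of the same precedence, reduced to booleans for brevity (`cfg` and `shouldLog` are illustrative names, not the vendored types):

```go
package main

import (
	"fmt"
	"strings"
)

// cfg mirrors the shape of LoggerConfig above, with each rule reduced to a
// simple on/off flag.
type cfg struct {
	all       bool
	services  map[string]bool
	methods   map[string]bool
	blacklist map[string]bool
}

// shouldLog resolves "service/method" with the logger's precedence: exact
// method rule, then blacklist, then service wildcard, then global default.
func (c cfg) shouldLog(fullMethod string) bool {
	svc := strings.SplitN(fullMethod, "/", 2)[0]
	if c.methods[fullMethod] {
		return true
	}
	if c.blacklist[fullMethod] {
		return false
	}
	if c.services[svc] {
		return true
	}
	return c.all
}

func main() {
	c := cfg{
		all:       true,
		services:  map[string]bool{"echo.Echo": true},
		blacklist: map[string]bool{"echo.Echo/Health": true},
	}
	fmt.Println(c.shouldLog("echo.Echo/UnaryEcho")) // true (service rule)
	fmt.Println(c.shouldLog("echo.Echo/Health"))    // false (blacklisted)
	fmt.Println(c.shouldLog("other.Svc/Do"))        // true (global default)
}
```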
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602fde..c5579e65065f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831509..179f4a26d135 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,13 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. 
+type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +63,9 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +76,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +94,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +132,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543eee..777cbcd7921d 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. 
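The truncate helpers above enforce the byte budgets configured through `NewTruncatingMethodLogger` before an entry reaches the sink. A rough sketch of the header-side idea, assuming a flat key/value slice rather than the proto `Metadata` message the vendored `truncateMetadata` operates on:

```go
package main

import "fmt"

type entry struct{ key, value string }

// truncateHeaders keeps whole entries while the running byte budget allows,
// and reports whether anything was dropped. A fuller implementation could
// also trim a final entry's value to fit the remaining budget.
func truncateHeaders(entries []entry, maxLen int) ([]entry, bool) {
	var kept []entry
	bytesLeft := maxLen
	for _, e := range entries {
		size := len(e.key) + len(e.value)
		if size > bytesLeft {
			return kept, true // budget exhausted; remaining entries dropped
		}
		bytesLeft -= size
		kept = append(kept, e)
	}
	return kept, false
}

func main() {
	hdrs := []entry{{"user-agent", "grpc-go/1.45"}, {"x-trace", "abc123"}}
	kept, truncated := truncateHeaders(hdrs, 24)
	fmt.Println(kept, truncated) // [{user-agent grpc-go/1.45}] true
}
```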
-func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
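The rewritten cleanup above replaces a fixed-count sleep loop with a ticker bounded by a context deadline, failing with a descriptive error instead of silently giving up after an arbitrary number of iterations. The same pattern extracted into a generic helper (`waitForCondition` is an illustrative name, not part of the vendored code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForCondition polls cond every interval until it returns true or the
// context expires, mirroring the ticker-plus-deadline loop used above.
func waitForCondition(ctx context.Context, interval time.Duration, cond func() bool) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if cond() {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.New("condition not met before deadline")
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	start := time.Now()
	err := waitForCondition(ctx, 10*time.Millisecond, func() bool {
		return time.Since(start) > 50*time.Millisecond
	})
	fmt.Println(err) // <nil>
}
```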
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
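+//
+// A usable *Identifier is returned even when channelz is off, so callers can
+// log it and later pass it to RemoveEntry unconditionally.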
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. 
+ switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. @@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000000..c9a27acd3710 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. 
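`newIdentifer` below (the spelling is the vendored code's own) renders the identifier string once at construction, prefixing the parent's rendering so that nested entities read as a path, e.g. `Channel #1 SubChannel #7`. The composition in isolation (a sketch using a plain string in place of `RefChannelType`):

```go
package main

import "fmt"

type ident struct {
	str string
	pid *ident
}

// newIdent prepends the parent's rendered form, so an identifier carries its
// full ancestry in its string representation.
func newIdent(typ string, id int64, pid *ident) *ident {
	str := fmt.Sprintf("%s #%d", typ, id)
	if pid != nil {
		str = fmt.Sprintf("%s %s", pid.str, str)
	}
	return &ident{str: str, pid: pid}
}

func main() {
	ch := newIdent("Channel", 1, nil)
	sc := newIdent("SubChannel", 7, ch)
	sock := newIdent("NormalSocket", 9, sc)
	fmt.Println("[" + sock.str + "]") // [Channel #1 SubChannel #7 NormalSocket #9]
}
```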
+func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8865..8e13a3d2ce7b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154bd3..ad0ce4dabf06 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000000..821dd0a7c198 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. 
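+	// The value is read once at package initialization; later changes to the
+	// environment variable are not observed.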
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9bad03cec64f..af09711a3e88 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -76,13 +77,16 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b766..517ea70642a1 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. 
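`Int31n` above follows this package's pattern: a single shared `math/rand` source guarded by a mutex, because `rand.Source` is not safe for concurrent use. The pattern in miniature (a sketch; the vendored package wraps more methods):

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
	mu sync.Mutex
)

// int31n serializes access to the shared source; every wrapper method must
// hold the mutex, since rand.Source is not goroutine-safe.
func int31n(n int32) int32 {
	mu.Lock()
	defer mu.Unlock()
	return r.Int31n(n)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(int31n(100)) // safe from multiple goroutines
		}()
	}
	wg.Wait()
}
```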
func Float64() float64 { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060c1c..e9c4af64830c 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,6 +39,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf3579f..fd0ee3dcaf1e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -64,6 +63,70 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. 
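+	// These hooks are declared as interface{} because this internal package
+	// cannot import the grpc package that defines DialOption and
+	// ServerOption; grpc assigns the typed functions at init time.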
+ WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -86,3 +149,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf340d..b2980f8ac44a 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. 
+// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
+// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		// Pseudo-headers are ignored.
+		if k[0] == ':' {
+			continue
+		}
+		// Check the key. Indexing the string byte by byte avoids the rune
+		// conversion a range loop would perform; the allowed characters are
+		// all single-byte ASCII.
+		for i := 0; i < len(k); i++ {
+			r := k[i]
+			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+			}
+		}
+		// Binary headers may carry arbitrary bytes (they are base64-encoded
+		// on the wire), so their values are not validated here.
+		if strings.HasSuffix(k, "-bin") {
+			continue
+		}
+		// Check the values.
+		for _, val := range vals {
+			if hasNotPrintable(val) {
+				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+			}
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters outside the
+// printable ASCII range %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Index byte by byte to avoid the rune conversion of a range loop.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 000000000000..0177af4b5114
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/golang/protobuf/jsonpb"
+	protov1 "github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshaling fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+	switch ee := e.(type) {
+	case protov1.Message:
+		mm := jsonpb.Marshaler{Indent: jsonIndent}
+		ret, err := mm.MarshalToString(ee)
+		if err != nil {
+			// Marshaling may fail for messages containing proto.Anys whose
+			// types are not imported (e.g. the xDS v2 LDS messages), because
+			// the embedded message type cannot be resolved.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return ret
+	case protov2.Message:
+		mm := protojson.MarshalOptions{
+			Multiline: true,
+			Indent:    jsonIndent,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// Marshaling may fail for messages containing proto.Anys whose
+			// types are not imported (e.g. the xDS v2 LDS messages), because
+			// the embedded message type cannot be resolved.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	default:
+		ret, err := json.MarshalIndent(ee, "", jsonIndent)
+		if err != nil {
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 20852e59df29..7f1a702cacbe 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df03..409769f48fdc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -880,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4c5..090120925bb4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. 
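This file is part of a broader change in this bump that replaces each transport's single optional `stats.Handler` with a slice, so every event fans out to all registered handlers and the old nil check becomes a range loop. The dispatch pattern in isolation (a sketch with a one-method stand-in for `stats.Handler`):

```go
package main

import "fmt"

// handler stands in for stats.Handler, reduced to one method for brevity.
type handler interface {
	HandleConn(event string)
}

type printHandler struct{ name string }

func (p printHandler) HandleConn(event string) {
	fmt.Println(p.name, "saw", event)
}

func main() {
	// A range over a slice is naturally a no-op when no handlers are
	// registered, so no nil check is needed at each call site.
	handlers := []handler{printHandler{"metrics"}, printHandler{"tracing"}}
	for _, h := range handlers {
		h.HandleConn("conn begin")
	}
}
```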
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d337105..5c2f35b24e75 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -78,6 +78,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -90,7 +91,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -109,6 +110,7 @@ type http2Client struct { waitingStreams uint32 nextID uint32 + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -132,7 +134,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -311,7 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, @@ -324,6 +326,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -341,18 +345,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -466,7 +471,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -630,8 +635,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -639,8 +644,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +653,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. 
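The `NewStreamError` change below collapses two negative flags (`DoNotRetry`, `DoNotTransparentRetry`) into a single positive `AllowTransparentRetry`. A sketch of how a caller can branch on such an error (illustrative types only; the real decision lives in grpc's call-retry path):

```go
package main

import (
	"errors"
	"fmt"
)

// newStreamError mirrors the shape of transport.NewStreamError after this
// change: one positive flag instead of two negated ones.
type newStreamError struct {
	err                   error
	allowTransparentRetry bool
}

func (e newStreamError) Error() string { return e.err.Error() }

// shouldTransparentRetry sketches the caller's decision: transparently retry
// only when the transport indicates the RPC never reached the server (e.g.
// the connection was closing or draining).
func shouldTransparentRetry(err error) bool {
	var nse newStreamError
	if errors.As(err, &nse) {
		return nse.allowTransparentRetry
	}
	return false
}

func main() {
	drain := newStreamError{err: errors.New("the connection is draining"), allowTransparentRetry: true}
	hdrSize := newStreamError{err: errors.New("header list size exceeded"), allowTransparentRetry: false}
	fmt.Println(shouldTransparentRetry(drain))   // true
	fmt.Println(shouldTransparentRetry(hdrSize)) // false
}
```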
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -685,7 +689,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea cleanup(err) return err } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -719,6 +722,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.nextID += 2 s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -744,22 +754,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +772,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. 
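+			// "Pollution" here means one handler mutating a shared event
+			// struct and affecting what later handlers observe.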
+ outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -898,9 +906,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +923,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1001,13 +1007,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1213,7 +1219,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. @@ -1226,18 +1232,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. 
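The GOAWAY handling here collects the affected streams while `t.mu` is held and, in the loop that follows, closes them only after the unlock, since `closeStream` may take controlBuf's mutex. A self-contained sketch of that collect-then-close pattern, with hypothetical names standing in for the transport's internals:

```go
package main

import (
	"fmt"
	"sync"
)

type stream struct{ id uint32 }

type transport struct {
	mu     sync.Mutex
	active map[uint32]*stream
}

// drainAfter closes every stream whose ID is greater than lastID. Candidates
// are collected while mu is held, but closeStream runs only after the unlock,
// so it is free to take other locks without risking lock-order inversion.
func (t *transport) drainAfter(lastID uint32) {
	t.mu.Lock()
	toClose := make([]*stream, 0, len(t.active))
	for id, s := range t.active {
		if id > lastID {
			toClose = append(toClose, s)
		}
	}
	t.mu.Unlock()
	for _, s := range toClose {
		t.closeStream(s) // mu is not held here
	}
}

func (t *transport) closeStream(s *stream) {
	fmt.Println("closing stream", s.id)
}

func main() {
	t := &transport{active: map[uint32]*stream{1: {id: 1}, 3: {id: 3}, 5: {id: 5}}}
	t.drainAfter(2)
}
```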
+ for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } @@ -1433,7 +1450,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1458,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59cf..3dd15647bc84 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -260,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -267,20 +275,20 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -443,6 +451,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -479,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
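Peer information is now attached once to the server transport's context rather than rebuilt for every stream, so stream contexts derived from it already carry the peer. From handler code the peer is recovered the usual way; a small runnable sketch:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc/peer"
)

// logPeer recovers the peer that the transport attached to the context.
// Inside a real handler ok is true; AuthInfo can still be nil, for example
// on insecure connections.
func logPeer(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		log.Println("no peer attached to context")
		return
	}
	log.Printf("peer addr=%v authinfo=%v", p.Addr, p.AuthInfo)
}

func main() {
	logPeer(context.Background()) // no peer outside an RPC
}
```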
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -516,14 +518,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -544,6 +548,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -561,8 +566,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +576,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -925,12 +930,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -939,10 +959,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -973,14 +991,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -990,17 +1008,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. 
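`WriteHeader` now holds `hdrMu` for the whole call and distinguishes a finished stream (a context error) from a repeated header write (`ErrIllegalHeaderWrite`, now a status error). Seen from user code, a second `grpc.SendHeader` in a handler is what trips the latter. A sketch meant to run inside a unary handler (it does nothing useful outside one):

```go
package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// sendHeadersOnce is meant to run inside a unary handler. The first
// SendHeader flushes the headers; per this diff a second call fails with an
// Internal status ("SendHeader called multiple times") instead of the old
// plain error value.
func sendHeadersOnce(ctx context.Context) {
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-request-id", "42")); err != nil {
		log.Printf("first SendHeader: %v", err)
		return
	}
	if err := grpc.SendHeader(ctx, metadata.Pairs("too", "late")); err != nil {
		log.Printf("second SendHeader rejected: %v", err) // expected
	}
}
```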
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1029,7 +1049,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -1041,10 +1061,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1076,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1091,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1210,25 +1214,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1248,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1272,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1404,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf692..2c601a864d99 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -45,14 +44,8 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -257,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -297,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -322,8 +315,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +351,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2bdff..6c3ba8515940 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -522,14 +523,14 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +553,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. 
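Defining `Unwrap` on `ConnectionError` lets `errors.Is` and `errors.As` see the underlying cause. The real type is internal to the transport package, so here is a stand-in with the same shape to show the effect:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// connError mirrors the shape of the transport's ConnectionError; the real
// type is internal, so this stand-in only demonstrates the Unwrap effect.
type connError struct{ err error }

func (e connError) Error() string { return fmt.Sprintf("connection error: %v", e.err) }

// Unwrap exposes the cause, exactly like the method added in this diff.
func (e connError) Unwrap() error { return e.err }

func main() {
	err := error(connError{err: io.EOF})
	fmt.Println(errors.Is(err, io.EOF)) // true, thanks to Unwrap
}
```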
+func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819fdc..98d62e0675f6 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -50,7 +50,7 @@ type MD map[string][]string // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -74,7 +74,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,17 +182,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -220,13 +254,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb8993b..843633c910a1 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
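On the metadata side just above, `FromIncomingContext` now returns defensive copies via `copyOf`, and the experimental `ValueFromIncomingContext` fetches a single key without copying the whole map. A small demonstration using the public API:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("authorization", "Bearer abc"))

	// FromIncomingContext now hands back copies, so mutating the result no
	// longer aliases the metadata stored in the context.
	md, _ := metadata.FromIncomingContext(ctx)
	md["authorization"][0] = "mutated"

	// The experimental single-key accessor skips copying the whole map.
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "authorization"))
	// Output: [Bearer abc]
}
```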
} if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b078a..fb7a99e0a273 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: 
s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 58c802f8aec7..99db79fafcfb 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +79,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -117,12 +115,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. 
-mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
+# grpc_testing_not_regenerate/*.pb.go are not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
 
 # grpc/testing does not have a go_package option.
 mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index e87ecd0eeb38..efcb7f3efd82 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -28,25 +28,40 @@ type addressMapEntry struct {
 // Multiple accesses may not be performed concurrently. Must be created via
 // NewAddressMap; do not construct directly.
 type AddressMap struct {
-	m map[string]addressMapEntryList
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with the same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	// Struct values are comparable if all their fields are comparable. Two
+	// struct values are equal if their corresponding non-blank fields are equal.
+	//
+	// The map value is a slice of entries whose addresses match the key in
+	// their `Addr` and `ServerName` fields, each carrying the value
+	// associated with that address.
+	m map[Address]addressMapEntryList
+}
+
+func toMapKey(addr *Address) Address {
+	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
 }
 
 type addressMapEntryList []*addressMapEntry
 
 // NewAddressMap creates a new AddressMap.
 func NewAddressMap() *AddressMap {
-	return &AddressMap{m: make(map[string]addressMapEntryList)}
+	return &AddressMap{m: make(map[Address]addressMapEntryList)}
 }
 
 // find returns the index of addr in the addressMapEntry slice, or -1 if not
 // present.
 func (l addressMapEntryList) find(addr Address) int {
-	if len(l) == 0 {
-		return -1
-	}
 	for i, entry := range l {
-		if entry.addr.ServerName == addr.ServerName &&
-			entry.addr.Attributes.Equal(addr.Attributes) {
+		// Attributes are the only thing to match on here, since `Addr` and
+		// `ServerName` are already equal.
+		if entry.addr.Attributes.Equal(addr.Attributes) {
 			return i
 		}
 	}
@@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int {
 
 // Get returns the value for the address in the map, if present.
 func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
-	entryList := a.m[addr.Addr]
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
 	if entry := entryList.find(addr); entry != -1 {
 		return entryList[entry].value, true
 	}
@@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
 
 // Set updates or adds the value to the address in the map.
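The comment block above explains the new key scheme: `Addr` and `ServerName` go into the struct key, while `Attributes` are matched inside the per-key entry list. The public `resolver.AddressMap` behaves accordingly; a runnable sketch (the values and attribute keys are arbitrary):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	// Same Addr and ServerName but different Attributes: both land under the
	// same struct key, yet stay distinct entries matched by Attributes.Equal.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("weight", 1)}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("weight", 2)}

	m.Set(a1, "conn-1")
	m.Set(a2, "conn-2")

	fmt.Println(m.Len()) // 2
	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // conn-1
	}
	fmt.Println(len(m.Values())) // 2, via the newly added Values helper
}
```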
func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b68026062..ca2e35a3596f 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. 
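With `Equal` moving to a value receiver and the new `String` method, `resolver.Address` values can be compared and logged directly. A short usage sketch; the exact `String` output comes from the internal pretty package, so it is printed rather than asserted:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "svc.example.com"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "svc.example.com"}

	// Equal now has a value receiver, so it can be called on plain values.
	fmt.Println(a.Equal(b)) // true

	// String renders the address as JSON via the internal pretty package.
	fmt.Println(a.String())
}
```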
type BuildOptions struct { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f07c..05a9d4e0bac0 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05fd18..f4dde72b41f8 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddGlobalServerOptions = 
func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + extraServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -134,7 +142,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -149,8 +157,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +193,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,6 +217,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. @@ -298,7 +324,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -419,7 +445,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +461,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. 
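On the server side the same slice treatment applies: each `grpc.StatsHandler` option appends, and the nil guard above means a nil handler is logged and dropped instead of panicking later. A sketch with an assumed no-op handler type:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// noopHandler is an illustrative stats.Handler that does nothing.
type noopHandler struct{}

func (noopHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (noopHandler) HandleRPC(context.Context, stats.RPCStats)                       {}
func (noopHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (noopHandler) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	srv := grpc.NewServer(
		grpc.StatsHandler(noopHandler{}), // appended to the handlers slice
		grpc.StatsHandler(nil),           // logged and ignored, no later panic
	)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("serving on", lis.Addr())
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```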
+ return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +502,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +523,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +538,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -560,6 +600,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +627,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +754,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +766,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +800,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +809,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +910,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: 
s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -887,7 +931,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -945,24 +989,24 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1075,8 +1119,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1123,13 +1169,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1206,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1180,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1202,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1242,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1314,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1268,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1283,9 +1341,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1294,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1331,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1362,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1416,16 +1492,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1437,10 +1515,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
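The error-conversion change in the processUnaryRPC hunk above (with a matching hunk later in processStreamingRPC) means a handler that returns a plain context error no longer surfaces to clients as codes.Unknown. A minimal, self-contained sketch of the mapping that status.FromContextError performs:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	for _, err := range []error{
		context.Canceled,         // -> Canceled
		context.DeadlineExceeded, // -> DeadlineExceeded
		errors.New("boom"),       // anything else -> Unknown
	} {
		fmt.Printf("%v -> %v\n", err, status.FromContextError(err).Code())
	}
}
```

So a handler that returns ctx.Err() after cancellation now reports Canceled or DeadlineExceeded, matching what the client already observed, instead of a generic Unknown.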
defer func() { if trInfo != nil { @@ -1454,7 +1532,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1462,7 +1540,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1475,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1495,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1549,7 +1638,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1559,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? return appErr @@ -1574,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } @@ -1654,7 +1751,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1669,7 +1766,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1684,7 +1781,7 @@ type ServerTransportStream interface { // ctx. 
Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1706,11 +1803,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1748,11 +1841,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1805,12 +1894,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1822,8 +1925,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1837,6 +1946,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. 
However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf7e8..01bbb2025aed 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -57,10 +57,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If @@ -218,7 +217,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +380,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34e59..0c16cfb2ea80 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used @@ -137,13 +140,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. 
Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -293,20 +301,35 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -320,7 +343,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -341,14 +366,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. 
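newClientStream now rejects malformed outgoing metadata up front with codes.Internal, via the internal imetadata.Validate call in the hunk above. A sketch of what that looks like from the caller's side; the address and method name are placeholders, and the validation failure happens before any bytes reach the wire, so no running server is needed:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// gRPC metadata keys are limited to lower-case ASCII letters, digits,
	// and '-', '_', '.'; the space makes this key illegal.
	md := metadata.MD{"bad key": []string{"v"}}
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	var in, out struct{}
	err = conn.Invoke(ctx, "/placeholder.Service/Method", &in, &out)
	fmt.Println(status.Code(err)) // expected: Internal
}
```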
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -377,27 +408,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. @@ -405,16 +415,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -423,12 +449,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -454,7 +489,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -504,8 +539,13 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +565,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +591,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +615,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +663,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. 
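shouldRetry above consults the method's RetryPolicy and the grpc-retry-pushback-ms trailer. For context, a client opts into that machinery through service config; a hedged sketch with illustrative numbers and a placeholder service name:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Up to 4 attempts with exponential backoff, retrying only when the
	// server returns UNAVAILABLE. Transparent retries (the stream never
	// left the client) happen independently of this policy.
	sc := `{
	  "methodConfig": [{
	    "name": [{"service": "placeholder.Service"}],
	    "retryPolicy": {
	      "maxAttempts": 4,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2.0,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```

Note this is only the default config; a service config delivered by the name resolver takes precedence over it.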
-func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +690,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -679,6 +707,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -695,7 +735,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -713,7 +753,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Only log if binary log is on and header has not been logged. logEntry := &binarylog.ServerHeader{ OnClientSide: true, @@ -723,10 +763,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +786,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -795,47 +836,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -848,7 +890,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -869,10 +913,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -905,10 +952,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -941,8 +991,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -952,7 +1002,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -980,6 +1030,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -989,8 +1040,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1049,7 +1100,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1057,7 +1108,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1362,8 +1413,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1424,9 +1477,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
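The expanded SetHeader/SendHeader/SetTrailer contracts earlier in server.go spell out exactly when headers are flushed. A hedged sketch of a unary handler exercising that ordering; the request and response types are hypothetical stand-ins for generated messages, and the ctx must be one the gRPC runtime passed in, since it has to carry the server transport stream:

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// Hypothetical message types standing in for protoc-generated structs.
type pingReq struct{}
type pingResp struct{}

func Ping(ctx context.Context, _ *pingReq) (*pingResp, error) {
	// Merged but not yet sent; may be called repeatedly.
	if err := grpc.SetHeader(ctx, metadata.Pairs("region", "eu-west-1")); err != nil {
		return nil, err
	}
	// Flushes all merged headers; at most once, and SetHeader fails afterwards.
	if err := grpc.SendHeader(ctx, metadata.Pairs("cache", "miss")); err != nil {
		return nil, err
	}
	// Trailers ride along with the RPC status once the handler returns.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("cost-ms", "3")); err != nil {
		return nil, err
	}
	return &pingResp{}, nil
}

func main() { _ = Ping }
```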
@@ -1446,17 +1499,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1465,6 +1530,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1510,20 +1578,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1557,13 +1633,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1572,20 +1651,25 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 9d3fd73da94e..d472ca64307b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.44.1-dev" +const Version = "1.50.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7b3a..c3fc8253b13a 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ @@ -147,7 +147,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS deleted file mode 100644 index 2b00ddba0dfe..000000000000 --- a/vendor/google.golang.org/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976faf..000000000000 --- a/vendor/google.golang.org/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go new file mode 100644 index 000000000000..369df13da274 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/encoding/protowire" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type fileInfo struct { + *protogen.File + + allEnums []*enumInfo + allMessages []*messageInfo + allExtensions []*extensionInfo + + allEnumsByPtr map[*enumInfo]int // value is index into allEnums + allMessagesByPtr map[*messageInfo]int // value is index into allMessages + allMessageFieldsByPtr map[*messageInfo]*structFields + + // needRawDesc specifies whether the generator should emit logic to provide + // the legacy raw descriptor in GZIP'd form. 
+ // This is updated by enum and message generation logic as necessary, + // and checked at the end of file generation. + needRawDesc bool +} + +type structFields struct { + count int + unexported map[int]string +} + +func (sf *structFields) append(name string) { + if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) { + if sf.unexported == nil { + sf.unexported = make(map[int]string) + } + sf.unexported[sf.count] = name + } + sf.count++ +} + +func newFileInfo(file *protogen.File) *fileInfo { + f := &fileInfo{File: file} + + // Collect all enums, messages, and extensions in "flattened ordering". + // See filetype.TypeBuilder. + var walkMessages func([]*protogen.Message, func(*protogen.Message)) + walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) { + for _, m := range messages { + f(m) + walkMessages(m.Messages, f) + } + } + initEnumInfos := func(enums []*protogen.Enum) { + for _, enum := range enums { + f.allEnums = append(f.allEnums, newEnumInfo(f, enum)) + } + } + initMessageInfos := func(messages []*protogen.Message) { + for _, message := range messages { + f.allMessages = append(f.allMessages, newMessageInfo(f, message)) + } + } + initExtensionInfos := func(extensions []*protogen.Extension) { + for _, extension := range extensions { + f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension)) + } + } + initEnumInfos(f.Enums) + initMessageInfos(f.Messages) + initExtensionInfos(f.Extensions) + walkMessages(f.Messages, func(m *protogen.Message) { + initEnumInfos(m.Enums) + initMessageInfos(m.Messages) + initExtensionInfos(m.Extensions) + }) + + // Derive a reverse mapping of enum and message pointers to their index + // in allEnums and allMessages. + if len(f.allEnums) > 0 { + f.allEnumsByPtr = make(map[*enumInfo]int) + for i, e := range f.allEnums { + f.allEnumsByPtr[e] = i + } + } + if len(f.allMessages) > 0 { + f.allMessagesByPtr = make(map[*messageInfo]int) + f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields) + for i, m := range f.allMessages { + f.allMessagesByPtr[m] = i + f.allMessageFieldsByPtr[m] = new(structFields) + } + } + + return f +} + +type enumInfo struct { + *protogen.Enum + + genJSONMethod bool + genRawDescMethod bool +} + +func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo { + e := &enumInfo{Enum: enum} + e.genJSONMethod = true + e.genRawDescMethod = true + return e +} + +type messageInfo struct { + *protogen.Message + + genRawDescMethod bool + genExtRangeMethod bool + + isTracked bool + hasWeak bool +} + +func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo { + m := &messageInfo{Message: message} + m.genRawDescMethod = true + m.genExtRangeMethod = true + m.isTracked = isTrackedMessage(m) + for _, field := range m.Fields { + m.hasWeak = m.hasWeak || field.Desc.IsWeak() + } + return m +} + +// isTrackedMessage reports whether field tracking is enabled on the message. +func isTrackedMessage(m *messageInfo) (tracked bool) { + const trackFieldUse_fieldNumber = 37383685 + + // Decode the option from unknown fields to avoid a dependency on the + // annotation proto from protoc-gen-go. 
+ b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown() + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + if num == trackFieldUse_fieldNumber && typ == protowire.VarintType { + v, _ := protowire.ConsumeVarint(b) + tracked = protowire.DecodeBool(v) + } + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + return tracked +} + +type extensionInfo struct { + *protogen.Extension +} + +func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo { + x := &extensionInfo{Extension: extension} + return x +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go new file mode 100644 index 000000000000..d34efa9b1cf1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -0,0 +1,884 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal_gengo is internal to the protobuf module. +package internal_gengo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/version" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +// SupportedFeatures reports the set of supported protobuf language features. +var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + +// GenerateVersionMarkers specifies whether to generate version markers. +var GenerateVersionMarkers = true + +// Standard library dependencies. +const ( + base64Package = protogen.GoImportPath("encoding/base64") + mathPackage = protogen.GoImportPath("math") + reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") + syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") +) + +// Protobuf library dependencies. +// +// These are declared as an interface type so that they can be more easily +// patched to support unique build environments that impose restrictions +// on the dependencies of generated source code. +var ( + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") +) + +type goImportPath interface { + String() string + Ident(string) protogen.GoIdent +} + +// GenerateFile generates the contents of a .pb.go file. 
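isTrackedMessage above reads the option straight out of the unknown-field bytes with protowire rather than importing the annotation proto. The same scan as a runnable sketch against hand-encoded bytes, using the field number from the generator code:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Hand-encode field 37383685 (trackFieldUse_fieldNumber above) as a
	// varint-typed bool set to true.
	const fieldNum = protowire.Number(37383685)
	b := protowire.AppendTag(nil, fieldNum, protowire.VarintType)
	b = protowire.AppendVarint(b, 1)

	// Walk the raw bytes the same way isTrackedMessage does.
	tracked := false
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if num == fieldNum && typ == protowire.VarintType {
			v, _ := protowire.ConsumeVarint(b)
			tracked = protowire.DecodeBool(v)
		}
		b = b[protowire.ConsumeFieldValue(num, typ, b):]
	}
	fmt.Println("tracked:", tracked) // tracked: true
}
```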
+func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + filename := file.GeneratedFilenamePrefix + ".pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + f := newFileInfo(file) + + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) + genGeneratedHeader(gen, g, f) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) + g.P() + + // Emit a static check that enforces a minimum version of the proto package. + if GenerateVersionMarkers { + g.P("const (") + g.P("// Verify that this generated code is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")") + g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")") + g.P(")") + g.P() + } + + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + genImport(gen, g, f, imps.Get(i)) + } + for _, enum := range f.allEnums { + genEnum(g, f, enum) + } + for _, message := range f.allMessages { + genMessage(g, f, message) + } + genExtensions(g, f) + + genReflectFileDescriptor(gen, g, f) + + return g +} + +// genStandaloneComments prints all leading comments for a FileDescriptorProto +// location identified by the field number n. +func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) { + loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n}) + for _, s := range loc.LeadingDetachedComments { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.LeadingComments; s != "" { + g.P(protogen.Comments(s)) + g.P() + } +} + +func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + + if GenerateVersionMarkers { + g.P("// versions:") + protocGenGoVersion := version.String() + protocVersion := "(unknown)" + if v := gen.Request.GetCompilerVersion(); v != nil { + protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + if s := v.GetSuffix(); s != "" { + protocVersion += "-" + s + } + } + g.P("// \tprotoc-gen-go ", protocGenGoVersion) + g.P("// \tprotoc ", protocVersion) + } + + if f.Proto.GetOptions().GetDeprecated() { + g.P("// ", f.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", f.Desc.Path()) + } + g.P() +} + +func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) { + impFile, ok := gen.FilesByPath[imp.Path()] + if !ok { + return + } + if impFile.GoImportPath == f.GoImportPath { + // Don't generate imports or aliases for types in the same Go package. + return + } + // Generate imports for all non-weak dependencies, even if they are not + // referenced, because other code and tools depend on having the + // full transitive closure of protocol buffer types in the binary. + if !imp.IsWeak { + g.Import(impFile.GoImportPath) + } + if !imp.IsPublic { + return + } + + // Generate public imports by generating the imported file, parsing it, + // and extracting every symbol that should receive a forwarding declaration. 
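genImport handles public imports by rendering the imported file and then parsing it back with go/parser to find every exported declaration worth forwarding. The filtering step in isolation, as a self-contained sketch over a toy source string:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"unicode"
	"unicode/utf8"
)

const src = `package p

type Widget struct{}
type hidden struct{}
const MaxWidgets = 8
var DefaultWidget = Widget{}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for _, decl := range f.Decls {
		gd, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, spec := range gd.Specs {
			switch spec := spec.(type) {
			case *ast.TypeSpec:
				forward(gd.Tok, spec.Name.Name)
			case *ast.ValueSpec:
				for _, name := range spec.Names {
					forward(gd.Tok, name.Name)
				}
			}
		}
	}
}

// forward mimics genForward's exported-name filter: only symbols whose first
// rune is upper-case receive an alias like `type T = somepackage.T`; the
// lower-case `hidden` type is skipped.
func forward(tok token.Token, name string) {
	if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) {
		return
	}
	fmt.Printf("%s %s = somepackage.%s\n", tok, name, name)
}
```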
+ impGen := GenerateFile(gen, impFile) + impGen.Skip() + b, err := impGen.Content() + if err != nil { + gen.Error(err) + return + } + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments) + if err != nil { + gen.Error(err) + return + } + genForward := func(tok token.Token, name string, expr ast.Expr) { + // Don't import unexported symbols. + r, _ := utf8.DecodeRuneInString(name) + if !unicode.IsUpper(r) { + return + } + // Don't import the FileDescriptor. + if name == impFile.GoDescriptorIdent.GoName { + return + } + // Don't import decls referencing a symbol defined in another package. + // i.e., don't import decls which are themselves public imports: + // + // type T = somepackage.T + if _, ok := expr.(*ast.SelectorExpr); ok { + return + } + g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name)) + } + g.P("// Symbols defined in public import of ", imp.Path(), ".") + g.P() + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + genForward(decl.Tok, spec.Name.Name, spec.Type) + case *ast.ValueSpec: + for i, name := range spec.Names { + var expr ast.Expr + if i < len(spec.Values) { + expr = spec.Values[i] + } + genForward(decl.Tok, name.Name, expr) + } + case *ast.ImportSpec: + default: + panic(fmt.Sprintf("can't generate forward for spec type %T", spec)) + } + } + } + } + g.P() +} + +func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + // Enum type declaration. + g.Annotate(e.GoIdent.GoName, e.Location) + leadingComments := appendDeprecationSuffix(e.Comments.Leading, + e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated()) + g.P(leadingComments, + "type ", e.GoIdent, " int32") + + // Enum value constants. + g.P("const (") + for _, value := range e.Values { + g.Annotate(value.GoIdent.GoName, value.Location) + leadingComments := appendDeprecationSuffix(value.Comments.Leading, + value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated()) + g.P(leadingComments, + value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(), + trailingComment(value.Comments.Trailing)) + } + g.P(")") + g.P() + + // Enum value maps. + g.P("// Enum value maps for ", e.GoIdent, ".") + g.P("var (") + g.P(e.GoIdent.GoName+"_name", " = map[int32]string{") + for _, value := range e.Values { + duplicate := "" + if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) { + duplicate = "// Duplicate value: " + } + g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",") + } + g.P("}") + g.P(e.GoIdent.GoName+"_value", " = map[string]int32{") + for _, value := range e.Values { + g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",") + } + g.P("}") + g.P(")") + g.P() + + // Enum method. + // + // NOTE: A pointer value is needed to represent presence in proto2. + // Since a proto2 message can reference a proto3 enum, it is useful to + // always generate this method (even on proto3 enums) to support that case. + g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {") + g.P("p := new(", e.GoIdent, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + + // String method. + g.P("func (x ", e.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))") + g.P("}") + g.P() + + genEnumReflectMethods(g, f, e) + + // UnmarshalJSON method. 
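genEnum's output is easier to read than its generator. Roughly the shape it emits for a two-value enum, with illustrative names and the reflection, JSON, and descriptor methods omitted:

```go
package main

import "fmt"

// Color mimics what genEnum produces: an int32 type, value constants,
// and the parallel name/value maps.
type Color int32

const (
	Color_COLOR_UNSPECIFIED Color = 0
	Color_COLOR_RED         Color = 1
)

var (
	Color_name = map[int32]string{
		0: "COLOR_UNSPECIFIED",
		1: "COLOR_RED",
	}
	Color_value = map[string]int32{
		"COLOR_UNSPECIFIED": 0,
		"COLOR_RED":         1,
	}
)

// Enum returns a pointer, which is how presence is represented in proto2;
// as the generator comment notes, it is emitted for proto3 enums too so
// proto2 messages can reference them.
func (x Color) Enum() *Color {
	p := new(Color)
	*p = x
	return p
}

func main() {
	c := Color_COLOR_RED
	fmt.Println(Color_name[int32(c)], *c.Enum() == c) // COLOR_RED true
}
```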
+ if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 { + g.P("// Deprecated: Do not use.") + g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {") + g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)") + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", e.GoIdent, "(num)") + g.P("return nil") + g.P("}") + g.P() + } + + // EnumDescriptor method. + if e.genRawDescMethod { + var indexes []string + for i := 1; i < len(e.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i]))) + } + g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.") + g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + if m.Desc.IsMapEntry() { + return + } + + // Message type declaration. + g.Annotate(m.GoIdent.GoName, m.Location) + leadingComments := appendDeprecationSuffix(m.Comments.Leading, + m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated()) + g.P(leadingComments, + "type ", m.GoIdent, " struct {") + genMessageFields(g, f, m) + g.P("}") + g.P() + + genMessageKnownFunctions(g, f, m) + genMessageDefaultDecls(g, f, m) + genMessageMethods(g, f, m) + genMessageOneofWrapperTypes(g, f, m) +} + +func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + sf := f.allMessageFieldsByPtr[m] + genMessageInternalFields(g, f, m, sf) + for _, field := range m.Fields { + genMessageField(g, f, m, field, sf) + } +} + +func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) + if m.hasWeak { + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) + } + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) + if m.Desc.ExtensionRanges().Len() > 0 { + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + sf.append(genid.ExtensionFields_goname) + } + if sf.count > 0 { + g.P() + } +} + +func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) { + if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() { + // It would be a bit simpler to iterate over the oneofs below, + // but generating the field here keeps the contents of the Go + // struct in the same order as the contents of the source + // .proto file. + if oneof.Fields[0] != field { + return // only generate for first appearance + } + + tags := structTags{ + {"protobuf_oneof", string(oneof.Desc.Name())}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + + g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location) + leadingComments := oneof.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)} + for _, field := range oneof.Fields { + ss = append(ss, "\t*"+field.GoIdent.GoName+"\n") + } + leadingComments += protogen.Comments(strings.Join(ss, "")) + g.P(leadingComments, + oneof.GoName, " ", oneofInterfaceName(oneof), tags) + sf.append(oneof.GoName) + return + } + goType, pointer := fieldGoType(g, f, field) + if pointer { + goType = "*" + goType + } + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + {"json", fieldJSONTagValue(field)}, + } + if field.Desc.IsMap() { + key := field.Message.Fields[0] + val := field.Message.Fields[1] + tags = append(tags, structTags{ + {"protobuf_key", fieldProtobufTagValue(key)}, + {"protobuf_val", fieldProtobufTagValue(val)}, + }...) + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + + name := field.GoName + if field.Desc.IsWeak() { + name = genid.WeakFieldPrefix_goname + name + } + g.Annotate(m.GoIdent.GoName+"."+name, field.Location) + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + name, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + sf.append(field.GoName) +} + +// genMessageDefaultDecls generates consts and vars holding the default +// values of fields. +func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + var consts, vars []string + for _, field := range m.Fields { + if !field.Desc.HasDefault() { + continue + } + name := "Default_" + m.GoIdent.GoName + "_" + field.GoName + goType, _ := fieldGoType(g, f, field) + defVal := field.Desc.Default() + switch field.Desc.Kind() { + case protoreflect.StringKind: + consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String())) + case protoreflect.BytesKind: + vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes())) + case protoreflect.EnumKind: + idx := field.Desc.DefaultEnumValue().Index() + val := field.Enum.Values[idx] + if val.GoIdent.GoImportPath == f.GoImportPath { + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. 
+ consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s", + name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent))) + } + case protoreflect.FloatKind, protoreflect.DoubleKind: + if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { + var fn, arg string + switch f := defVal.Float(); { + case math.IsInf(f, -1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1" + case math.IsInf(f, +1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1" + case math.IsNaN(f): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), "" + } + vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg)) + } else { + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f)) + } + default: + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface())) + } + } + if len(consts) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("const (") + for _, s := range consts { + g.P(s) + } + g.P(")") + } + if len(vars) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("var (") + for _, s := range vars { + g.P(s) + } + g.P(")") + } + g.P() +} + +func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + genMessageBaseMethods(g, f, m) + genMessageGetterMethods(g, f, m) + genMessageSetterMethods(g, f, m) +} + +func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + // Reset method. + g.P("func (x *", m.GoIdent, ") Reset() {") + g.P("*x = ", m.GoIdent, "{}") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {") + g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("}") + g.P() + + // String method. + g.P("func (x *", m.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)") + g.P("}") + g.P() + + // ProtoMessage method. + g.P("func (*", m.GoIdent, ") ProtoMessage() {}") + g.P() + + // ProtoReflect method. + genMessageReflectMethods(g, f, m) + + // Descriptor method. + if m.genRawDescMethod { + var indexes []string + for i := 1; i < len(m.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i]))) + } + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.") + g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + genNoInterfacePragma(g, m.isTracked) + + // Getter for parent oneof. + if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() { + g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location) + g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {") + g.P("if m != nil {") + g.P("return m.", oneof.GoName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + } + + // Getter for message field. 
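+ // The switch below emits one getter per field. As a hedged sketch
+ // (names invented), a proto2 scalar field with explicit presence and
+ // a declared default takes the default-case shape:
+ //
+ //	func (x *Msg) GetCount() int32 {
+ //		if x != nil && x.Count != nil {
+ //			return *x.Count
+ //		}
+ //		return Default_Msg_Count
+ //	}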
+ goType, pointer := fieldGoType(g, f, field) + defaultValue := fieldDefaultValue(g, f, m, field) + g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + switch { + case field.Desc.IsWeak(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") + g.P("var w ", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") + g.P("}") + case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {") + g.P("return x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + default: + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + if !field.Desc.HasPresence() || defaultValue == "nil" { + g.P("if x != nil {") + } else { + g.P("if x != nil && x.", field.GoName, " != nil {") + } + star := "" + if pointer { + star = "*" + } + g.P("return ", star, " x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + } + g.P() + } +} + +func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + if !field.Desc.IsWeak() { + continue + } + + genNoInterfacePragma(g, m.isTracked) + + g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") + g.P("var w *", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = &x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") + g.P("}") + g.P() + } +} + +// fieldGoType returns the Go type used for a field. +// +// If it returns pointer=true, the struct field is a pointer to the type. 
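+//
+// As a hedged sketch (message and field names invented), the mapping
+// below yields generated struct fields such as:
+//
+//	Count *int32            // proto2 scalar: presence tracked via pointer
+//	Data  []byte            // bytes: nil slice conveys absence
+//	Child *ChildMsg         // message: the pointer is part of the type
+//	Tags  []string          // repeated: never a pointer
+//	Attrs map[string]string // map: never a pointer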
+func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) { + if field.Desc.IsWeak() { + return "struct{}", false + } + + pointer = field.Desc.HasPresence() + switch field.Desc.Kind() { + case protoreflect.BoolKind: + goType = "bool" + case protoreflect.EnumKind: + goType = g.QualifiedGoIdent(field.Enum.GoIdent) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + goType = "int32" + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + goType = "uint32" + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + goType = "int64" + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + goType = "uint64" + case protoreflect.FloatKind: + goType = "float32" + case protoreflect.DoubleKind: + goType = "float64" + case protoreflect.StringKind: + goType = "string" + case protoreflect.BytesKind: + goType = "[]byte" + pointer = false // rely on nullability of slices for presence + case protoreflect.MessageKind, protoreflect.GroupKind: + goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent) + pointer = false // pointer captured as part of the type + } + switch { + case field.Desc.IsList(): + return "[]" + goType, false + case field.Desc.IsMap(): + keyType, _ := fieldGoType(g, f, field.Message.Fields[0]) + valType, _ := fieldGoType(g, f, field.Message.Fields[1]) + return fmt.Sprintf("map[%v]%v", keyType, valType), false + } + return goType, pointer +} + +func fieldProtobufTagValue(field *protogen.Field) string { + var enumName string + if field.Desc.Kind() == protoreflect.EnumKind { + enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc) + } + return tag.Marshal(field.Desc, enumName) +} + +func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string { + if field.Desc.IsList() { + return "nil" + } + if field.Desc.HasDefault() { + defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName + if field.Desc.Kind() == protoreflect.BytesKind { + return "append([]byte(nil), " + defVarName + "...)" + } + return defVarName + } + switch field.Desc.Kind() { + case protoreflect.BoolKind: + return "false" + case protoreflect.StringKind: + return `""` + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: + return "nil" + case protoreflect.EnumKind: + val := field.Enum.Values[0] + if val.GoIdent.GoImportPath == f.GoImportPath { + return g.QualifiedGoIdent(val.GoIdent) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. 
+ return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")" + } + default: + return "0" + } +} + +func fieldJSONTagValue(field *protogen.Field) string { + return string(field.Desc.Name()) + ",omitempty" +} + +func genExtensions(g *protogen.GeneratedFile, f *fileInfo) { + if len(f.allExtensions) == 0 { + return + } + + g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{") + for _, x := range f.allExtensions { + g.P("{") + g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),") + goType, pointer := fieldGoType(g, f, x.Extension) + if pointer { + goType = "*" + goType + } + g.P("ExtensionType: (", goType, ")(nil),") + g.P("Field: ", x.Desc.Number(), ",") + g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",") + g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",") + g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",") + g.P("},") + } + g.P("}") + g.P() + + // Group extensions by the target message. + var orderedTargets []protogen.GoIdent + allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo) + allExtensionsByPtr := make(map[*extensionInfo]int) + for i, x := range f.allExtensions { + target := x.Extendee.GoIdent + if len(allExtensionsByTarget[target]) == 0 { + orderedTargets = append(orderedTargets, target) + } + allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x) + allExtensionsByPtr[x] = i + } + for _, target := range orderedTargets { + g.P("// Extension fields to ", target, ".") + g.P("var (") + for _, x := range allExtensionsByTarget[target] { + xd := x.Desc + typeName := xd.Kind().String() + switch xd.Kind() { + case protoreflect.EnumKind: + typeName = string(xd.Enum().FullName()) + case protoreflect.MessageKind, protoreflect.GroupKind: + typeName = string(xd.Message().FullName()) + } + fieldName := string(xd.Name()) + + leadingComments := x.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n", + xd.Cardinality(), typeName, fieldName, xd.Number())) + leadingComments = appendDeprecationSuffix(leadingComments, + x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]", + trailingComment(x.Comments.Trailing)) + } + g.P(")") + g.P() + } +} + +// genMessageOneofWrapperTypes generates the oneof wrapper types and +// associates the types with the parent message type. +func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, oneof := range m.Oneofs { + if oneof.Desc.IsSynthetic() { + continue + } + ifName := oneofInterfaceName(oneof) + g.P("type ", ifName, " interface {") + g.P(ifName, "()") + g.P("}") + g.P() + for _, field := range oneof.Fields { + g.Annotate(field.GoIdent.GoName, field.Location) + g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location) + g.P("type ", field.GoIdent, " struct {") + goType, _ := fieldGoType(g, f, field) + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ }
+ leadingComments := appendDeprecationSuffix(field.Comments.Leading,
+ field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ g.P(leadingComments,
+ field.GoName, " ", goType, tags,
+ trailingComment(field.Comments.Trailing))
+ g.P("}")
+ g.P()
+ }
+ for _, field := range oneof.Fields {
+ g.P("func (*", field.GoIdent, ") ", ifName, "() {}")
+ g.P()
+ }
+ }
+}
+
+// oneofInterfaceName returns the name of the interface type implemented by
+// the oneof field value types.
+func oneofInterfaceName(oneof *protogen.Oneof) string {
+ return "is" + oneof.GoIdent.GoName
+}
+
+// genNoInterfacePragma generates a standalone "nointerface" pragma to
+// decorate methods with field-tracking support.
+func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) {
+ if tracked {
+ g.P("//go:nointerface")
+ g.P()
+ }
+}
+
+var gotrackTags = structTags{{"go", "track"}}
+
+// structTags is a data structure for building idiomatic Go struct tags.
+// Each [2]string is a key-value pair, where value is the unescaped string.
+//
+// Example: structTags{{"key", "value"}}.String() -> `key:"value"`
+type structTags [][2]string
+
+func (tags structTags) String() string {
+ if len(tags) == 0 {
+ return ""
+ }
+ var ss []string
+ for _, tag := range tags {
+ // NOTE: When quoting the value, we need to make sure the backtick
+ // character does not appear. Convert all cases to the escaped hex form.
+ key := tag[0]
+ val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1)
+ ss = append(ss, fmt.Sprintf("%s:%s", key, val))
+ }
+ return "`" + strings.Join(ss, " ") + "`"
+}
+
+// appendDeprecationSuffix optionally appends a deprecation notice as a suffix.
+func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments {
+ if !deprecated {
+ return prefix
+ }
+ if prefix != "" {
+ prefix += "\n"
+ }
+ return prefix + " Deprecated: Do not use.\n"
+}
+
+// trailingComment is like protogen.Comments, but lacks a trailing newline.
+type trailingComment protogen.Comments
+
+func (c trailingComment) String() string {
+ s := strings.TrimSuffix(protogen.Comments(c).String(), "\n")
+ if strings.Contains(s, "\n") {
+ // We don't support multi-line trailing comments as it is unclear
+ // how to best render them in the generated code.
+ return ""
+ }
+ return s
+}
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go
new file mode 100644
index 000000000000..1319a1267507
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go
@@ -0,0 +1,351 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package internal_gengo + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor")) + g.P() + + genFileDescriptor(gen, g, f) + if len(f.allEnums) > 0 { + g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")") + } + if len(f.allMessages) > 0 { + g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")") + } + + // Generate a unique list of Go types for all declarations and dependencies, + // and the associated index into the type list for all dependencies. + var goTypes []string + var depIdxs []string + seen := map[protoreflect.FullName]int{} + genDep := func(name protoreflect.FullName, depSource string) { + if depSource != "" { + line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name) + depIdxs = append(depIdxs, line) + } + } + genEnum := func(e *protogen.Enum, depSource string) { + if e != nil { + name := e.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name) + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + genMessage := func(m *protogen.Message, depSource string) { + if m != nil { + name := m.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name) + if m.Desc.IsMapEntry() { + // Map entry messages have no associated Go type. + line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name) + } + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + + // This ordering is significant. + // See filetype.TypeBuilder.DependencyIndexes. 
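+ // As a hedged sketch of the generated output (file and type names
+ // invented), the goTypes and depIdxs slices built here look like:
+ //
+ //	var file_example_proto_goTypes = []interface{}{
+ //		(ExampleEnum)(0),   // 0: example.ExampleEnum
+ //		(*ExampleMsg)(nil), // 1: example.ExampleMsg
+ //	}
+ //	var file_example_proto_depIdxs = []int32{
+ //		0, // 0: example.ExampleMsg.kind:type_name -> example.ExampleEnum
+ //		1, // [1:1] is the sub-list for method output_type
+ //		1, // [1:1] is the sub-list for method input_type
+ //		1, // [1:1] is the sub-list for extension type_name
+ //		1, // [1:1] is the sub-list for extension extendee
+ //		0, // [0:1] is the sub-list for field type_name
+ //	}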
+ type offsetEntry struct { + start int + name string + } + var depOffsets []offsetEntry + for _, enum := range f.allEnums { + genEnum(enum.Enum, "") + } + for _, message := range f.allMessages { + genMessage(message.Message, "") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"}) + for _, message := range f.allMessages { + for _, field := range message.Fields { + if field.Desc.IsWeak() { + continue + } + source := string(field.Desc.FullName()) + genEnum(field.Enum, source+":type_name") + genMessage(field.Message, source+":type_name") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genMessage(extension.Extendee, source+":extendee") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genEnum(extension.Enum, source+":type_name") + genMessage(extension.Message, source+":type_name") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Input, source+":input_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Output, source+":output_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""}) + for i := len(depOffsets) - 2; i >= 0; i-- { + curr, next := depOffsets[i], depOffsets[i+1] + depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s", + curr.start, curr.start, next.start, curr.name)) + } + if len(depIdxs) > math.MaxInt32 { + panic("too many dependencies") // sanity check + } + + g.P("var ", goTypesVarName(f), " = []interface{}{") + for _, s := range goTypes { + g.P(s) + } + g.P("}") + + g.P("var ", depIdxsVarName(f), " = []int32{") + for _, s := range depIdxs { + g.P(s) + } + g.P("}") + + g.P("func init() { ", initFuncName(f.File), "() }") + + g.P("func ", initFuncName(f.File), "() {") + g.P("if ", f.GoDescriptorIdent, " != nil {") + g.P("return") + g.P("}") + + // Ensure that initialization functions for different files in the same Go + // package run in the correct order: Call the init funcs for every .proto file + // imported by this one that is in the same Go package. + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + impFile := gen.FilesByPath[imps.Get(i).Path()] + if impFile.GoImportPath != f.GoImportPath { + continue + } + g.P(initFuncName(impFile), "()") + } + + if len(f.allMessages) > 0 { + // Populate MessageInfo.Exporters. + g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {") + for _, message := range f.allMessages { + if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P("switch v := v.(*", message.GoIdent, "); i {") + for i := 0; i < sf.count; i++ { + if name := sf.unexported[i]; name != "" { + g.P("case ", i, ": return &v.", name) + } + } + g.P("default: return nil") + g.P("}") + g.P("}") + } + } + g.P("}") + + // Populate MessageInfo.OneofWrappers. 
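+ // As a hedged sketch (names invented), the loop below produces
+ // initialization code of the form:
+ //
+ //	file_example_proto_msgTypes[0].OneofWrappers = []interface{}{
+ //		(*ExampleMsg_Name)(nil),
+ //		(*ExampleMsg_Id)(nil),
+ //	}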
+ for _, message := range f.allMessages { + if len(message.Oneofs) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + // Associate the wrapper types by directly passing them to the MessageInfo. + g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + for _, oneof := range message.Oneofs { + if !oneof.Desc.IsSynthetic() { + for _, field := range oneof.Fields { + g.P("(*", field.GoIdent, ")(nil),") + } + } + } + g.P("}") + } + } + } + + g.P("type x struct{}") + g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{") + g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{") + g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),") + g.P("RawDescriptor: ", rawDescVarName(f), ",") + g.P("NumEnums: ", len(f.allEnums), ",") + g.P("NumMessages: ", len(f.allMessages), ",") + g.P("NumExtensions: ", len(f.allExtensions), ",") + g.P("NumServices: ", len(f.Services), ",") + g.P("},") + g.P("GoTypes: ", goTypesVarName(f), ",") + g.P("DependencyIndexes: ", depIdxsVarName(f), ",") + if len(f.allEnums) > 0 { + g.P("EnumInfos: ", enumTypesVarName(f), ",") + } + if len(f.allMessages) > 0 { + g.P("MessageInfos: ", messageTypesVarName(f), ",") + } + if len(f.allExtensions) > 0 { + g.P("ExtensionInfos: ", extensionTypesVarName(f), ",") + } + g.P("}.Build()") + g.P(f.GoDescriptorIdent, " = out.File") + + // Set inputs to nil to allow GC to reclaim resources. + g.P(rawDescVarName(f), " = nil") + g.P(goTypesVarName(f), " = nil") + g.P(depIdxsVarName(f), " = nil") + g.P("}") +} + +func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto) + descProto.SourceCodeInfo = nil // drop source code information + + b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto) + if err != nil { + gen.Error(err) + return + } + + g.P("var ", rawDescVarName(f), " = []byte{") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") + g.P() + + if f.needRawDesc { + onceVar := rawDescVarName(f) + "Once" + dataVar := rawDescVarName(f) + "Data" + g.P("var (") + g.P(onceVar, " ", syncPackage.Ident("Once")) + g.P(dataVar, " = ", rawDescVarName(f)) + g.P(")") + g.P() + + g.P("func ", rawDescVarName(f), "GZIP() []byte {") + g.P(onceVar, ".Do(func() {") + g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")") + g.P("})") + g.P("return ", dataVar) + g.P("}") + g.P() + } +} + +func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + idx := f.allEnumsByPtr[e] + typesVar := enumTypesVarName(f) + + // Descriptor method. + g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {") + g.P("return ", typesVar, "[", idx, "].Descriptor()") + g.P("}") + g.P() + + // Type method. + g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {") + g.P("return &", typesVar, "[", idx, "]") + g.P("}") + g.P() + + // Number method. + g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {") + g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)") + g.P("}") + g.P() +} + +func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + idx := f.allMessagesByPtr[m] + typesVar := messageTypesVarName(f) + + // ProtoReflect method. 
+ g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {") + g.P("mi := &", typesVar, "[", idx, "]") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("if ms.LoadMessageInfo() == nil {") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("return ms") + g.P("}") + g.P("return mi.MessageOf(x)") + g.P("}") + g.P() +} + +func fileVarName(f *protogen.File, suffix string) string { + prefix := f.GoDescriptorIdent.GoName + _, n := utf8.DecodeRuneInString(prefix) + prefix = strings.ToLower(prefix[:n]) + prefix[n:] + return prefix + "_" + suffix +} +func rawDescVarName(f *fileInfo) string { + return fileVarName(f.File, "rawDesc") +} +func goTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "goTypes") +} +func depIdxsVarName(f *fileInfo) string { + return fileVarName(f.File, "depIdxs") +} +func enumTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "enumTypes") +} +func messageTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "msgTypes") +} +func extensionTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "extTypes") +} +func initFuncName(f *protogen.File) string { + return fileVarName(f, "init") +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 000000000000..696dddb9f060 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1080 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. + + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... 
// make use of m
+
+ If the exact concrete type is not known, then the UnmarshalNew method can be
+ used to unmarshal the contents into a new instance of the remote message type:
+
+ m, err := any.UnmarshalNew()
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of m
+
+ UnmarshalNew uses the global type registry to resolve the message type and
+ construct a new instance of that message to unmarshal into. In order for a
+ message type to appear in the global registry, the Go type representing that
+ protobuf message type must be linked into the Go binary. For messages
+ generated by protoc-gen-go, this is achieved through an import of the
+ generated Go package representing a .proto file.
+
+ A common pattern with UnmarshalNew is to use a type switch with the resulting
+ proto.Message value:
+
+ switch m := m.(type) {
+ case *foopb.MyMessage:
+ ... // make use of m as a *foopb.MyMessage
+ case *barpb.OtherMessage:
+ ... // make use of m as a *barpb.OtherMessage
+ case *bazpb.SomeMessage:
+ ... // make use of m as a *bazpb.SomeMessage
+ }
+
+ This pattern ensures that the generated packages containing the message types
+ listed in the case clauses are linked into the Go binary and therefore also
+ registered in the global registry.
+
+
+ Type checking an Any
+
+ In order to type check whether an Any message represents some other message,
+ use the MessageIs method:
+
+ if any.MessageIs((*foopb.MyMessage)(nil)) {
+ ... // make use of any, knowing that it contains a foopb.MyMessage
+ }
+
+ The MessageIs method can also be used with an allocated instance of the target
+ message type if the intention is to unmarshal into it if the type matches:
+
+ m := new(foopb.MyMessage)
+ if any.MessageIs(m) {
+ if err := any.UnmarshalTo(m); err != nil {
+ ... // handle error
+ }
+ ... // make use of m
+ }
+
+`
+ case genid.File_google_protobuf_timestamp_proto:
+ return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `.
+
+ The Timestamp message represents a timestamp,
+ an instant in time since the Unix epoch (January 1st, 1970).
+
+
+ Conversion to a Go Time
+
+ The AsTime method can be used to convert a Timestamp message to a
+ standard Go time.Time value in UTC:
+
+ t := ts.AsTime()
+ ... // make use of t as a time.Time
+
+ Converting to a time.Time is a common operation so that the extensive
+ set of time-based operations provided by the time package can be leveraged.
+ See https://golang.org/pkg/time for more information.
+
+ The AsTime method performs the conversion on a best-effort basis. Timestamps
+ with denormal values (e.g., nanoseconds outside the range of 0 to 999999999, inclusive)
+ are normalized during the conversion to a time.Time. To manually check for
+ invalid Timestamps per the documented limitations in timestamp.proto,
+ additionally call the CheckValid method:
+
+ if err := ts.CheckValid(); err != nil {
+ ... // handle error
+ }
+
+
+ Conversion from a Go Time
+
+ The timestamppb.New function can be used to construct a Timestamp message
+ from a standard Go time.Time value:
+
+ ts := timestamppb.New(t)
+ ... // make use of ts as a *timestamppb.Timestamp
+
+ In order to construct a Timestamp representing the current time, use Now:
+
+ ts := timestamppb.Now()
+ ... // make use of ts as a *timestamppb.Timestamp
+
+`
+ case genid.File_google_protobuf_duration_proto:
+ return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `.
+
+ The Duration message represents a signed span of time.
+
+
+ Conversion to a Go Duration
+
+ The AsDuration method can be used to convert a Duration message to a
+ standard Go time.Duration value:
+
+ d := dur.AsDuration()
+ ... // make use of d as a time.Duration
+
+ Converting to a time.Duration is a common operation so that the extensive
+ set of time-based operations provided by the time package can be leveraged.
+ See https://golang.org/pkg/time for more information.
+
+ The AsDuration method performs the conversion on a best-effort basis.
+ Durations with denormal values (e.g., nanoseconds outside the range of
+ -999999999 to +999999999, inclusive; or seconds and nanoseconds with opposite signs)
+ are normalized during the conversion to a time.Duration. To manually check for
+ invalid Durations per the documented limitations in duration.proto,
+ additionally call the CheckValid method:
+
+ if err := dur.CheckValid(); err != nil {
+ ... // handle error
+ }
+
+ Note that the documented limitations in duration.proto do not protect a
+ Duration from overflowing the representable range of a time.Duration in Go.
+ The AsDuration method uses saturation arithmetic such that an overflow clamps
+ the resulting value to the closest representable value (e.g., math.MaxInt64
+ for positive overflow and math.MinInt64 for negative overflow).
+
+
+ Conversion from a Go Duration
+
+ The durationpb.New function can be used to construct a Duration message
+ from a standard Go time.Duration value:
+
+ dur := durationpb.New(d)
+ ... // make use of dur as a *durationpb.Duration
+
+`
+ case genid.File_google_protobuf_struct_proto:
+ return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `.
+
+ The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+ used to represent arbitrary JSON. The Value message represents a JSON value,
+ the Struct message represents a JSON object, and the ListValue message
+ represents a JSON array. See https://json.org for more information.
+
+ The Value, Struct, and ListValue types have generated MarshalJSON and
+ UnmarshalJSON methods such that they serialize JSON equivalent to what the
+ messages themselves represent. Use of these types with the
+ "google.golang.org/protobuf/encoding/protojson" package
+ ensures that they will be serialized as their JSON equivalent.
+
+
+ Conversion to and from a Go interface
+
+ The standard Go "encoding/json" package has functionality to serialize
+ arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+ ListValue.AsSlice methods can convert the protobuf message representation into
+ a form represented by interface{}, map[string]interface{}, and []interface{}.
+ This form can be used with other packages that operate on such data structures
+ and also directly with the standard json package.
+
+ In order to convert the interface{}, map[string]interface{}, and []interface{}
+ forms back into Value, Struct, and ListValue messages, use the NewStruct,
+ NewList, and NewValue constructor functions.
+
+
+ Example usage
+
+ Consider the following example JSON object:
+
+ {
+ "firstName": "John",
+ "lastName": "Smith",
+ "isAlive": true,
+ "age": 27,
+ "address": {
+ "streetAddress": "21 2nd Street",
+ "city": "New York",
+ "state": "NY",
+ "postalCode": "10021-3100"
+ },
+ "phoneNumbers": [
+ {
+ "type": "home",
+ "number": "212 555-1234"
+ },
+ {
+ "type": "office",
+ "number": "646 555-4567"
+ }
+ ],
+ "children": [],
+ "spouse": null
+ }
+
+ To construct a Value message representing the above JSON object:
+
+ m, err := structpb.NewValue(map[string]interface{}{
+ "firstName": "John",
+ "lastName": "Smith",
+ "isAlive": true,
+ "age": 27,
+ "address": map[string]interface{}{
+ "streetAddress": "21 2nd Street",
+ "city": "New York",
+ "state": "NY",
+ "postalCode": "10021-3100",
+ },
+ "phoneNumbers": []interface{}{
+ map[string]interface{}{
+ "type": "home",
+ "number": "212 555-1234",
+ },
+ map[string]interface{}{
+ "type": "office",
+ "number": "646 555-4567",
+ },
+ },
+ "children": []interface{}{},
+ "spouse": nil,
+ })
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of m as a *structpb.Value
+
+`
+ case genid.File_google_protobuf_field_mask_proto:
+ return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `.
+
+ The FieldMask message represents a set of symbolic field paths.
+ The paths are specific to some target message type,
+ which is not stored within the FieldMask message itself.
+
+
+ Constructing a FieldMask
+
+ The New function is used to construct a FieldMask:
+
+ var messageType *descriptorpb.DescriptorProto
+ fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of fm
+
+ The "field.name" and "field.number" paths are valid paths according to the
+ google.protobuf.DescriptorProto message. Use of a path that does not correlate
+ to valid fields reachable from DescriptorProto would result in an error.
+
+ Once a FieldMask message has been constructed,
+ the Append method can be used to insert additional paths into the path set:
+
+ var messageType *descriptorpb.DescriptorProto
+ if err := fm.Append(messageType, "options"); err != nil {
+ ... // handle error
+ }
+
+
+ Type checking a FieldMask
+
+ In order to verify that a FieldMask represents a set of fields that are
+ reachable from some target message type, use the IsValid method:
+
+ var messageType *descriptorpb.DescriptorProto
+ if fm.IsValid(messageType) {
+ ... // make use of fm
+ }
+
+ IsValid needs to be passed the target message type as an input since the
+ FieldMask message itself does not store the message type that the set of paths
+ are for.
+`
+ default:
+ return ""
+ }
+}
+
+func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ switch m.Desc.FullName() {
+ case genid.Any_message_fullname:
+ g.P("// New marshals src into a new Any instance.")
+ g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {")
+ g.P(" dst := new(Any)")
+ g.P(" if err := dst.MarshalFrom(src); err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return dst, nil")
+ g.P("}")
+ g.P()
+
+ g.P("// MarshalFrom marshals src into dst as the underlying message")
+ g.P("// using the provided marshal options.")
+ g.P("//")
+ g.P("// If no options are specified, call dst.MarshalFrom instead.")
+ g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {")
+ g.P(" const urlPrefix = \"type.googleapis.com/\"")
+ g.P(" if src == nil {")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")")
+ g.P(" }")
+ g.P(" b, err := opts.Marshal(src)")
+ g.P(" if err != nil {")
+ g.P(" return err")
+ g.P(" }")
+ g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())")
+ g.P(" dst.Value = b")
+ g.P(" return nil")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalTo unmarshals the underlying message from src into dst")
+ g.P("// using the provided unmarshal options.")
+ g.P("// It reports an error if dst is not of the right message type.")
+ g.P("//")
+ g.P("// If no options are specified, call src.UnmarshalTo instead.")
+ g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {")
+ g.P(" if src == nil {")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")")
+ g.P(" }")
+ g.P(" if !src.MessageIs(dst) {")
+ g.P(" got := dst.ProtoReflect().Descriptor().FullName()")
+ g.P(" want := src.MessageName()")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)")
+ g.P(" }")
+ g.P(" return opts.Unmarshal(src.GetValue(), dst)")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalNew unmarshals the underlying message from src into dst,")
+ g.P("// which is a newly created message using a type resolved from the type URL.")
+ g.P("// The message type is resolved according to opts.Resolver,")
+ g.P("// which should implement protoregistry.MessageTypeResolver.")
+ g.P("// It reports an error if the underlying message type could not be resolved.")
+ g.P("//")
+ g.P("// If no options are specified, call src.UnmarshalNew instead.")
+ g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {")
+ g.P(" if src.GetTypeUrl() == \"\" {")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")")
+ g.P(" }")
+ g.P(" if opts.Resolver == nil {")
+ g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes"))
+ g.P(" }")
+ g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")")
+ g.P(" if !ok {")
+ g.P(" return nil, ", protoregistryPackage.Ident("NotFound"))
+ g.P(" }")
+ g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())")
+ g.P(" if err != nil {")
+ g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)")
+ g.P(" }")
+ g.P(" dst = mt.New().Interface()")
+ 
g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", 
protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration 
(%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" vs := make(map[string]interface{})") + g.P(" for k, v := range x.GetFields() {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vs := make([]interface{}, len(x.GetValues()))") + g.P(" 
for i, v := range x.GetValues() {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number 
Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 
= normalizePaths(append(ss1[:0], in...))")
+		g.P("		ss2 = normalizePaths(append(ss2[:0], out...))")
+		g.P("		out = out[:0]")
+		g.P("		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {")
+		g.P("			switch s1, s2 := ss1[i1], ss2[i2]; {")
+		g.P("			case hasPathPrefix(s1, s2):")
+		g.P("				out = append(out, s1)")
+		g.P("				i1++")
+		g.P("			case hasPathPrefix(s2, s1):")
+		g.P("				out = append(out, s2)")
+		g.P("				i2++")
+		g.P("			case lessPath(s1, s2):")
+		g.P("				i1++")
+		g.P("			case lessPath(s2, s1):")
+		g.P("				i2++")
+		g.P("			}")
+		g.P("		}")
+		g.P("		return out")
+		g.P("	}")
+		g.P()
+		g.P("	out := Union(mx, my, ms...).GetPaths()")
+		g.P("	out = intersect(out, mx.GetPaths())")
+		g.P("	out = intersect(out, my.GetPaths())")
+		g.P("	for _, m := range ms {")
+		g.P("		out = intersect(out, m.GetPaths())")
+		g.P("	}")
+		g.P("	return &FieldMask{Paths: normalizePaths(out)}")
+		g.P("}")
+		g.P()
+
+		g.P("// IsValid reports whether all the paths are syntactically valid and")
+		g.P("// refer to known fields in the specified message type.")
+		g.P("// It reports false for a nil FieldMask.")
+		g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {")
+		g.P("	paths := x.GetPaths()")
+		g.P("	return x != nil && numValidPaths(m, paths) == len(paths)")
+		g.P("}")
+		g.P()
+
+		g.P("// Append appends a list of paths to the mask and verifies that each one")
+		g.P("// is valid according to the specified message type.")
+		g.P("// An invalid path is not appended and breaks insertion of subsequent paths.")
+		g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {")
+		g.P("	numValid := numValidPaths(m, paths)")
+		g.P("	x.Paths = append(x.Paths, paths[:numValid]...)")
+		g.P("	paths = paths[numValid:]")
+		g.P("	if len(paths) > 0 {")
+		g.P("		name := m.ProtoReflect().Descriptor().FullName()")
+		g.P("		return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)")
+		g.P("	}")
+		g.P("	return nil")
+		g.P("}")
+		g.P()
+
+		g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {")
+		g.P("	md0 := m.ProtoReflect().Descriptor()")
+		g.P("	for i, path := range paths {")
+		g.P("		md := md0")
+		g.P("		if !rangeFields(path, func(field string) bool {")
+		g.P("			// Search the field within the message.")
+		g.P("			if md == nil {")
+		g.P("				return false // not within a message")
+		g.P("			}")
+		g.P("			fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))")
+		g.P("			// The real field name of a group is the message name.")
+		g.P("			if fd == nil {")
+		g.P("				gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))")
+		g.P("				if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {")
+		g.P("					fd = gd")
+		g.P("				}")
+		g.P("			} else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {")
+		g.P("				fd = nil")
+		g.P("			}")
+		g.P("			if fd == nil {")
+		g.P("				return false // message does not have this field")
+		g.P("			}")
+		g.P()
+		g.P("			// Identify the next message to search within.")
+		g.P("			md = fd.Message() // may be nil")
+		g.P()
+		g.P("			// Repeated fields are only allowed at the last position.")
+		g.P("			if fd.IsList() || fd.IsMap() {")
+		g.P("				md = nil")
+		g.P("			}")
+		g.P()
+		g.P("			return true")
+		g.P("		}) {")
+		g.P("			return i")
+		g.P("		}")
+		g.P("	}")
+		g.P("	return len(paths)")
+		g.P("}")
+		g.P()
+
+		g.P("// Normalize converts the mask to its canonical form where all paths are sorted")
sorted") + g.P("// and redundant paths are removed.") + g.P("func (x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact matche or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling a iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go new file mode 100644 index 000000000000..a475adfdd20b --- /dev/null +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -0,0 +1,1261 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protogen provides support for writing protoc plugins. 
+// +// Plugins for protoc, the Protocol Buffer compiler, +// are programs which read a CodeGeneratorRequest message from standard input +// and write a CodeGeneratorResponse message to standard output. +// This package provides support for writing plugins which generate Go code. +package protogen + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const goPackageDocURL = "https://developers.google.com/protocol-buffers/docs/reference/go-generated#package" + +// Run executes a function as a protoc plugin. +// +// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin +// function, and writes a CodeGeneratorResponse message to os.Stdout. +// +// If a failure occurs while reading or writing, Run prints an error to +// os.Stderr and calls os.Exit(1). +func (opts Options) Run(f func(*Plugin) error) { + if err := run(opts, f); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err) + os.Exit(1) + } +} + +func run(opts Options, f func(*Plugin) error) error { + if len(os.Args) > 1 { + return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1]) + } + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + req := &pluginpb.CodeGeneratorRequest{} + if err := proto.Unmarshal(in, req); err != nil { + return err + } + gen, err := opts.New(req) + if err != nil { + return err + } + if err := f(gen); err != nil { + // Errors from the plugin function are reported by setting the + // error field in the CodeGeneratorResponse. + // + // In contrast, errors that indicate a problem in protoc + // itself (unparsable input, I/O errors, etc.) are reported + // to stderr. + gen.Error(err) + } + resp := gen.Response() + out, err := proto.Marshal(resp) + if err != nil { + return err + } + if _, err := os.Stdout.Write(out); err != nil { + return err + } + return nil +} + +// A Plugin is a protoc plugin invocation. +type Plugin struct { + // Request is the CodeGeneratorRequest provided by protoc. + Request *pluginpb.CodeGeneratorRequest + + // Files is the set of files to generate and everything they import. + // Files appear in topological order, so each file appears before any + // file that imports it. + Files []*File + FilesByPath map[string]*File + + // SupportedFeatures is the set of protobuf language features supported by + // this generator plugin. See the documentation for + // google.protobuf.CodeGeneratorResponse.supported_features for details. + SupportedFeatures uint64 + + fileReg *protoregistry.Files + enumsByName map[protoreflect.FullName]*Enum + messagesByName map[protoreflect.FullName]*Message + annotateCode bool + pathType pathType + module string + genFiles []*GeneratedFile + opts Options + err error +} + +type Options struct { + // If ParamFunc is non-nil, it will be called with each unknown + // generator parameter. 
+	//
+	// Plugins for protoc can accept parameters from the command line,
+	// passed in the --<lang>_out protoc flag, separated from the output
+	// directory with a colon; e.g.,
+	//
+	//	--go_out=<param1>=<value1>,<param2>=<value2>:<output_directory>
+	//
+	// Parameters passed in this fashion as a comma-separated list of
+	// key=value pairs will be passed to the ParamFunc.
+	//
+	// The (flag.FlagSet).Set method matches this function signature,
+	// so parameters can be converted into flags as in the following:
+	//
+	//	var flags flag.FlagSet
+	//	value := flags.Bool("param", false, "")
+	//	opts := &protogen.Options{
+	//		ParamFunc: flags.Set,
+	//	}
+	//	opts.Run(func(p *protogen.Plugin) error {
+	//		if *value { ... }
+	//	})
+	ParamFunc func(name, value string) error
+
+	// ImportRewriteFunc is called with the import path of each package
+	// imported by a generated file. It returns the import path to use
+	// for this package.
+	ImportRewriteFunc func(GoImportPath) GoImportPath
+}
+
+// New returns a new Plugin.
+func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
+	gen := &Plugin{
+		Request:        req,
+		FilesByPath:    make(map[string]*File),
+		fileReg:        new(protoregistry.Files),
+		enumsByName:    make(map[protoreflect.FullName]*Enum),
+		messagesByName: make(map[protoreflect.FullName]*Message),
+		opts:           opts,
+	}
+
+	packageNames := make(map[string]GoPackageName) // filename -> package name
+	importPaths := make(map[string]GoImportPath)   // filename -> import path
+	for _, param := range strings.Split(req.GetParameter(), ",") {
+		var value string
+		if i := strings.Index(param, "="); i >= 0 {
+			value = param[i+1:]
+			param = param[0:i]
+		}
+		switch param {
+		case "":
+			// Ignore.
+		case "module":
+			gen.module = value
+		case "paths":
+			switch value {
+			case "import":
+				gen.pathType = pathTypeImport
+			case "source_relative":
+				gen.pathType = pathTypeSourceRelative
+			default:
+				return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value)
+			}
+		case "annotate_code":
+			switch value {
+			case "true", "":
+				gen.annotateCode = true
+			case "false":
+			default:
+				return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param)
+			}
+		default:
+			if param[0] == 'M' {
+				impPath, pkgName := splitImportPathAndPackageName(value)
+				if pkgName != "" {
+					packageNames[param[1:]] = pkgName
+				}
+				if impPath != "" {
+					importPaths[param[1:]] = impPath
+				}
+				continue
+			}
+			if opts.ParamFunc != nil {
+				if err := opts.ParamFunc(param, value); err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+	// When the module= option is provided, we strip the module name
+	// prefix from generated files. This only makes sense if generated
+	// filenames are based on the import path.
+	if gen.module != "" && gen.pathType == pathTypeSourceRelative {
+		return nil, fmt.Errorf("cannot use module= with paths=source_relative")
+	}
+
+	// Figure out the import path and package name for each file.
+	//
+	// The rules here are complicated and have grown organically over time.
+	// Interactions between different ways of specifying package information
+	// may be surprising.
+	//
+	// The recommended approach is to include a go_package option in every
+	// .proto source file specifying the full import path of the Go package
+	// associated with this file.
+	//
+	//	option go_package = "google.golang.org/protobuf/types/known/anypb";
+	//
+	// Alternatively, build systems which want to exert full control over
+	// import paths may specify M<filename>=<import_path> flags.
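+	//
+	// For example (hypothetical invocation):
+	//
+	//	protoc --go_out=Mfoo/bar.proto=example.com/foo/bar:. foo/bar.proto
+	//
+	// maps the file "foo/bar.proto" to the Go import path "example.com/foo/bar".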
+	for _, fdesc := range gen.Request.ProtoFile {
+		// The "M" command-line flags take precedence over
+		// the "go_package" option in the .proto source file.
+		filename := fdesc.GetName()
+		impPath, pkgName := splitImportPathAndPackageName(fdesc.GetOptions().GetGoPackage())
+		if importPaths[filename] == "" && impPath != "" {
+			importPaths[filename] = impPath
+		}
+		if packageNames[filename] == "" && pkgName != "" {
+			packageNames[filename] = pkgName
+		}
+		switch {
+		case importPaths[filename] == "":
+			// The import path must be specified one way or another.
+			return nil, fmt.Errorf(
+				"unable to determine Go import path for %q\n\n"+
+					"Please specify either:\n"+
+					"\t• a \"go_package\" option in the .proto source file, or\n"+
+					"\t• an \"M\" argument on the command line.\n\n"+
+					"See %v for more information.\n",
+				fdesc.GetName(), goPackageDocURL)
+		case !strings.Contains(string(importPaths[filename]), ".") &&
+			!strings.Contains(string(importPaths[filename]), "/"):
+			// Check that import paths contain at least a dot or slash to avoid
+			// a common mistake where the import path is confused with the package name.
+			return nil, fmt.Errorf(
+				"invalid Go import path %q for %q\n\n"+
+					"The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+
+					"See %v for more information.\n",
+				string(importPaths[filename]), fdesc.GetName(), goPackageDocURL)
+		case packageNames[filename] == "":
+			// If the package name is not explicitly specified,
+			// then derive a reasonable package name from the import path.
+			//
+			// NOTE: The package name is derived first from the import path in
+			// the "go_package" option (if present) before trying the "M" flag.
+			// The inverted order for this is because the primary use of the "M"
+			// flag is by build systems that have full control over the
+			// import paths of all packages, where it is generally expected that
+			// the Go package name still be identical for the Go toolchain and
+			// for custom build systems like Bazel.
+			if impPath == "" {
+				impPath = importPaths[filename]
+			}
+			packageNames[filename] = cleanPackageName(path.Base(string(impPath)))
+		}
+	}
+
+	// Consistency check: Every file with the same Go import path should have
+	// the same Go package name.
+	packageFiles := make(map[GoImportPath][]string)
+	for filename, importPath := range importPaths {
+		if _, ok := packageNames[filename]; !ok {
+			// Skip files mentioned in an M= parameter
+			// but which do not appear in the CodeGeneratorRequest.
+			continue
+		}
+		packageFiles[importPath] = append(packageFiles[importPath], filename)
+	}
+	for importPath, filenames := range packageFiles {
+		for i := 1; i < len(filenames); i++ {
+			if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b {
+				return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)",
+					importPath, a, filenames[0], b, filenames[i])
+			}
+		}
+	}
+
+	for _, fdesc := range gen.Request.ProtoFile {
+		filename := fdesc.GetName()
+		if gen.FilesByPath[filename] != nil {
+			return nil, fmt.Errorf("duplicate file name: %q", filename)
+		}
+		f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename])
+		if err != nil {
+			return nil, err
+		}
+		gen.Files = append(gen.Files, f)
+		gen.FilesByPath[filename] = f
+	}
+	for _, filename := range gen.Request.FileToGenerate {
+		f, ok := gen.FilesByPath[filename]
+		if !ok {
+			return nil, fmt.Errorf("no descriptor for generated file: %v", filename)
+		}
+		f.Generate = true
+	}
+	return gen, nil
+}
+
+// Error records an error in code generation. 
The generator will report the +// error back to protoc and will not produce output. +func (gen *Plugin) Error(err error) { + if gen.err == nil { + gen.err = err + } +} + +// Response returns the generator output. +func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse { + resp := &pluginpb.CodeGeneratorResponse{} + if gen.err != nil { + resp.Error = proto.String(gen.err.Error()) + return resp + } + for _, g := range gen.genFiles { + if g.skip { + continue + } + content, err := g.Content() + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + filename := g.filename + if gen.module != "" { + trim := gen.module + "/" + if !strings.HasPrefix(filename, trim) { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)), + } + } + filename = strings.TrimPrefix(filename, trim) + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename), + Content: proto.String(string(content)), + }) + if gen.annotateCode && strings.HasSuffix(g.filename, ".go") { + meta, err := g.metaFile(content) + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename + ".meta"), + Content: proto.String(meta), + }) + } + } + if gen.SupportedFeatures > 0 { + resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures) + } + return resp +} + +// A File describes a .proto source file. +type File struct { + Desc protoreflect.FileDescriptor + Proto *descriptorpb.FileDescriptorProto + + GoDescriptorIdent GoIdent // name of Go variable for the file descriptor + GoPackageName GoPackageName // name of this file's Go package + GoImportPath GoImportPath // import path of this file's Go package + + Enums []*Enum // top-level enum declarations + Messages []*Message // top-level message declarations + Extensions []*Extension // top-level extension declarations + Services []*Service // top-level service declarations + + Generate bool // true if we should generate code for this file + + // GeneratedFilenamePrefix is used to construct filenames for generated + // files associated with this source file. + // + // For example, the source file "dir/foo.proto" might have a filename prefix + // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go". + GeneratedFilenamePrefix string + + location Location +} + +func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) { + desc, err := protodesc.NewFile(p, gen.fileReg) + if err != nil { + return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err) + } + if err := gen.fileReg.RegisterFile(desc); err != nil { + return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err) + } + f := &File{ + Desc: desc, + Proto: p, + GoPackageName: packageName, + GoImportPath: importPath, + location: Location{SourceFile: desc.Path()}, + } + + // Determine the prefix for generated Go files. + prefix := p.GetName() + if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" { + prefix = prefix[:len(prefix)-len(ext)] + } + switch gen.pathType { + case pathTypeImport: + // If paths=import, the output filename is derived from the Go import path. 
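+		// For example (illustrative): a file "dir/foo.proto" with Go import
+		// path "example.com/foo" yields the prefix "example.com/foo/foo".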
+ prefix = path.Join(string(f.GoImportPath), path.Base(prefix)) + case pathTypeSourceRelative: + // If paths=source_relative, the output filename is derived from + // the input filename. + } + f.GoDescriptorIdent = GoIdent{ + GoName: "File_" + strs.GoSanitized(p.GetName()), + GoImportPath: f.GoImportPath, + } + f.GeneratedFilenamePrefix = prefix + + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i))) + } + for i, sds := 0, desc.Services(); i < sds.Len(); i++ { + f.Services = append(f.Services, newService(gen, f, sds.Get(i))) + } + for _, message := range f.Messages { + if err := message.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, extension := range f.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, service := range f.Services { + for _, method := range service.Methods { + if err := method.resolveDependencies(gen); err != nil { + return nil, err + } + } + } + return f, nil +} + +// splitImportPathAndPackageName splits off the optional Go package name +// from the Go import path when separated by a ';' delimiter. +func splitImportPathAndPackageName(s string) (GoImportPath, GoPackageName) { + if i := strings.Index(s, ";"); i >= 0 { + return GoImportPath(s[:i]), GoPackageName(s[i+1:]) + } + return GoImportPath(s), "" +} + +// An Enum describes an enum. +type Enum struct { + Desc protoreflect.EnumDescriptor + + GoIdent GoIdent // name of the generated Go type + + Values []*EnumValue // enum value declarations + + Location Location // location of this enum + Comments CommentSet // comments associated with this enum +} + +func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_EnumType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_EnumType_field_number, desc.Index()) + } + enum := &Enum{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.enumsByName[desc.FullName()] = enum + for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ { + enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i))) + } + return enum +} + +// An EnumValue describes an enum value. +type EnumValue struct { + Desc protoreflect.EnumValueDescriptor + + GoIdent GoIdent // name of the generated Go declaration + + Parent *Enum // enum in which this value is declared + + Location Location // location of this enum value + Comments CommentSet // comments associated with this enum value +} + +func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue { + // A top-level enum value's name is: EnumName_ValueName + // An enum value contained in a message is: MessageName_ValueName + // + // For historical reasons, enum value names are not camel-cased. 
+ parentIdent := enum.GoIdent + if message != nil { + parentIdent = message.GoIdent + } + name := parentIdent.GoName + "_" + string(desc.Name()) + loc := enum.Location.appendPath(genid.EnumDescriptorProto_Value_field_number, desc.Index()) + return &EnumValue{ + Desc: desc, + GoIdent: f.GoImportPath.Ident(name), + Parent: enum, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// A Message describes a message. +type Message struct { + Desc protoreflect.MessageDescriptor + + GoIdent GoIdent // name of the generated Go type + + Fields []*Field // message field declarations + Oneofs []*Oneof // message oneof declarations + + Enums []*Enum // nested enum declarations + Messages []*Message // nested message declarations + Extensions []*Extension // nested extension declarations + + Location Location // location of this message + Comments CommentSet // comments associated with this message +} + +func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_NestedType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_MessageType_field_number, desc.Index()) + } + message := &Message{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.messagesByName[desc.FullName()] = message + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i))) + } + for i, fds := 0, desc.Fields(); i < fds.Len(); i++ { + message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i))) + } + for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ { + message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i))) + } + + // Resolve local references between fields and oneofs. + for _, field := range message.Fields { + if od := field.Desc.ContainingOneof(); od != nil { + oneof := message.Oneofs[od.Index()] + field.Oneof = oneof + oneof.Fields = append(oneof.Fields, field) + } + } + + // Field name conflict resolution. + // + // We assume well-known method names that may be attached to a generated + // message type, as well as a 'Get*' method for each field. For each + // field in turn, we add _s to its name until there are no conflicts. + // + // Any change to the following set of method names is a potential + // incompatible API change because it may change generated field names. + // + // TODO: If we ever support a 'go_name' option to set the Go name of a + // field, we should consider dropping this entirely. The conflict + // resolution algorithm is subtle and surprising (changing the order + // in which fields appear in the .proto source file can change the + // names of fields in generated code), and does not adapt well to + // adding new per-field methods such as setters. 
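+	// For example (illustrative): a field named "descriptor" camel-cases to
+	// "Descriptor", which collides with the reserved Descriptor method below,
+	// so it is renamed to "Descriptor_" with getter "GetDescriptor_".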
+	usedNames := map[string]bool{
+		"Reset":               true,
+		"String":              true,
+		"ProtoMessage":        true,
+		"Marshal":             true,
+		"Unmarshal":           true,
+		"ExtensionRangeArray": true,
+		"ExtensionMap":        true,
+		"Descriptor":          true,
+	}
+	makeNameUnique := func(name string, hasGetter bool) string {
+		for usedNames[name] || (hasGetter && usedNames["Get"+name]) {
+			name += "_"
+		}
+		usedNames[name] = true
+		usedNames["Get"+name] = hasGetter
+		return name
+	}
+	for _, field := range message.Fields {
+		field.GoName = makeNameUnique(field.GoName, true)
+		field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName
+		if field.Oneof != nil && field.Oneof.Fields[0] == field {
+			// Make the name for a oneof unique as well. For historical reasons,
+			// this assumes that a getter method is not generated for oneofs.
+			// This is incorrect, but fixing it breaks existing code.
+			field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false)
+			field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName
+		}
+	}
+
+	// Oneof field name conflict resolution.
+	//
+	// This conflict resolution is incomplete as it does not consider collisions
+	// with other oneof field types, but fixing it breaks existing code.
+	for _, field := range message.Fields {
+		if field.Oneof != nil {
+		Loop:
+			for {
+				for _, nestedMessage := range message.Messages {
+					if nestedMessage.GoIdent == field.GoIdent {
+						field.GoIdent.GoName += "_"
+						continue Loop
+					}
+				}
+				for _, nestedEnum := range message.Enums {
+					if nestedEnum.GoIdent == field.GoIdent {
+						field.GoIdent.GoName += "_"
+						continue Loop
+					}
+				}
+				break Loop
+			}
+		}
+	}
+
+	return message
+}
+
+func (message *Message) resolveDependencies(gen *Plugin) error {
+	for _, field := range message.Fields {
+		if err := field.resolveDependencies(gen); err != nil {
+			return err
+		}
+	}
+	for _, message := range message.Messages {
+		if err := message.resolveDependencies(gen); err != nil {
+			return err
+		}
+	}
+	for _, extension := range message.Extensions {
+		if err := extension.resolveDependencies(gen); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// A Field describes a message field.
+type Field struct {
+	Desc protoreflect.FieldDescriptor
+
+	// GoName is the base name of this field's Go field and methods.
+	// For code generated by protoc-gen-go, this means a field named
+	// '{{GoName}}' and a getter method named 'Get{{GoName}}'.
+	GoName string // e.g., "FieldName"
+
+	// GoIdent is the base name of a top-level declaration for this field.
+	// For code generated by protoc-gen-go, this means a wrapper type named
+	// '{{GoIdent}}' for member fields of a oneof, and a variable named
+	// 'E_{{GoIdent}}' for extension fields. 
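+	// For example (illustrative): a oneof member "foo_bar" of message "Msg"
+	// has the wrapper type Msg_FooBar, and a top-level extension "foo_bar"
+	// has the variable E_FooBar.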
+ GoIdent GoIdent // e.g., "MessageName_FieldName" + + Parent *Message // message in which this field is declared; nil if top-level extension + Oneof *Oneof // containing oneof; nil if not part of a oneof + Extendee *Message // extended message for extension fields; nil otherwise + + Enum *Enum // type for enum fields; nil otherwise + Message *Message // type for message or group fields; nil otherwise + + Location Location // location of this field + Comments CommentSet // comments associated with this field +} + +func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field { + var loc Location + switch { + case desc.IsExtension() && message == nil: + loc = f.location.appendPath(genid.FileDescriptorProto_Extension_field_number, desc.Index()) + case desc.IsExtension() && message != nil: + loc = message.Location.appendPath(genid.DescriptorProto_Extension_field_number, desc.Index()) + default: + loc = message.Location.appendPath(genid.DescriptorProto_Field_field_number, desc.Index()) + } + camelCased := strs.GoCamelCase(string(desc.Name())) + var parentPrefix string + if message != nil { + parentPrefix = message.GoIdent.GoName + "_" + } + field := &Field{ + Desc: desc, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Parent: message, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return field +} + +func (field *Field) resolveDependencies(gen *Plugin) error { + desc := field.Desc + switch desc.Kind() { + case protoreflect.EnumKind: + name := field.Desc.Enum().FullName() + enum, ok := gen.enumsByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name) + } + field.Enum = enum + case protoreflect.MessageKind, protoreflect.GroupKind: + name := desc.Message().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Message = message + } + if desc.IsExtension() { + name := desc.ContainingMessage().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Extendee = message + } + return nil +} + +// A Oneof describes a message oneof. +type Oneof struct { + Desc protoreflect.OneofDescriptor + + // GoName is the base name of this oneof's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "OneofName" + + // GoIdent is the base name of a top-level declaration for this oneof. 
+ GoIdent GoIdent // e.g., "MessageName_OneofName" + + Parent *Message // message in which this oneof is declared + + Fields []*Field // fields that are part of this oneof + + Location Location // location of this oneof + Comments CommentSet // comments associated with this oneof +} + +func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { + loc := message.Location.appendPath(genid.DescriptorProto_OneofDecl_field_number, desc.Index()) + camelCased := strs.GoCamelCase(string(desc.Name())) + parentPrefix := message.GoIdent.GoName + "_" + return &Oneof{ + Desc: desc, + Parent: message, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// Extension is an alias of Field for documentation. +type Extension = Field + +// A Service describes a service. +type Service struct { + Desc protoreflect.ServiceDescriptor + + GoName string + + Methods []*Method // service method declarations + + Location Location // location of this service + Comments CommentSet // comments associated with this service +} + +func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { + loc := f.location.appendPath(genid.FileDescriptorProto_Service_field_number, desc.Index()) + service := &Service{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + for i, mds := 0, desc.Methods(); i < mds.Len(); i++ { + service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i))) + } + return service +} + +// A Method describes a method in a service. +type Method struct { + Desc protoreflect.MethodDescriptor + + GoName string + + Parent *Service // service in which this method is declared + + Input *Message + Output *Message + + Location Location // location of this method + Comments CommentSet // comments associated with this method +} + +func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { + loc := service.Location.appendPath(genid.ServiceDescriptorProto_Method_field_number, desc.Index()) + method := &Method{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Parent: service, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return method +} + +func (method *Method) resolveDependencies(gen *Plugin) error { + desc := method.Desc + + inName := desc.Input().FullName() + in, ok := gen.messagesByName[inName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName) + } + method.Input = in + + outName := desc.Output().FullName() + out, ok := gen.messagesByName[outName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName) + } + method.Output = out + + return nil +} + +// A GeneratedFile is a generated file. +type GeneratedFile struct { + gen *Plugin + skip bool + filename string + goImportPath GoImportPath + buf bytes.Buffer + packageNames map[GoImportPath]GoPackageName + usedPackageNames map[GoPackageName]bool + manualImports map[GoImportPath]bool + annotations map[string][]Location +} + +// NewGeneratedFile creates a new generated file with the given filename +// and import path. 
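+//
+// A sketch of typical usage, assuming gen is the *Plugin and f is a *File
+// with f.Generate set (names are illustrative):
+//
+//	g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+".pb.go", f.GoImportPath)
+//	g.P("// Code generated by my plugin. DO NOT EDIT.")
+//	g.P("package ", f.GoPackageName)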
+func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile { + g := &GeneratedFile{ + gen: gen, + filename: filename, + goImportPath: goImportPath, + packageNames: make(map[GoImportPath]GoPackageName), + usedPackageNames: make(map[GoPackageName]bool), + manualImports: make(map[GoImportPath]bool), + annotations: make(map[string][]Location), + } + + // All predeclared identifiers in Go are already used. + for _, s := range types.Universe.Names() { + g.usedPackageNames[GoPackageName(s)] = true + } + + gen.genFiles = append(gen.genFiles, g) + return g +} + +// P prints a line to the generated output. It converts each parameter to a +// string following the same rules as fmt.Print. It never inserts spaces +// between parameters. +func (g *GeneratedFile) P(v ...interface{}) { + for _, x := range v { + switch x := x.(type) { + case GoIdent: + fmt.Fprint(&g.buf, g.QualifiedGoIdent(x)) + default: + fmt.Fprint(&g.buf, x) + } + } + fmt.Fprintln(&g.buf) +} + +// QualifiedGoIdent returns the string to use for a Go identifier. +// +// If the identifier is from a different Go package than the generated file, +// the returned name will be qualified (package.name) and an import statement +// for the identifier's package will be included in the file. +func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string { + if ident.GoImportPath == g.goImportPath { + return ident.GoName + } + if packageName, ok := g.packageNames[ident.GoImportPath]; ok { + return string(packageName) + "." + ident.GoName + } + packageName := cleanPackageName(path.Base(string(ident.GoImportPath))) + for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ { + packageName = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[ident.GoImportPath] = packageName + g.usedPackageNames[packageName] = true + return string(packageName) + "." + ident.GoName +} + +// Import ensures a package is imported by the generated file. +// +// Packages referenced by QualifiedGoIdent are automatically imported. +// Explicitly importing a package with Import is generally only necessary +// when the import will be blank (import _ "package"). +func (g *GeneratedFile) Import(importPath GoImportPath) { + g.manualImports[importPath] = true +} + +// Write implements io.Writer. +func (g *GeneratedFile) Write(p []byte) (n int, err error) { + return g.buf.Write(p) +} + +// Skip removes the generated file from the plugin output. +func (g *GeneratedFile) Skip() { + g.skip = true +} + +// Unskip reverts a previous call to Skip, re-including the generated file in +// the plugin output. +func (g *GeneratedFile) Unskip() { + g.skip = false +} + +// Annotate associates a symbol in a generated Go file with a location in a +// source .proto file. +// +// The symbol may refer to a type, constant, variable, function, method, or +// struct field. The "T.sel" syntax is used to identify the method or field +// 'sel' on type 'T'. +func (g *GeneratedFile) Annotate(symbol string, loc Location) { + g.annotations[symbol] = append(g.annotations[symbol], loc) +} + +// Content returns the contents of the generated file. +func (g *GeneratedFile) Content() ([]byte, error) { + if !strings.HasSuffix(g.filename, ".go") { + return g.buf.Bytes(), nil + } + + // Reformat generated code. + original := g.buf.Bytes() + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. 
+ // This should never happen in practice, but it can while changing generated code + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String()) + } + + // Collect a sorted list of all imports. + var importPaths [][2]string + rewriteImport := func(importPath string) string { + if f := g.gen.opts.ImportRewriteFunc; f != nil { + return string(f(GoImportPath(importPath))) + } + return importPath + } + for importPath := range g.packageNames { + pkgName := string(g.packageNames[GoImportPath(importPath)]) + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{pkgName, pkgPath}) + } + for importPath := range g.manualImports { + if _, ok := g.packageNames[importPath]; !ok { + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{"_", pkgPath}) + } + } + sort.Slice(importPaths, func(i, j int) bool { + return importPaths[i][1] < importPaths[j][1] + }) + + // Modify the AST to include a new import block. + if len(importPaths) > 0 { + // Insert block after package statement or + // possible comment attached to the end of the package statement. + pos := file.Package + tokFile := fset.File(file.Package) + pkgLine := tokFile.Line(file.Package) + for _, c := range file.Comments { + if tokFile.Line(c.Pos()) > pkgLine { + break + } + pos = c.End() + } + + // Construct the import block. + impDecl := &ast.GenDecl{ + Tok: token.IMPORT, + TokPos: pos, + Lparen: pos, + Rparen: pos, + } + for _, importPath := range importPaths { + impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{ + Name: &ast.Ident{ + Name: importPath[0], + NamePos: pos, + }, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(importPath[1]), + ValuePos: pos, + }, + EndPos: pos, + }) + } + file.Decls = append([]ast.Decl{impDecl}, file.Decls...) + } + + var out bytes.Buffer + if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil { + return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err) + } + return out.Bytes(), nil +} + +// metaFile returns the contents of the file's metadata file, which is a +// text formatted string of the google.protobuf.GeneratedCodeInfo. 
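+//
+// For example (illustrative, exact formatting may differ), a single
+// annotation entry in the output might read:
+//
+//	annotation:{path:4 path:0 source_file:"foo.proto" begin:120 end:127}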
+func (g *GeneratedFile) metaFile(content []byte) (string, error) { + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", content, 0) + if err != nil { + return "", err + } + info := &descriptorpb.GeneratedCodeInfo{} + + seenAnnotations := make(map[string]bool) + annotate := func(s string, ident *ast.Ident) { + seenAnnotations[s] = true + for _, loc := range g.annotations[s] { + info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{ + SourceFile: proto.String(loc.SourceFile), + Path: loc.Path, + Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)), + End: proto.Int32(int32(fset.Position(ident.End()).Offset)), + }) + } + } + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + annotate(spec.Name.Name, spec.Name) + switch st := spec.Type.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + case *ast.InterfaceType: + for _, field := range st.Methods.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + } + case *ast.ValueSpec: + for _, name := range spec.Names { + annotate(name.Name, name) + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil { + annotate(decl.Name.Name, decl.Name) + } else { + recv := decl.Recv.List[0].Type + if s, ok := recv.(*ast.StarExpr); ok { + recv = s.X + } + if id, ok := recv.(*ast.Ident); ok { + annotate(id.Name+"."+decl.Name.Name, decl.Name) + } + } + } + } + for a := range g.annotations { + if !seenAnnotations[a] { + return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a) + } + } + + b, err := prototext.Marshal(info) + if err != nil { + return "", err + } + return string(b), nil +} + +// A GoIdent is a Go identifier, consisting of a name and import path. +// The name is a single identifier and may not be a dot-qualified selector. +type GoIdent struct { + GoName string + GoImportPath GoImportPath +} + +func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) } + +// newGoIdent returns the Go identifier for a descriptor. +func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent { + name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".") + return GoIdent{ + GoName: strs.GoCamelCase(name), + GoImportPath: f.GoImportPath, + } +} + +// A GoImportPath is the import path of a Go package. +// For example: "google.golang.org/protobuf/compiler/protogen" +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// Ident returns a GoIdent with s as the GoName and p as the GoImportPath. +func (p GoImportPath) Ident(s string) GoIdent { + return GoIdent{GoName: s, GoImportPath: p} +} + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// cleanPackageName converts a string to a valid Go package name. +func cleanPackageName(name string) GoPackageName { + return GoPackageName(strs.GoSanitized(name)) +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// A Location is a location in a .proto source file. +// +// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto +// for details. 
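+//
+// For example (illustrative), the path [4, 1, 2, 3] refers to field index 3
+// of the message at index 1 in the file: 4 is the number of the
+// FileDescriptorProto.message_type field, and 2 is the number of the
+// DescriptorProto.field field.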
+type Location struct {
+	SourceFile string
+	Path       protoreflect.SourcePath
+}
+
+// appendPath adds elements to a Location's path, returning a new Location.
+func (loc Location) appendPath(num protoreflect.FieldNumber, idx int) Location {
+	loc.Path = append(protoreflect.SourcePath(nil), loc.Path...) // make copy
+	loc.Path = append(loc.Path, int32(num), int32(idx))
+	return loc
+}
+
+// CommentSet is a set of leading and trailing comments associated
+// with a .proto descriptor declaration.
+type CommentSet struct {
+	LeadingDetached []Comments
+	Leading         Comments
+	Trailing        Comments
+}
+
+func makeCommentSet(loc protoreflect.SourceLocation) CommentSet {
+	var leadingDetached []Comments
+	for _, s := range loc.LeadingDetachedComments {
+		leadingDetached = append(leadingDetached, Comments(s))
+	}
+	return CommentSet{
+		LeadingDetached: leadingDetached,
+		Leading:         Comments(loc.LeadingComments),
+		Trailing:        Comments(loc.TrailingComments),
+	}
+}
+
+// Comments is a comments string as provided by protoc.
+type Comments string
+
+// String formats the comments by inserting // to the start of each line,
+// ensuring that there is a trailing newline.
+// An empty comment is formatted as an empty string.
+func (c Comments) String() string {
+	if c == "" {
+		return ""
+	}
+	var b []byte
+	for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") {
+		b = append(b, "//"...)
+		b = append(b, line...)
+		b = append(b, "\n"...)
+	}
+	return string(b)
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index 07da5db3450e..5f28148d805b 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -19,7 +19,7 @@ import (
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/internal/set"
 	"google.golang.org/protobuf/proto"
-	pref "google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
@@ -113,7 +113,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
 }
 
 // unmarshalMessage unmarshals a message into the given protoreflect.Message.
-func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
+func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
 	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
 		return unmarshal(d, m)
 	}
@@ -159,10 +159,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
 	}
 
 	// Get the FieldDescriptor.
-	var fd pref.FieldDescriptor
+	var fd protoreflect.FieldDescriptor
 	if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
 		// Only extension names are in [name] format. 
- extName := pref.FullName(name[1 : len(name)-1]) + extName := protoreflect.FullName(name[1 : len(name)-1]) extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) @@ -240,23 +240,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { } } -func isKnownValue(fd pref.FieldDescriptor) bool { +func isKnownValue(fd protoreflect.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname } -func isNullValue(fd pref.FieldDescriptor) bool { +func isNullValue(fd protoreflect.FieldDescriptor) bool { ed := fd.Enum() return ed != nil && ed.FullName() == genid.NullValue_enum_fullname } // unmarshalSingular unmarshals to the non-repeated field specified // by the given FieldDescriptor. -func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error { - var val pref.Value +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), false) default: @@ -272,63 +272,63 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro // unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by // the given FieldDescriptor. -func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { const b32 int = 32 const b64 int = 64 tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if tok.Kind() == json.Bool { - return pref.ValueOfBool(tok.Bool()), nil + return protoreflect.ValueOfBool(tok.Bool()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, ok := unmarshalInt(tok, b32); ok { return v, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, ok := unmarshalInt(tok, b64); ok { return v, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, ok := unmarshalUint(tok, b32); ok { return v, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, ok := unmarshalUint(tok, b64); ok { return v, nil } - case pref.FloatKind: + case protoreflect.FloatKind: if v, ok := unmarshalFloat(tok, b32); ok { return v, nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if v, ok := unmarshalFloat(tok, b64); ok { return v, nil } - case pref.StringKind: + case protoreflect.StringKind: if tok.Kind() == json.String { - return pref.ValueOfString(tok.ParsedString()), nil + return protoreflect.ValueOfString(tok.ParsedString()), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if v, ok := unmarshalBytes(tok); ok { return v, nil } - case pref.EnumKind: + case protoreflect.EnumKind: if v, ok := unmarshalEnum(tok, fd); ok { return v, nil } @@ -337,10 +337,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { 
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } -func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getInt(tok, bitSize) @@ -349,30 +349,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getInt(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getInt(tok json.Token, bitSize int) (pref.Value, bool) { +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Int(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfInt32(int32(n)), true + return protoreflect.ValueOfInt32(int32(n)), true } - return pref.ValueOfInt64(n), true + return protoreflect.ValueOfInt64(n), true } -func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getUint(tok, bitSize) @@ -381,30 +381,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) { // Decode number from string. s := strings.TrimSpace(tok.ParsedString()) if len(s) != len(tok.ParsedString()) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getUint(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getUint(tok json.Token, bitSize int) (pref.Value, bool) { +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Uint(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfUint32(uint32(n)), true + return protoreflect.ValueOfUint32(uint32(n)), true } - return pref.ValueOfUint64(n), true + return protoreflect.ValueOfUint64(n), true } -func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { switch tok.Kind() { case json.Number: return getFloat(tok, bitSize) @@ -414,49 +414,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) { switch s { case "NaN": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.NaN())), true + return protoreflect.ValueOfFloat32(float32(math.NaN())), true } - return pref.ValueOfFloat64(math.NaN()), true + return protoreflect.ValueOfFloat64(math.NaN()), true case "Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(+1))), true + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true } - return pref.ValueOfFloat64(math.Inf(+1)), true + return protoreflect.ValueOfFloat64(math.Inf(+1)), true case "-Infinity": if bitSize == 32 { - return pref.ValueOfFloat32(float32(math.Inf(-1))), true + return 
protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true } - return pref.ValueOfFloat64(math.Inf(-1)), true + return protoreflect.ValueOfFloat64(math.Inf(-1)), true } // Decode number from string. if len(s) != len(strings.TrimSpace(s)) { - return pref.Value{}, false + return protoreflect.Value{}, false } dec := json.NewDecoder([]byte(s)) tok, err := dec.Read() if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } return getFloat(tok, bitSize) } - return pref.Value{}, false + return protoreflect.Value{}, false } -func getFloat(tok json.Token, bitSize int) (pref.Value, bool) { +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { n, ok := tok.Float(bitSize) if !ok { - return pref.Value{}, false + return protoreflect.Value{}, false } if bitSize == 32 { - return pref.ValueOfFloat32(float32(n)), true + return protoreflect.ValueOfFloat32(float32(n)), true } - return pref.ValueOfFloat64(n), true + return protoreflect.ValueOfFloat64(n), true } -func unmarshalBytes(tok json.Token) (pref.Value, bool) { +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { if tok.Kind() != json.String { - return pref.Value{}, false + return protoreflect.Value{}, false } s := tok.ParsedString() @@ -469,36 +469,36 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) { } b, err := enc.DecodeString(s) if err != nil { - return pref.Value{}, false + return protoreflect.Value{}, false } - return pref.ValueOfBytes(b), true + return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. s := tok.ParsedString() - if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), true + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true } case json.Number: if n, ok := tok.Int(32); ok { - return pref.ValueOfEnum(pref.EnumNumber(n)), true + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true } case json.Null: // This is only valid for google.protobuf.NullValue. 
if isNullValue(fd) { - return pref.ValueOfEnum(0), true + return protoreflect.ValueOfEnum(0), true } } - return pref.Value{}, false + return protoreflect.Value{}, false } -func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -508,7 +508,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: for { tok, err := d.Peek() if err != nil { @@ -549,7 +549,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error { return nil } -func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { tok, err := d.Read() if err != nil { return err @@ -561,18 +561,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside the for loop // below. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { val := mmap.NewValue() if err := d.unmarshalMessage(val.Message(), false); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return val, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -618,7 +618,7 @@ Loop: // unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. // A map key type is any integral or string type. -func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) { +func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { const b32 = 32 const b64 = 64 const base10 = 10 @@ -626,40 +626,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref. 
name := tok.Name() kind := fd.Kind() switch kind { - case pref.StringKind: - return pref.ValueOfString(name).MapKey(), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil - case pref.BoolKind: + case protoreflect.BoolKind: switch name { case "true": - return pref.ValueOfBool(true).MapKey(), nil + return protoreflect.ValueOfBool(true).MapKey(), nil case "false": - return pref.ValueOfBool(false).MapKey(), nil + return protoreflect.ValueOfBool(false).MapKey(), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, err := strconv.ParseInt(name, base10, b32); err == nil { - return pref.ValueOfInt32(int32(n)).MapKey(), nil + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, err := strconv.ParseInt(name, base10, b64); err == nil { - return pref.ValueOfInt64(int64(n)).MapKey(), nil + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, err := strconv.ParseUint(name, base10, b32); err == nil { - return pref.ValueOfUint32(uint32(n)).MapKey(), nil + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, err := strconv.ParseUint(name, base10, b64); err == nil { - return pref.ValueOfUint64(uint64(n)).MapKey(), nil + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil } default: panic(fmt.Sprintf("invalid kind for map key: %v", kind)) } - return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index ba971f07810c..d09d22e139bc 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -18,7 +18,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -164,8 +163,8 @@ type typeURLFieldRanger struct { typeURL string } -func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { return } m.FieldRanger.Range(f) @@ -173,9 +172,9 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. 
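The unmarshalMapKey switch above is a thin wrapper over the exported protoreflect API: JSON object keys always arrive as strings and are parsed according to the field's map-key kind, then converted with Value.MapKey. A minimal sketch of those same conversions, using only exported calls (the key values here are arbitrary):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Map keys may be any integral type, bool, or string; each Value
	// converts to a MapKey via Value.MapKey, as unmarshalMapKey does.
	keys := []protoreflect.MapKey{
		protoreflect.ValueOfString("id").MapKey(),
		protoreflect.ValueOfBool(true).MapKey(),
		protoreflect.ValueOfInt32(42).MapKey(),
		protoreflect.ValueOfUint64(7).MapKey(),
	}
	for _, k := range keys {
		fmt.Println(k.IsValid(), k.String())
	}
}
```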
-type unpopulatedFieldRanger struct{ pref.Message } +type unpopulatedFieldRanger struct{ protoreflect.Message } -func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) @@ -184,10 +183,10 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { - v = pref.Value{} // use invalid value to emit null + v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { return @@ -199,7 +198,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b // marshalMessage marshals the fields in the given protoreflect.Message. // If the typeURL is non-empty, then a synthetic "@type" field is injected // containing the URL as the value. -func (e encoder) marshalMessage(m pref.Message, typeURL string) error { +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } @@ -220,7 +219,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } var err error - order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { name = fd.TextName() @@ -238,7 +237,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error { } // marshalValue marshals the given protoreflect.Value. -func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(val.List(), fd) @@ -251,44 +250,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error { // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { if !val.IsValid() { e.WriteNull() return nil } switch kind := fd.Kind(); kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: if e.WriteString(val.String()) != nil { return errors.InvalidUTF8(string(fd.FullName())) } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: e.WriteUint(val.Uint()) - case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind, - pref.Sfixed64Kind, pref.Fixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: // 64-bit integers are written out as JSON string. e.WriteString(val.String()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: if fd.Enum().FullName() == genid.NullValue_enum_fullname { e.WriteNull() } else { @@ -300,7 +299,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -312,7 +311,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List. -func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { e.StartArray() defer e.EndArray() @@ -326,12 +325,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { } // marshalMap marshals given protoreflect.Map. 
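A visible consequence of marshalSingular above: 64-bit integers are written as JSON strings while 32-bit integers remain JSON numbers, because JSON numbers cannot faithfully carry the full int64 range. The generated wrapper types, which protojson encodes as their bare primitive, make this easy to check:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	b32, _ := protojson.Marshal(wrapperspb.Int32(1 << 20))
	b64, _ := protojson.Marshal(wrapperspb.Int64(1 << 40))
	fmt.Println(string(b32)) // 1048576
	fmt.Println(string(b64)) // "1099511627776"
}
```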
-func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { e.StartObject() defer e.EndObject() var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { if err = e.WriteName(k.String()); err != nil { return false } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 72924a9050cf..c85f8469480a 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -17,14 +17,14 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -type marshalFunc func(encoder, pref.Message) error +type marshalFunc func(encoder, protoreflect.Message) error // wellKnownTypeMarshaler returns a marshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc { return nil } -type unmarshalFunc func(decoder, pref.Message) error +type unmarshalFunc func(decoder, protoreflect.Message) error // wellKnownTypeUnmarshaler returns a unmarshal function if the message type // has specialized serialization behavior. It returns nil otherwise. -func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { if name.Parent() == genid.GoogleProtobuf_package { switch name.Name() { case genid.Any_message_name: @@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc { // custom JSON representation, that representation will be embedded adding a // field `value` which holds the custom JSON in addition to the `@type` field. -func (e encoder) marshalAny(m pref.Message) error { +func (e encoder) marshalAny(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) @@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error { return nil } -func (d decoder) unmarshalAny(m pref.Message) error { +func (d decoder) unmarshalAny(m protoreflect.Message) error { // Peek to check for json.ObjectOpen to avoid advancing a read. start, err := d.Peek() if err != nil { @@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - m.Set(fdType, pref.ValueOfString(typeURL)) - m.Set(fdValue, pref.ValueOfBytes(b)) + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) return nil } @@ -354,7 +354,7 @@ func (d decoder) skipJSONValue() error { // unmarshalAnyValue unmarshals the given custom-type message from the JSON // object's "value" field. 
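marshalAny above is easiest to see end to end with an embedded type that has a custom JSON form: the encoder emits the synthetic `@type` field plus a `value` field carrying the custom representation. A sketch using the generated anypb and wrapperspb helpers:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a, err := anypb.New(wrapperspb.String("hello"))
	if err != nil {
		panic(err)
	}
	b, _ := protojson.Marshal(a)
	fmt.Println(string(b))
	// {"@type":"type.googleapis.com/google.protobuf.StringValue","value":"hello"}
}
```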
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error { +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { // Skip ObjectOpen, and start reading the fields. d.Read() @@ -402,13 +402,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro // Wrapper types are encoded as JSON primitives like string, number or boolean. -func (e encoder) marshalWrapperType(m pref.Message) error { +func (e encoder) marshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val := m.Get(fd) return e.marshalSingular(val, fd) } -func (d decoder) unmarshalWrapperType(m pref.Message) error { +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) val, err := d.unmarshalScalar(fd) if err != nil { @@ -420,13 +420,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error { // The JSON representation for Empty is an empty JSON object. -func (e encoder) marshalEmpty(pref.Message) error { +func (e encoder) marshalEmpty(protoreflect.Message) error { e.StartObject() e.EndObject() return nil } -func (d decoder) unmarshalEmpty(pref.Message) error { +func (d decoder) unmarshalEmpty(protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -462,12 +462,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error { // The JSON representation for Struct is a JSON object that contains the encoded // Struct.fields map and follows the serialization rules for a map. -func (e encoder) marshalStruct(m pref.Message) error { +func (e encoder) marshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return e.marshalMap(m.Get(fd).Map(), fd) } -func (d decoder) unmarshalStruct(m pref.Message) error { +func (d decoder) unmarshalStruct(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) return d.unmarshalMap(m.Mutable(fd).Map(), fd) } @@ -476,12 +476,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error { // ListValue.values repeated field and follows the serialization rules for a // repeated field. -func (e encoder) marshalListValue(m pref.Message) error { +func (e encoder) marshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return e.marshalList(m.Get(fd).List(), fd) } -func (d decoder) unmarshalListValue(m pref.Message) error { +func (d decoder) unmarshalListValue(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) return d.unmarshalList(m.Mutable(fd).List(), fd) } @@ -490,7 +490,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error { // set. Each of the field in the oneof has its own custom serialization rule. A // Value message needs to be a oneof field set, else it is an error. 
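The Struct, ListValue, and Value codecs above are what give google.protobuf.Struct its free-form JSON behavior; a Struct built from a Go map marshals to plain JSON, with keys emitted in sorted order per order.GenericKeyOrder:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{
		"name":   "buildkit",
		"pinned": true,
		"stars":  7,
	})
	if err != nil {
		panic(err)
	}
	b, _ := protojson.Marshal(s)
	fmt.Println(string(b)) // {"name":"buildkit","pinned":true,"stars":7}
}
```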
-func (e encoder) marshalKnownValue(m pref.Message) error { +func (e encoder) marshalKnownValue(m protoreflect.Message) error { od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) fd := m.WhichOneof(od) if fd == nil { @@ -504,19 +504,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error { return e.marshalSingular(m.Get(fd), fd) } -func (d decoder) unmarshalKnownValue(m pref.Message) error { +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { tok, err := d.Peek() if err != nil { return err } - var fd pref.FieldDescriptor - var val pref.Value + var fd protoreflect.FieldDescriptor + var val protoreflect.Value switch tok.Kind() { case json.Null: d.Read() fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) - val = pref.ValueOfEnum(0) + val = protoreflect.ValueOfEnum(0) case json.Bool: tok, err := d.Read() @@ -524,7 +524,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) - val = pref.ValueOfBool(tok.Bool()) + val = protoreflect.ValueOfBool(tok.Bool()) case json.Number: tok, err := d.Read() @@ -550,7 +550,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error { return err } fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) - val = pref.ValueOfString(tok.ParsedString()) + val = protoreflect.ValueOfString(tok.ParsedString()) case json.ObjectOpen: fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) @@ -591,7 +591,7 @@ const ( maxSecondsInDuration = 315576000000 ) -func (e encoder) marshalDuration(m pref.Message) error { +func (e encoder) marshalDuration(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) @@ -623,7 +623,7 @@ func (e encoder) marshalDuration(m pref.Message) error { return nil } -func (d decoder) unmarshalDuration(m pref.Message) error { +func (d decoder) unmarshalDuration(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -646,8 +646,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(nanos)) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) return nil } @@ -779,7 +779,7 @@ const ( minTimestampSeconds = -62135596800 ) -func (e encoder) marshalTimestamp(m pref.Message) error { +func (e encoder) marshalTimestamp(m protoreflect.Message) error { fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) @@ -805,7 +805,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error { return nil } -func (d decoder) unmarshalTimestamp(m pref.Message) error { +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -829,8 +829,8 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) - m.Set(fdSeconds, pref.ValueOfInt64(secs)) - m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond()))) + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, 
protoreflect.ValueOfInt32(int32(t.Nanosecond()))) return nil } @@ -839,14 +839,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error { // lower-camel naming conventions. Encoding should fail if the path name would // end up differently after a round-trip. -func (e encoder) marshalFieldMask(m pref.Message) error { +func (e encoder) marshalFieldMask(m protoreflect.Message) error { fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) list := m.Get(fd).List() paths := make([]string, 0, list.Len()) for i := 0; i < list.Len(); i++ { s := list.Get(i).String() - if !pref.FullName(s).IsValid() { + if !protoreflect.FullName(s).IsValid() { return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) } // Return error if conversion to camelCase is not reversible. @@ -861,7 +861,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error { return nil } -func (d decoder) unmarshalFieldMask(m pref.Message) error { +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { tok, err := d.Read() if err != nil { return err @@ -880,10 +880,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error { for _, s0 := range paths { s := strs.JSONSnakeCase(s0) - if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() { + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) } - list.Append(pref.ValueOfString(s)) + list.Append(protoreflect.ValueOfString(s)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 179d6e8fc1ce..4921b2d4a76f 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { } // Resolve the field descriptor. - var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType var xtErr error var isFieldNumberName bool switch tok.NameKind() { case text.IdentName: - name = pref.Name(tok.IdentName()) + name = protoreflect.Name(tok.IdentName()) fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. 
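The FieldMask codec above rejects paths that do not survive the snake_case/camelCase round trip; on the wire the mask is a single comma-separated JSON string. A small check using the generated fieldmaskpb package:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	fm := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}
	b, _ := protojson.Marshal(fm)
	fmt.Println(string(b)) // "user.displayName,photo"
}
```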
- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) + num := protoreflect.FieldNumber(tok.FieldNumber()) if !num.IsValid() { return d.newError(tok.Pos(), "invalid field number: %d", num) } @@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch { case fd.IsList(): kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { default: kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. -func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), true) default: @@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the // given FieldDescriptor. 
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) + return protoreflect.Value{}, d.unexpectedTokenError(tok) } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil + return protoreflect.ValueOfBool(b), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil + return protoreflect.ValueOfInt32(n), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil + return protoreflect.ValueOfInt64(n), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil + return protoreflect.ValueOfUint32(n), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil + return protoreflect.ValueOfUint64(n), nil } - case pref.FloatKind: + case protoreflect.FloatKind: if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil + return protoreflect.ValueOfFloat32(n), nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil + return protoreflect.ValueOfFloat64(n), nil } - case pref.StringKind: + case protoreflect.StringKind: if s, ok := tok.String(); ok { if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") } - return pref.ValueOfString(s), nil + return protoreflect.ValueOfString(s), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil + return protoreflect.ValueOfBytes([]byte(b)), nil } - case pref.EnumKind: + case protoreflect.EnumKind: if lit, ok := tok.Enum(); ok { // Lookup EnumNumber based on name. - if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil } } if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil } default: panic(fmt.Sprintf("invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } // unmarshalList unmarshals into given protoreflect.List. A list value can // either be in [] syntax or simply just a single scalar/message value. 
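unmarshalScalar above accepts the standard textproto scalar spellings; a small example decodes a textproto into the generated durationpb type, which stands in here for any message with numeric fields:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{}
	if err := prototext.Unmarshal([]byte(`seconds: 90 nanos: 500000000`), d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30.5s
}
```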
-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { +func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { tok, err := d.Peek() if err != nil { return err } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: switch tok.Kind() { case text.ListOpen: d.Read() @@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: <key>, value: <value>}. -func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { pval := mmap.NewValue() if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return pval, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: <key>, value: <value>}. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value Loop: for { // Read field name. @@ -520,7 +520,7 @@ Loop: return d.unexpectedTokenError(tok) } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") @@ -535,7 +535,7 @@ Loop: key = val.MapKey() case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -561,7 +561,7 @@ Loop: } if !pval.IsValid() { switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: // If value field is not set for message/group types, construct an // empty one as default. pval = mmap.NewValue() @@ -575,7 +575,7 @@ Loop: // unmarshalAny unmarshals an Any textproto. It can either be in expanded form // or non-expanded form.
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { var typeURL string var bValue []byte var seenTypeUrl bool @@ -619,7 +619,7 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.Any_TypeUrl_field_name: if seenTypeUrl { return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) @@ -686,10 +686,10 @@ Loop: fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 8d5304dc5b32..ebf6c65284dd 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -20,7 +20,6 @@ import ( "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -150,7 +149,7 @@ type encoder struct { } // marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(name, val.List(), fd) @@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
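unmarshalAny above also accepts the expanded textproto form, in which a bracketed type URL serves as the field name; the embedded type is resolved through the global registry, so it must be linked into the binary (hence the blank durationpb import in this sketch):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers Duration
)

func main() {
	a := &anypb.Any{}
	src := `[type.googleapis.com/google.protobuf.Duration] { seconds: 3 }`
	if err := prototext.Unmarshal([]byte(src), a); err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration
}
```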
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: s := val.String() if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { return errors.InvalidUTF8(string(fd.FullName())) } e.WriteString(s) - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: e.WriteUint(val.Uint()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(string(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: num := val.Enum() if desc := fd.Enum().Values().ByNumber(num); desc != nil { e.WriteLiteral(string(desc.Name())) @@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error e.WriteInt(int64(num)) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return e.marshalMessage(val.Message(), true) default: @@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { size := list.Len() for i := 0; i < size; i++ { e.WriteName(name) @@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto } // marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) { // marshalAny marshals the given google.protobuf.Any message in expanded form. // It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { +func (e encoder) marshalAny(any protoreflect.Message) bool { // Construct the embedded message. 
fds := any.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b7043b..ce57f57ebd48 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } @@ -507,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 { } // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} func DecodeZigZag(x uint64) int64 { @@ -514,6 +524,7 @@ func DecodeZigZag(x uint64) int64 { } // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} func EncodeZigZag(x int64) uint64 { @@ -521,6 +532,7 @@ func EncodeZigZag(x int64) uint64 { } // DecodeBool decodes a uint64 as a bool. +// // Input: { 0, 1, 2, …} // Output: {false, true, true, …} func DecodeBool(x uint64) bool { @@ -528,6 +540,7 @@ func DecodeBool(x uint64) bool { } // EncodeBool encodes a bool as a uint64. 
+// // Input: {false, true} // Output: { 0, 1} func EncodeBool(x bool) uint64 { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 360c63329d4d..db5248e1b512 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type list interface { @@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { if isRoot { var name string switch vs.(type) { - case pref.Names: + case protoreflect.Names: name = "Names" - case pref.FieldNumbers: + case protoreflect.FieldNumbers: name = "FieldNumbers" - case pref.FieldRanges: + case protoreflect.FieldRanges: name = "FieldRanges" - case pref.EnumRanges: + case protoreflect.EnumRanges: name = "EnumRanges" - case pref.FileImports: + case protoreflect.FileImports: name = "FileImports" - case pref.Descriptor: + case protoreflect.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" default: name = reflect.ValueOf(vs).Elem().Type().Name() @@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var ss []string switch vs := vs.(type) { - case pref.Names: + case protoreflect.Names: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldNumbers: + case protoreflect.FieldNumbers: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldRanges: + case protoreflect.FieldRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0]+1 == r[1] { @@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.EnumRanges: + case protoreflect.EnumRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0] == r[1] { @@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.FileImports: + case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") @@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } return start + joinStrings(ss, allowMulti) + end default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } @@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { // // Using a list allows us to print the accessors in a sensible order. 
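Stepping back to the protowire hunk above: ConsumeFieldValue now rejects groups nested more deeply than DefaultRecursionLimit rather than recursing without bound. A sketch that trips the limit; the exact error value is internal to the package, so only the negative length is checked:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Nest groups comfortably past DefaultRecursionLimit (10000).
	const depth = protowire.DefaultRecursionLimit + 10
	var b []byte
	for i := 0; i < depth; i++ {
		b = protowire.AppendTag(b, 1, protowire.StartGroupType)
	}
	for i := 0; i < depth; i++ {
		b = protowire.AppendTag(b, 1, protowire.EndGroupType)
	}

	num, typ, n := protowire.ConsumeTag(b)
	if m := protowire.ConsumeFieldValue(num, typ, b[n:]); m < 0 {
		fmt.Println("rejected:", protowire.ParseError(m))
	}
}
```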
var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, + reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, } -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) } -func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { start = rt.Name() + "{" } - _, isFile := t.(pref.FileDescriptor) + _, isFile := t.(protoreflect.FileDescriptor) rs := records{allowMulti: allowMulti} if t.IsPlaceholder() { if isFile { @@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, "Name") } switch t := t.(type) { - case pref.FieldDescriptor: + case protoreflect.FieldDescriptor: for _, s := range descriptorAccessors[rt] { switch s { case "MapKey": @@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { case "MapValue": if v := t.MapValue(); v != nil { switch v.Kind() { - case pref.EnumKind: + case protoreflect.EnumKind: 
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) default: rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) @@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, s) } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: var ss []string fs := t.Fields() for i := 0; i < fs.Len(); i++ { @@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { if !rv.IsValid() { panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) } - if _, ok := rv.Interface().(pref.Value); ok { + if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] if !rv.IsNil() { rv = rv.Elem() @@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { switch v := v.(type) { case list: s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: s = string(v.FullName()) case string: s = strconv.Quote(v) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go index fdd9b13f2fcf..328dc733b042 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -15,8 +15,8 @@ import ( "strconv" ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" ) // Format is the serialization format used to represent the default value. @@ -35,56 +35,56 @@ const ( // Unmarshal deserializes the default string s according to the given kind k. // When k is an enum, a list of enum value descriptors must be provided. -func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { switch s { case "1": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "0": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } else { switch s { case "true": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "false": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { // Go tags use the numeric form of the enum value. 
if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } } else { // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) + ev := evs.ByName(protoreflect.Name(s)) if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil + return protoreflect.ValueOfInt32(int32(v)), nil, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil + return protoreflect.ValueOfInt64(int64(v)), nil, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil + return protoreflect.ValueOfUint32(uint32(v)), nil, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil + return protoreflect.ValueOfUint64(uint64(v)), nil, nil } - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: var v float64 var err error switch s { @@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( v, err = strconv.ParseFloat(s, 64) } if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil } else { - return pref.ValueOfFloat64(float64(v)), nil, nil + return protoreflect.ValueOfFloat64(float64(v)), nil, nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are already unescaped and can be used as is. - return pref.ValueOfString(s), nil, nil - case pref.BytesKind: + return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil + return protoreflect.ValueOfBytes(b), nil, nil } } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) } // Marshal serializes v as the default string according to the given kind k. // When specifying the Descriptor format for an enum kind, the associated // enum value descriptor must be provided. 
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { if v.Bool() { return "1", nil @@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( return "false", nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { return strconv.FormatInt(int64(v.Enum()), 10), nil } else { return string(ev.Name()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: f := v.Float() switch { case math.IsInf(f, -1): @@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( case math.IsNaN(f): return "nan", nil default: - if k == pref.FloatKind { + if k == protoreflect.FloatKind { return strconv.FormatFloat(f, 'g', -1, 32), nil } else { return strconv.FormatFloat(f, 'g', -1, 64), nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are serialized as is without any escaping. return v.String(), nil - case pref.BytesKind: + case protoreflect.BytesKind: if s, ok := marshalBytes(v.Bytes()); ok { return s, nil } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index c1866f3c1a78..a6693f0a2f39 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // The MessageSet wire format is equivalent to a message defined as follows, @@ -33,6 +33,7 @@ const ( // ExtensionName is the field name for extensions of MessageSet. // // A valid MessageSet extension must be of the form: +// // message MyMessage { // extend proto2.bridge.MessageSet { // optional MyMessage message_set_extension = 1234; @@ -42,13 +43,13 @@ const ( const ExtensionName = "message_set_extension" // IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { +func IsMessageSet(md protoreflect.MessageDescriptor) bool { xmd, ok := md.(interface{ IsMessageSet() bool }) return ok && xmd.IsMessageSet() } // IsMessageSetExtension reports this field properly extends a MessageSet. 
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool { +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { switch { case fd.Name() != ExtensionName: return false diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 38f1931c6fd1..373d208374f8 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var byteType = reflect.TypeOf(byte(0)) @@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) // This does not populate the Enum or Message (except for weak message). // // This function is a best effort attempt; parsing errors are ignored. -func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) + f.L0.FullName = protoreflect.FullName(s[len("name="):]) case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) + f.L1.Number = protoreflect.FieldNumber(n) case s == "opt": - f.L1.Cardinality = pref.Optional + f.L1.Cardinality = protoreflect.Optional case s == "req": - f.L1.Cardinality = pref.Required + f.L1.Cardinality = protoreflect.Required case s == "rep": - f.L1.Cardinality = pref.Repeated + f.L1.Cardinality = protoreflect.Repeated case s == "varint": switch goType.Kind() { case reflect.Bool: - f.L1.Kind = pref.BoolKind + f.L1.Kind = protoreflect.BoolKind case reflect.Int32: - f.L1.Kind = pref.Int32Kind + f.L1.Kind = protoreflect.Int32Kind case reflect.Int64: - f.L1.Kind = pref.Int64Kind + f.L1.Kind = protoreflect.Int64Kind case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind + f.L1.Kind = protoreflect.Uint32Kind case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind + f.L1.Kind = protoreflect.Uint64Kind } case s == "zigzag32": if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind + f.L1.Kind = protoreflect.Sint32Kind } case s == "zigzag64": if goType.Kind() == reflect.Int64 { - f.L1.Kind = pref.Sint64Kind + f.L1.Kind = protoreflect.Sint64Kind } case s == "fixed32": switch goType.Kind() { case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind + f.L1.Kind = protoreflect.Sfixed32Kind case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind + f.L1.Kind = protoreflect.Fixed32Kind case reflect.Float32: - f.L1.Kind = pref.FloatKind + f.L1.Kind = protoreflect.FloatKind } case s == "fixed64": switch goType.Kind() { case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind + f.L1.Kind = protoreflect.Sfixed64Kind case reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind + 
f.L1.Kind = protoreflect.Fixed64Kind case reflect.Float64: - f.L1.Kind = pref.DoubleKind + f.L1.Kind = protoreflect.DoubleKind } case s == "bytes": switch { case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind + f.L1.Kind = protoreflect.StringKind case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind + f.L1.Kind = protoreflect.BytesKind default: - f.L1.Kind = pref.MessageKind + f.L1.Kind = protoreflect.MessageKind } case s == "group": - f.L1.Kind = pref.GroupKind + f.L1.Kind = protoreflect.GroupKind case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind + f.L1.Kind = protoreflect.EnumKind case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { @@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. s, i = tag[len("def="):], len(tag) v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) + f.L1.Default = filedesc.DefaultValue(v, ev) case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 + f.L0.ParentFile = filedesc.SurrogateProto3 } tag = strings.TrimPrefix(tag[i:], ",") } // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) } return f } @@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p // Depending on the context on how Marshal is called, there are different ways // through which that information is determined. As such it is the caller's // responsibility to provide a function to obtain that information. 
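For context on what tag.Unmarshal above is parsing (and what Marshal in the next hunk emits), here is a hedged sketch of the legacy protoc-gen-go struct tag format; the message and field names are hypothetical:

```go
package main

import (
	"fmt"
	"reflect"
)

// Example mirrors what protoc-gen-go emits for a proto2 optional field;
// the names here are hypothetical.
type Example struct {
	Count *int32 `protobuf:"varint,1,opt,name=count,def=42"`
}

func main() {
	// tag.Unmarshal receives this comma-separated value and rebuilds a
	// FieldDescriptor from it: wire type, field number, cardinality,
	// then name=, json=, def= and similar options.
	f, _ := reflect.TypeOf(Example{}).FieldByName("Count")
	fmt.Println(f.Tag.Get("protobuf")) // varint,1,opt,name=count,def=42
}
```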
-func Marshal(fd pref.FieldDescriptor, enumName string) string { +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { var tag []string switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: tag = append(tag, "varint") - case pref.Sint32Kind: + case protoreflect.Sint32Kind: tag = append(tag, "zigzag32") - case pref.Sint64Kind: + case protoreflect.Sint64Kind: tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: tag = append(tag, "bytes") - case pref.GroupKind: + case protoreflect.GroupKind: tag = append(tag, "group") } tag = append(tag, strconv.Itoa(int(fd.Number()))) switch fd.Cardinality() { - case pref.Optional: + case protoreflect.Optional: tag = append(tag, "opt") - case pref.Required: + case protoreflect.Required: tag = append(tag, "req") - case pref.Repeated: + case protoreflect.Repeated: tag = append(tag, "rep") } if fd.IsPacked() { tag = append(tag, "packed") } name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { // The name of the FieldDescriptor for a group field is // lowercased. To find the original capitalization, we // look in the field's MessageType. @@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { tag = append(tag, "proto3") } - if fd.Kind() == pref.EnumKind && enumName != "" { + if fd.Kind() == protoreflect.EnumKind && enumName != "" { tag = append(tag, "enum="+enumName) } if fd.ContainingOneof() != nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea10261a..427c62d037fc 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "unicode/utf8" @@ -381,7 +380,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { @@ -421,7 +420,7 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) } - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } // parseTypeName parses Any type URL or extension field name. 
The name is @@ -571,7 +570,7 @@ func (d *Decoder) parseScalar() (Token, error) { return tok, nil } - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) } // parseLiteralValue parses a literal value. A literal value is used for @@ -653,8 +652,29 @@ func consume(b []byte, n int) []byte { return b } -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) + } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} // isDelim returns true if given byte is a delimiter character. func isDelim(c byte) bool { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index f2d90b78999f..81a5d8c86139 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -50,8 +50,10 @@ type number struct { // parseNumber constructs a number object from given input. It allows for the // following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// // It also returns the number of parsed bytes for the given number, 0 if it is // not a number. func parseNumber(input []byte) number { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go index 0ce8d6fb83d9..7ae6c2a3c26d 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -24,6 +24,6 @@ // the Go implementation should as well. // // The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters package text diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b37ab..fbcd349207dd 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
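The errId helper introduced in decode.go above replaces the old errRegexp: it walks the input rune by rune, stops at the first delimiter (treating `/` as part of an identifier), and caps the excerpt at roughly 32 bytes. A standalone sketch of the same logic, assuming the inlined isDelim matches the decoder's delimiter table:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// isDelim: anything that is not [-+._a-zA-Z0-9] counts as a delimiter,
// mirroring (under that assumption) the decoder's table.
func isDelim(c byte) bool {
	return !(c == '-' || c == '+' || c == '.' || c == '_' ||
		('a' <= c && c <= 'z') ||
		('A' <= c && c <= 'Z') ||
		('0' <= c && c <= '9'))
}

// errId truncates the input at the first delimiter other than '/',
// appending "…" once the excerpt exceeds 32 bytes, as in the patch.
func errId(seq []byte) []byte {
	const maxLen = 32
	for i := 0; i < len(seq); {
		if i > maxLen {
			return append(seq[:i:i], "…"...)
		}
		r, size := utf8.DecodeRune(seq[i:])
		if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) {
			if i == 0 {
				// First byte is invalid UTF-8, a delimiter, or a
				// non-ASCII rune: return it as-is.
				i = size
			}
			return seq[:i:i]
		}
		i += size
	}
	// No delimiter found.
	return seq
}

func main() {
	fmt.Printf("%q\n", errId([]byte("foo,bar")))    // "foo"
	fmt.Printf("%q\n", errId([]byte("{malformed"))) // "{"
}
```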
+//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191c01..5e72f1cde9e1 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index b293b6947361..7cac1c19016f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -12,8 +12,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs a protoreflect.FileDescriptor from the raw descriptor. @@ -38,7 +37,7 @@ type Builder struct { // TypeResolver resolves extension field types for descriptor options. // If nil, it uses protoregistry.GlobalTypes. TypeResolver interface { - preg.ExtensionTypeResolver + protoregistry.ExtensionTypeResolver } // FileRegistry is used to look up file, enum, and message dependencies. @@ -46,8 +45,8 @@ type Builder struct { // If nil, it uses protoregistry.GlobalFiles. FileRegistry interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } } @@ -55,8 +54,8 @@ type Builder struct { // If so, it permits looking up an enum or message dependency based on the // sub-list and element index into filetype.Builder.DependencyIndexes. type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor } // Indexes of each sub-list in filetype.Builder.DependencyIndexes. @@ -70,7 +69,7 @@ const ( // Out is the output of the Builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor // Enums is all enum descriptors in "flattened ordering". Enums []Enum @@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { // Initialize resolvers and registries if unpopulated.
if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes + db.TypeResolver = protoregistry.GlobalTypes } if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles + db.FileRegistry = protoregistry.GlobalFiles } fd := newRawFile(db) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 98ab142aeee6..7c3689baee8a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -43,9 +43,9 @@ type ( L2 *FileL2 } FileL1 struct { - Syntax pref.Syntax + Syntax protoreflect.Syntax Path string - Package pref.FullName + Package protoreflect.FullName Enums Enums Messages Messages @@ -53,36 +53,36 @@ type ( Services Services } FileL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } ) -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() } return descopts.File } -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return 
&fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { @@ -119,7 +119,7 @@ type ( eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Values EnumValues ReservedNames Names ReservedRanges EnumRanges @@ -130,41 +130,41 @@ type ( L1 EnumValueL1 } EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber } ) -func (ed *Enum) Options() pref.ProtoMessage { +func (ed *Enum) Options() protoreflect.ProtoMessage { if f := ed.lazyInit().Options; f != nil { return f() } return descopts.Enum } -func (ed *Enum) Values() pref.EnumValueDescriptors { +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { if ed.L1.eagerValues { return &ed.L2.Values } return &ed.lazyInit().Values } -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *EnumValue) Options() pref.ProtoMessage { +func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { return f() } return descopts.EnumValue } -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} type ( Message struct { @@ -180,14 +180,14 @@ type ( IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields Fields Oneofs Oneofs ReservedNames Names ReservedRanges FieldRanges RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges } Field struct { @@ -195,10 +195,10 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind + Options func() 
protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions @@ -207,9 +207,9 @@ type ( HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } Oneof struct { @@ -217,35 +217,35 @@ type ( L1 OneofL1 } OneofL1 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof } ) -func (md *Message) Options() pref.ProtoMessage { +func (md *Message) Options() protoreflect.ProtoMessage { if f := md.lazyInit().Options; f != nil { return f() } return descopts.Message } -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { return f() } return descopts.ExtensionRange } -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { 
descfmt.FormatDesc(s, r, md) } func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 @@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool { return md.L1.IsMessageSet } -func (fd *Field) Options() pref.ProtoMessage { +func (fd *Field) Options() protoreflect.ProtoMessage { if f := fd.L1.Options; f != nil { return f() } return descopts.Field } -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: default: return true } @@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { +func (fd *Field) MapKey() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } -func (fd *Field) MapValue() pref.FieldDescriptor { +func (fd *Field) MapValue() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() 
pref.EnumValueDescriptor { return fd.L1.Default.enum } -func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) } -func (fd *Field) Enum() pref.EnumDescriptor { +func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } -func (fd *Field) Message() pref.MessageDescriptor { +func (fd *Field) Message() protoreflect.MessageDescriptor { if fd.L1.IsWeak { if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } } return fd.L1.Message } -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 // validation for the string field. This exists for Google-internal use only @@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool { if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() } -func (od *Oneof) Options() pref.ProtoMessage { +func (od *Oneof) Options() protoreflect.ProtoMessage { if f := od.L1.Options; f != nil { return f() } return descopts.Oneof } -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} type ( Extension struct { @@ -359,55 +359,57 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } ) -func (xd *Extension) 
Options() pref.ProtoMessage { +func (xd *Extension) Options() protoreflect.ProtoMessage { if f := xd.lazyInit().Options; f != nil { return f() } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd 
*Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} func (xd *Extension) lazyInit() *ExtensionL2 { xd.L0.ParentFile.lazyInit() // implicitly initializes L2 return xd.L2 @@ -421,7 +423,7 @@ type ( } ServiceL1 struct{} ServiceL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Methods Methods } @@ -430,48 +432,48 @@ type ( L1 MethodL1 } MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor IsStreamingClient bool IsStreamingServer bool } ) -func (sd *Service) Options() pref.ProtoMessage { +func (sd *Service) Options() protoreflect.ProtoMessage { if f := sd.lazyInit().Options; f != nil { return f() } return descopts.Service } -func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} func (sd *Service) lazyInit() *ServiceL2 { sd.L0.ParentFile.lazyInit() // implicitly initializes L2 return sd.L2 } -func (md *Method) Options() pref.ProtoMessage { +func (md *Method) Options() protoreflect.ProtoMessage { if f := md.L1.Options; f != nil { return f() } return descopts.Method } -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files can be used to create standalone descriptors // where the syntax is only information derived from the parent file.
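Almost everything in this desc.go diff is one mechanical rewrite, repeated: the pref, fdesc, and preg import aliases are dropped in favor of the canonical package names, and gofmt realigns the touched declarations. A minimal sketch of the pattern at a call site:

```go
package main

import (
	"fmt"

	// Previously imported as: pref "google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Call sites change from pref.MessageKind to protoreflect.MessageKind;
	// behavior is identical, only the qualifier is longer.
	k := protoreflect.MessageKind
	fmt.Println(k) // message
}
```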
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -479,24 +481,24 @@ type ( L0 BaseL0 } BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor Index int } ) -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { return nil // surrogate files are not real parents } return d.L0.ParentFile } -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} @@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { // For extensions, JSON and text are formatted the same way. @@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } @@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { return s } -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { // Store a copy of the default bytes, so that we can detect @@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { return dv } -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { // If the enum is declared within the same file, be careful not to // blindly call the Values method, lest we bind ourselves in a deadlock. if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { @@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d // If we are unable to resolve the enum dependency, use a placeholder // enum value since we will not be able to parse the default value. - if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) return DefaultValue(v, ev) } } @@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d type defaultValue struct { has bool - val pref.Value - enum pref.EnumValueDescriptor + val protoreflect.Value + enum protoreflect.EnumValueDescriptor bytes []byte } -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { // Return the zero value as the default if unpopulated. 
if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} } switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) + return protoreflect.ValueOfEnum(evs.Get(0).Number()) } - return pref.ValueOfEnum(0) + return protoreflect.ValueOfEnum(0) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 66e1fee52243..4a1584c9d29f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // fileRaw is a data struct used when initializing a file descriptor from @@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { sb := getBuilder() defer putBuilder(sb) - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int b0 := b @@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 case "proto3": - fd.L1.Syntax = pref.Proto3 + fd.L1.Syntax = protoreflect.Proto3 default: panic("invalid syntax") } case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -163,7 +163,7 @@ func (fd 
*File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 } // Must allocate all declarations before parsing each descriptor type @@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i @@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc } } -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int var posEnums, posMessages, posExtensions int b0 := b @@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { } } -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i @@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) + xd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) + xd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) + xd.L1.Kind = protoreflect.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref } } -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { sd.L0.ParentFile = pf sd.L0.Parent = pd sd.L0.Index = i @@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { // makeFullName converts b to a protoreflect.FullName, // where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { if len(b) == 0 || b[0] != '.' 
{ panic("name reference must be fully qualified") } - return pref.FullName(sb.MakeString(b[1:])) + return protoreflect.FullName(sb.MakeString(b[1:])) } -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 198451e3ec94..736a19a75bc7 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -13,7 +13,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) func (fd *File) lazyRawInit() { @@ -39,10 +39,10 @@ func (file *File) resolveMessages() { // Resolve message field dependency. switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ } @@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { // Resolve extension field dependency. switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) depIdx++ } @@ -92,7 +92,7 @@ func (file *File) resolveServices() { } } -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { @@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref } } if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) + return d.(protoreflect.EnumDescriptor) } return ed } -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { @@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 } } if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } return md } @@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { if imp == nil { imp = PlaceholderFile(path) } - fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + fd.L2.Imports = 
append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) case genid.EnumDescriptorProto_Options_field_number: @@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) } -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { b = b[m:] switch num { case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) + r[0] = protoreflect.EnumNumber(v) case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) + r[1] = protoreflect.EnumNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { return r } -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { vd.L0.ParentFile = pf vd.L0.Parent = pd vd.L0.Index = i @@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) + vd.L1.Number = protoreflect.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) case genid.DescriptorProto_ExtensionRange_field_number: @@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { for i, b := range rawFields { fd := &md.L2.Fields.List[i] fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { + if fd.L1.Cardinality == protoreflect.Required { md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) } } @@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) { } } -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { +func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := 
protowire.ConsumeTag(b) b = b[n:] @@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { b = b[m:] switch num { case genid.DescriptorProto_ReservedRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { return r } -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions b = b[m:] switch num { case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions return r, rawOptions } -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i @@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) + fd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) + fd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) + fd.L1.Kind = protoreflect.Kind(v) case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either @@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = PlaceholderMessage(name) } } @@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { } } -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (od *Oneof) 
unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { od.L0.ParentFile = pf od.L0.Parent = pd od.L0.Index = i @@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_JsonName_field_number: xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = PlaceholderMessage(name) } } @@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) } -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i @@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { // // The type of message to unmarshal to is passed as a pointer since the // vars in descopts may not yet be populated at the time this function is called. 
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { if b == nil { return nil } - var opts pref.ProtoMessage + var opts protoreflect.ProtoMessage var once sync.Once - return func() pref.ProtoMessage { + return func() protoreflect.ProtoMessage { once.Do(func() { if *p == nil { panic("Descriptor.Options called without importing the descriptor package") } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) if err := (proto.UnmarshalOptions{ AllowPartial: true, Resolver: db.TypeResolver, diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index aa294fff99a8..e3b6587da63a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -17,31 +17,30 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) -type FileImports []pref.FileImport +type FileImports []protoreflect.FileImport func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} type Names struct { - List []pref.Name + List []protoreflect.Name once sync.Once - has map[pref.Name]int // protected by once + has map[protoreflect.Name]int // protected by once } func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *Names) ProtoInternal(pragma.DoNotImplement) {} func (p *Names) lazyInit() *Names { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) + p.has = make(map[protoreflect.Name]int, len(p.List)) for _, s := range p.List { p.has[s] = p.has[s] + 1 } @@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { } type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive once sync.Once - sorted [][2]pref.EnumNumber // protected by once + sorted [][2]protoreflect.EnumNumber // protected by once } -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := enumRange(ls[i]); { @@ -129,14 +128,14 @@ func (r enumRange) String() string { } type FieldRanges struct { - List [][2]pref.FieldNumber // start 
inclusive; end exclusive + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive once sync.Once - sorted [][2]pref.FieldNumber // protected by once + sorted [][2]protoreflect.FieldNumber // protected by once } -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := fieldRange(ls[i]); { @@ -221,17 +220,17 @@ func (r fieldRange) String() string { } type FieldNumbers struct { - List []pref.FieldNumber + List []protoreflect.FieldNumber once sync.Once - has map[pref.FieldNumber]struct{} // protected by once + has map[protoreflect.FieldNumber]struct{} // protected by once } -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) for _, n := range p.List { p.has[n] = struct{}{} } @@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} type OneofFields struct { - List []pref.FieldDescriptor + List []protoreflect.FieldDescriptor once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once } -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor 
{ + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} func (p *OneofFields) lazyInit() *OneofFields { p.once.Do(func() { if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f @@ -284,123 +291,123 @@ type SourceLocations struct { // List is a list of SourceLocations. // The SourceLocation.Next field does not need to be populated // as it will be lazily populated upon first need. - List []pref.SourceLocation + List []protoreflect.SourceLocation // File is the parent file descriptor that these locations are relative to. // If non-nil, ByDescriptor verifies that the provided descriptor // is a child of this file descriptor. - File pref.FileDescriptor + File protoreflect.FileDescriptor once sync.Once byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { if i, ok := p.lazyInit().byPath[k]; ok { return p.List[i] } - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { return p.byKey(newPathKey(path)) } -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files + return protoreflect.SourceLocation{} // mismatching parent files } var pathArr [16]int32 path := pathArr[:0] for { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: // Reverse the path since it was constructed in reverse. 
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { path[i], path[j] = path[j], path[i] } return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() path = append(path, int32(desc.Index())) desc = desc.Parent() if isExtension { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Extension_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } else { switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Field_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumValueDescriptor: + case protoreflect.EnumValueDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.MethodDescriptor: + case protoreflect.MethodDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) default: - return pref.SourceLocation{} + return 
protoreflect.SourceLocation{} } default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } } @@ -435,7 +442,7 @@ type pathKey struct { str string // used if the path does not fit in arr } -func newPathKey(p pref.SourcePath) (k pathKey) { +func newPathKey(p protoreflect.SourcePath) (k pathKey) { if len(p) < len(k.arr) { for i, ps := range p { if ps < 0 || math.MaxUint8 <= ps { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index dbf2c605bfe5..28240ebc5c4a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var ( @@ -30,78 +30,80 @@ var ( // PlaceholderFile is a placeholder, representing only the file path. type PlaceholderFile string -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices 
} +func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName +type PlaceholderEnum protoreflect.FullName -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnumValue is a placeholder, representing only the full name. 
-type PlaceholderEnumValue pref.FullName +type PlaceholderEnumValue protoreflect.FullName -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderMessage is a placeholder, representing only the full name. 
-type PlaceholderMessage pref.FullName +type PlaceholderMessage protoreflect.FullName -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m 
PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index 0a0dd35de5a7..f0e38c4ef4e0 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -10,17 +10,16 @@ import ( "reflect" "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filedesc" pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs type descriptors from a raw file descriptor // and associated Go types for each enum and message declaration. // -// -// Flattened Ordering +// # Flattened Ordering // // The protobuf type system represents declarations as a tree. Certain nodes in // the tree require us to either associate it with a concrete Go type or to @@ -52,7 +51,7 @@ import ( // that children themselves may have. type Builder struct { // File is the underlying file descriptor builder. - File fdesc.Builder + File filedesc.Builder // GoTypes is a unique set of the Go types for all declarations and // dependencies. Each type is represented as a zero value of the Go type. @@ -108,22 +107,22 @@ type Builder struct { // TypeRegistry is the registry to register each type descriptor. // If nil, it uses protoregistry.GlobalTypes. TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error } } // Out is the output of the builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor } func (tb Builder) Build() (out Out) { // Replace the resolver with one that resolves dependencies by index, // which is faster and more reliable than relying on the global registry. if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles + tb.File.FileRegistry = protoregistry.GlobalFiles } tb.File.FileRegistry = &resolverByIndex{ goTypes: tb.GoTypes, @@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { // Initialize registry if unpopulated. 
if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes + tb.TypeRegistry = protoregistry.GlobalTypes } fbOut := tb.File.Build() @@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { for i := range fbOut.Messages { switch fbOut.Messages[i].Name() { case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) } } } @@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { const listExtDeps = 2 var goType reflect.Type switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ @@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { return out } -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: 
reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), } type depIdxs []int32 @@ -274,13 +273,13 @@ type ( fileRegistry } fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } ) -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { return &es[depIdx] } else { @@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes } } -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { return &ms[depIdx-len(es)] } else { diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02d9e..bda8e8cf3fce 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4d69..6d8d9bd6b01a 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index abee5f30e9fd..a371f98de143 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Export is a zero-length named type that exists only to export a set of @@ -32,11 +32,11 @@ type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. 
-func (Export) EnumOf(e enum) pref.Enum { +func (Export) EnumOf(e enum) protoreflect.Enum { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e default: return legacyWrapEnum(reflect.ValueOf(e)) @@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. // It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Descriptor() default: return LegacyLoadEnumDesc(reflect.TypeOf(e)) @@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { // EnumTypeOf returns the protoreflect.EnumType for e. // It returns nil if e is nil. -func (Export) EnumTypeOf(e enum) pref.EnumType { +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Type() default: return legacyLoadEnumType(reflect.TypeOf(e)) @@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { // EnumStringOf returns the enum value as a string, either as the name if // the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { ev := ed.Values().ByNumber(n) if ev != nil { return string(ev.Name()) @@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } @@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. // It returns nil if m is nil. -func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { switch mv := m.(type) { case nil: return nil - case piface.MessageV1: + case protoiface.MessageV1: return mv case unwrapper: return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return legacyMessageWrapper{mv} default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) } } -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { switch mv := m.(type) { case nil: return nil - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return mv case legacyMessageWrapper: return mv.m - case piface.MessageV1: + case protoiface.MessageV1: return nil default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) @@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. // It returns nil if m is nil. 
-func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { if m == nil { return nil } @@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { // MessageOf returns the protoreflect.Message interface over m. // It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { +func (Export) MessageOf(m message) protoreflect.Message { if m == nil { return nil } @@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. // It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { +func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { if m == nil { return nil } @@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { // MessageTypeOf returns the protoreflect.MessageType for m. // It returns nil if m is nil. -func (Export) MessageTypeOf(m message) pref.MessageType { +func (Export) MessageTypeOf(m message) protoreflect.MessageType { if m == nil { return nil } @@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { // MessageStringOf returns the message value as a string, // which is the message serialized in the protobuf text format. -func (Export) MessageStringOf(m pref.ProtoMessage) string { +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { return prototext.MarshalOptions{Multiline: false}.Format(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index b82341e575cb..bff041edc946 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -8,18 +8,18 @@ import ( "sync" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() } else { p = in.Message.(*messageReflectWrapper).pointer() } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) } func (mi *MessageInfo) checkInitializedPointer(p pointer) error { @@ -90,7 +90,7 @@ var ( // needsInitCheck reports whether a message needs to be checked for partial initialization. // // It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { +func needsInitCheck(md protoreflect.MessageDescriptor) bool { if v, ok := needsInitCheckMap.Load(md); ok { if has, ok := v.(bool); ok { return has @@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { return needsInitCheckLocked(md) } -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { if v, ok := needsInitCheckMap.Load(md); ok { // If has is true, we've previously determined that this message // needs init checks. 
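// Every hunk in this vendored protobuf update makes the same mechanical
// change: named import aliases (pref, piface, preg, fdesc) are dropped in
// favor of the canonical package names, and call sites are rewritten to
// match (pref.FullName -> protoreflect.FullName, piface.MessageV1 ->
// protoiface.MessageV1, preg.GlobalTypes -> protoregistry.GlobalTypes,
// fdesc.Builder -> filedesc.Builder). The update also modernizes build
// constraints (adding //go:build lines alongside the legacy // +build form)
// and doc-comment headings ("# Flattened Ordering"). A minimal before/after
// sketch of the rename, in an illustrative file that is not part of the
// patch:
//
//	// before
//	import pref "google.golang.org/protobuf/reflect/protoreflect"
//
//	func fullName(b []byte) pref.FullName { return pref.FullName(b) }
//
//	// after
//	import "google.golang.org/protobuf/reflect/protoreflect"
//
//	func fullName(b []byte) protoreflect.FullName { return protoreflect.FullName(b) }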
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 08d35170b66c..e74cefdc506f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type extensionFieldInfo struct { @@ -23,7 +23,7 @@ type extensionFieldInfo struct { var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info @@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { } // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { return xi.(*extensionFieldInfo) } @@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { return e } -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { var wiretag uint64 if !xd.IsPacked() { wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) @@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { // This is true for composite types, where we pass in a message, list, or map to fill in, // and for enums, where we pass in a prototype value to specify the concrete enum type. switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: e.unmarshalNeedsValue = true default: - if xd.Cardinality() == pref.Repeated { + if xd.Cardinality() == protoreflect.Repeated { e.unmarshalNeedsValue = true } } @@ -73,21 +73,21 @@ type lazyExtensionValue struct { atomicOnce uint32 // atomically set if value is valid mu sync.Mutex xi *extensionFieldInfo - value pref.Value + value protoreflect.Value b []byte - fn func() pref.Value + fn func() protoreflect.Value } type ExtensionField struct { - typ pref.ExtensionType + typ protoreflect.ExtensionType // value is either the value of GetValue, // or a *lazyExtensionValue that then returns the value of GetValue. - value pref.Value + value protoreflect.Value lazy *lazyExtensionValue } -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { if f.lazy == nil { f.lazy = &lazyExtensionValue{xi: xi} } @@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie f.lazy.b = append(f.lazy.b, b...) 
} -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { if f.typ == nil { return true } @@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { // Set sets the type and value of the extension field. // This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { f.typ = t f.value = v f.lazy = nil @@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { // SetLazy sets the type and a value that is to be lazily evaluated upon first use. // This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { +func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { f.typ = t f.lazy = &lazyExtensionValue{fn: fn} } // Value returns the value of the extension field. // This may be called concurrently. -func (f *ExtensionField) Value() pref.Value { +func (f *ExtensionField) Value() protoreflect.Value { if f.lazy != nil { if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { f.lazyInit() @@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { // Type returns the type of the extension field. // This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { +func (f ExtensionField) Type() protoreflect.ExtensionType { return f.typ } @@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { // IsLazy reports whether a field is lazily encoded. // It is exported for testing. -func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { var mi *MessageInfo var p pointer switch m := m.(type) { @@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { default: return false } - xd, ok := fd.(pref.ExtensionTypeDescriptor) + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index cb4b482d166f..3fadd241e1c4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -12,9 +12,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) type errInvalidUTF8 struct{} @@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } // to the appropriate field-specific function as necessary. // // The unmarshal function is set on each field individually as usual. 
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { fs := si.oneofsByName[od.Name()] ft := fs.Type oneofFields := make(map[reflect.Type]*coderFieldInfo) @@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn } } -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { var once sync.Once - var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) }) } @@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { } } -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageInfo, @@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.ProtoReflect(), }) @@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeMessage(m, tagsize, opts) } -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendMessage(b, m, wiretag, opts) } -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeMessage(b, m, wtyp, opts) return v, out, err } -func isInitMessageValue(v pref.Value) error { +func isInitMessageValue(v protoreflect.Value) error { m := v.Message().Interface() return proto.CheckInitialized(m) } @@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ merge: mergeMessageValue, } -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeGroup(m, tagsize, opts) } -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendGroup(b, m, wiretag, opts) } -func 
consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeGroup(b, m, num, wtyp, opts) return v, out, err @@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ merge: mergeMessageValue, } -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.ProtoReflect(), }) @@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageSliceInfo, @@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: asMessage(mp).ProtoReflect(), }) @@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i return n } -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma return b, nil } -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, 
err error) { list := listv.List() if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } -func isInitMessageSliceValue(listv pref.Value) error { +func isInitMessageSliceValue(listv protoreflect.Value) error { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() @@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int return n } -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars return b, nil } -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } @@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := 
opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: asMessage(mp).ProtoReflect(), }) @@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie return out, nil } -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { return m } return legacyWrapMessage(v).Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index c1245fef4876..111b9d16f993 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapInfo struct { @@ -19,12 +19,12 @@ type mapInfo struct { valWiretag uint64 keyFuncs valueCoderFuncs valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind + keyZero protoreflect.Value + keyKind protoreflect.Kind conv *mapConverter } -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { // TODO: Consider generating specialized map coders. 
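
Aside (commentary, not part of the vendored diff): the codec_map.go code here and below relies on the wire format for protobuf maps, which travel as repeated `MapEntry` pseudo-messages carrying the key in field 1 and the value in field 2; that is why `consumeMap` further down switches on `genid.MapEntry_Key_field_number` and `genid.MapEntry_Value_field_number`. A minimal self-contained sketch of one `map<string,int32>` entry, hand-encoded with `protowire`:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// encodeMapEntry hand-encodes one map<string,int32> entry the way the wire
// format defines it: a length-delimited pseudo-message whose key is field 1
// and whose value is field 2.
func encodeMapEntry(key string, val int32) []byte {
	var entry []byte
	entry = protowire.AppendTag(entry, 1, protowire.BytesType) // MapEntry.key
	entry = protowire.AppendString(entry, key)
	entry = protowire.AppendTag(entry, 2, protowire.VarintType) // MapEntry.value
	entry = protowire.AppendVarint(entry, uint64(val))
	return entry
}

func main() {
	fmt.Printf("% x\n", encodeMapEntry("id", 42)) // 0a 02 69 64 10 2a
}
```
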
keyField := fd.MapKey() valField := fd.MapValue() @@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage keyKind: keyField.Kind(), conv: conv, } - if valField.Kind() == pref.MessageKind { + if valField.Kind() == protoreflect.MessageKind { valueMessage = getMessageInfo(ft.Elem()) } @@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage }, } switch valField.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: funcs.merge = mergeMapOfMessage - case pref.BytesKind: + case protoreflect.BytesKind: funcs.merge = mergeMapOfBytes default: funcs.merge = mergeMap @@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo err := errUnknown switch num { case genid.MapEntry_Key_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { @@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo key = v n = o.n case genid.MapEntry_Value_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) if err != nil { @@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi err := errUnknown switch num { case 1: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f5d8..4b15493f2f43 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600cd8..0b31b66eaf84 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index cd40527ff646..6b2fdbb739a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -12,15 +12,15 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // coderMessageInfo contains per-message information used by the fast-path functions. // This is a different type from MessageInfo to keep MessageInfo as general-purpose as // possible. 
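
Aside (not part of the diff): the `makeCoderMethods` hunk below maintains two indexes over the same coder fields, `orderedCoderFields` for deterministic iteration and `denseCoderFields`, a slice indexed directly by field number that is only grown while it stays at least half populated (the `cf.num >= 16 && cf.num >= 2*maxDense` cutoff). A stripped-down sketch of that lookup strategy, with hypothetical names (`fieldIndex`, `coderField` are illustrative, not the runtime's types):

```go
package main

import "fmt"

// Illustrative only: direct slice indexing for small, dense field numbers,
// with a map fallback for sparse ones. This mirrors the split between
// denseCoderFields and the coderFields map built by makeCoderMethods.
type coderField struct{ name string }

type fieldIndex struct {
	dense  []*coderField         // dense[num] is nil when absent
	sparse map[int32]*coderField // fallback for large/sparse field numbers
}

func (ix *fieldIndex) lookup(num int32) *coderField {
	if int(num) < len(ix.dense) {
		return ix.dense[num]
	}
	return ix.sparse[num]
}

func main() {
	ix := &fieldIndex{
		dense:  make([]*coderField, 3),
		sparse: map[int32]*coderField{1000: {name: "ext"}},
	}
	ix.dense[2] = &coderField{name: "id"}
	fmt.Println(ix.lookup(2).name, ix.lookup(1000).name) // id ext
}
```
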
type coderMessageInfo struct { - methods piface.Methods + methods protoiface.Methods orderedCoderFields []*coderFieldInfo denseCoderFields []*coderFieldInfo @@ -38,13 +38,13 @@ type coderFieldInfo struct { funcs pointerCoderFuncs // fast-path per-field functions mi *MessageInfo // field's message ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { @@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num }) - var maxDense pref.FieldNumber + var maxDense protoreflect.FieldNumber for _, cf := range mi.orderedCoderFields { if cf.num >= 16 && cf.num >= 2*maxDense { break @@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.needsInitCheck = needsInitCheck(mi.Desc) if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic mi.methods.Marshal = mi.marshal mi.methods.Size = mi.size } if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown mi.methods.Unmarshal = mi.unmarshal } if mi.methods.CheckInitialized == nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3aea74..145c577bd6b2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
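
Aside (not part of the diff): the build-constraint hunks in this update (the one that follows, the codec_map_go111.go/codec_map_go112.go ones above, and codec_unsafe.go below) are the standard Go 1.17 migration: gofmt emits the new `//go:build` expression alongside the legacy `// +build` comment so that both old and new toolchains select the same files. The two spellings are equivalent; header excerpt for illustration:

```go
// File-header shape after this change (the two lines must agree):

//go:build purego || appengine
// +build purego appengine

// Go 1.17+ toolchains read the //go:build expression; older ones
// only understand the // +build comment form.
package impl
```
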
+//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index e89971238879..576dcf3aac50 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // pointerCoderFuncs is a set of pointer encoding functions. @@ -25,83 +25,83 @@ type pointerCoderFuncs struct { // valueCoderFuncs is a set of protoreflect.Value encoding functions. type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value } // fieldCoder returns pointer functions for a field, used for operating on // struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { switch { case fd.IsMap(): return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): // Repeated fields (not packed). 
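
Aside (not part of the diff) on the two repeated encodings that `fieldCoder` distinguishes here and in the packed case below: an unpacked repeated field repeats the tag before every element, while a packed field emits one tag followed by a single length-delimited run of element payloads. Hand-rolled with `protowire`, using an illustrative field number 4:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	elems := []uint64{1, 2, 3}

	// Unpacked: tag + varint per element.
	var unpacked []byte
	for _, v := range elems {
		unpacked = protowire.AppendTag(unpacked, 4, protowire.VarintType)
		unpacked = protowire.AppendVarint(unpacked, v)
	}

	// Packed: one tag, then all varints inside a single BytesType payload.
	var payload []byte
	for _, v := range elems {
		payload = protowire.AppendVarint(payload, v)
	}
	packed := protowire.AppendTag(nil, 4, protowire.BytesType)
	packed = protowire.AppendBytes(packed, payload)

	fmt.Printf("unpacked: % x\npacked:   % x\n", unpacked, packed)
}
```
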
if ft.Kind() != reflect.Slice { break } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Slice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Slice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Slice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Slice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Slice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Slice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Slice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Slice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Slice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Slice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleSlice } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringSliceValidateUTF8 } @@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringSlice } if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.MessageKind: + case protoreflect.MessageKind: return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: + case protoreflect.GroupKind: return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): // Packed repeated fields. 
// // Only repeated fields of primitive numeric types @@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPackedSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPackedSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32PackedSlice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32PackedSlice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32PackedSlice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64PackedSlice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64PackedSlice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64PackedSlice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32PackedSlice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32PackedSlice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPackedSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64PackedSlice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64PackedSlice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePackedSlice } } - case fd.Kind() == pref.MessageKind: + case fd.Kind() == protoreflect.MessageKind: return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: + case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
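
Aside (not part of the diff) on the `*NoZero` coders selected below: with proto3 implicit presence, a scalar set to its zero value is simply not emitted, so the fast path can skip the field entirely; only oneof members, excluded by the `ContainingOneof() == nil` guard above, must encode their zero values. This is observable with any real proto3 message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Int32Value is a proto3 message with a single int32 field.
	zero, _ := proto.Marshal(&wrapperspb.Int32Value{Value: 0})
	five, _ := proto.Marshal(&wrapperspb.Int32Value{Value: 5})
	fmt.Println(len(zero), len(five)) // 0 2: the zero value costs no bytes
}
```
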
switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolNoZero } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumNoZero } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32NoZero } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32NoZero } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32NoZero } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64NoZero } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64NoZero } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64NoZero } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32NoZero } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32NoZero } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatNoZero } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64NoZero } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64NoZero } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleNoZero } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringNoZeroValidateUTF8 } @@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesNoZero } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringNoZero } @@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer case ft.Kind() == reflect.Ptr: ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPtr } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPtr } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Ptr } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Ptr } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Ptr } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Ptr } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Ptr } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Ptr } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Ptr } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Ptr } - case pref.FloatKind: + case 
protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPtr } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Ptr } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Ptr } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePtr } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringPtrValidateUTF8 } if ft.Kind() == reflect.String { return nil, coderStringPtr } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringPtr } } default: switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBool } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnum } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32 } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32 } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32 } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64 } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64 } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64 } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32 } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32 } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloat } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64 } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64 } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDouble } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringValidateUTF8 } @@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytes } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderString } @@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer // encoderFuncsForValue returns value functions for a field, used for // extension values and map encoding. 
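
Aside (not part of the diff): where `fieldCoder` above binds coders to a concrete Go struct layout, `encoderFuncsForValue` in the next hunk works purely in terms of `protoreflect.Value`, the kind-tagged union used for extension and map values, so a single table serves every dynamic representation. The union is cheap to probe:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOfInt32(5)
	fmt.Println(v.Int()) // 5 (widened to int64)
	_, isString := v.Interface().(string)
	fmt.Println(isString) // false: Interface() exposes the underlying kind
}
```
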
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { +func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32SliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32SliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32SliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64SliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64SliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64SliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32SliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32SliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64SliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64SliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleSliceValue - case pref.StringKind: + case protoreflect.StringKind: // We don't have a UTF-8 validating coder for repeated string fields. // Value coders are used for extensions and maps. // Extensions are never proto3, and maps never contain lists. return coderStringSliceValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesSliceValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageSliceValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupSliceValue } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolPackedSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumPackedSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32PackedSliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32PackedSliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32PackedSliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64PackedSliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64PackedSliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64PackedSliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32PackedSliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatPackedSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64PackedSliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoublePackedSliceValue } default: switch fd.Kind() { default: - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolValue - case 
pref.EnumKind: + case protoreflect.EnumKind: return coderEnumValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32Value - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32Value - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32Value - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64Value - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64Value - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64Value - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32Value - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32Value - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64Value - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64Value - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleValue - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { return coderStringValueValidateUTF8 } return coderStringValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupValue } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e20cd..757642e23c9e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index acd61bb50b2c..11a6128ba56b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // unwrapper unwraps the value to the underlying value. @@ -20,13 +20,13 @@ type unwrapper interface { // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. type Converter interface { // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value + PBValueOf(reflect.Value) protoreflect.Value // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value + GoValueOf(protoreflect.Value) reflect.Value // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool + IsValidPB(protoreflect.Value) bool // IsValidGo returns whether a reflect.Value is compatible with this type. IsValidGo(reflect.Value) bool @@ -34,12 +34,12 @@ type Converter interface { // New returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns a new mutable value. - New() pref.Value + New() protoreflect.Value // Zero returns a new field value. // For scalars, it returns the default value of the field. 
// For composite types, it returns an immutable, empty value. - Zero() pref.Value + Zero() protoreflect.Value } // NewConverter matches a Go type with a protobuf field and returns a Converter @@ -50,7 +50,7 @@ type Converter interface { // This matcher deliberately supports a wider range of Go types than what // protoc-gen-go historically generated to be able to automatically wrap some // v1 messages generated by other forks of protoc-gen-go. -func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case fd.IsList(): return newListConverter(t, fd) @@ -76,68 +76,68 @@ var ( ) var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) ) -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { // Default isn't defined for repeated fields. 
return zero } return fd.Default() } switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if t.Kind() == reflect.Bool { return &boolConverter{t, defVal(fd, boolZero)} } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if t.Kind() == reflect.Int32 { return &int32Converter{t, defVal(fd, int32Zero)} } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if t.Kind() == reflect.Int64 { return &int64Converter{t, defVal(fd, int64Zero)} } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if t.Kind() == reflect.Uint32 { return &uint32Converter{t, defVal(fd, uint32Zero)} } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if t.Kind() == reflect.Uint64 { return &uint64Converter{t, defVal(fd, uint64Zero)} } - case pref.FloatKind: + case protoreflect.FloatKind: if t.Kind() == reflect.Float32 { return &float32Converter{t, defVal(fd, float32Zero)} } - case pref.DoubleKind: + case protoreflect.DoubleKind: if t.Kind() == reflect.Float64 { return &float64Converter{t, defVal(fd, float64Zero)} } - case pref.StringKind: + case protoreflect.StringKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &stringConverter{t, defVal(fd, stringZero)} } - case pref.BytesKind: + case protoreflect.BytesKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &bytesConverter{t, defVal(fd, bytesZero)} } - case pref.EnumKind: + case protoreflect.EnumKind: // Handle enums, which must be a named int32 type. 
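
Aside (not part of the diff): the enum branch above only requires the Go type to be a *named* type of int32 kind; generated code then hangs the `protoreflect.Enum` methods off that name. A shape sketch with hypothetical names (`Color` is illustrative; real generated enums also carry `Descriptor()` and `Type()` methods, elided here):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// Hypothetical enum in the generated-code shape: a named int32 type.
type Color int32

const (
	Color_RED  Color = 0
	Color_BLUE Color = 1
)

// Generated enums expose their wire value as a protoreflect.EnumNumber.
func (c Color) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(c) }

func main() {
	fmt.Println(Color_BLUE.Number()) // 1
}
```
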
if t.Kind() == reflect.Int32 { return newEnumConverter(t, fd) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return newMessageConverter(t) } panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) @@ -145,184 +145,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { type boolConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfBool(v.Bool()) + return protoreflect.ValueOfBool(v.Bool()) } -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bool()).Convert(c.goType) } -func (c *boolConverter) IsValidPB(v pref.Value) bool { +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(bool) return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } type int32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt32(int32(v.Int())) + return protoreflect.ValueOfInt32(int32(v.Int())) } -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int32(v.Int())).Convert(c.goType) } -func (c *int32Converter) IsValidPB(v pref.Value) bool { +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int32) return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } type int64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt64(int64(v.Int())) + return protoreflect.ValueOfInt64(int64(v.Int())) } -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int64(v.Int())).Convert(c.goType) } -func (c *int64Converter) IsValidPB(v pref.Value) bool { +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int64) return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } type uint32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint32(uint32(v.Uint())) + return protoreflect.ValueOfUint32(uint32(v.Uint())) } -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) } -func (c *uint32Converter) IsValidPB(v pref.Value) bool { +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint32) return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } type uint64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint64(uint64(v.Uint())) + return protoreflect.ValueOfUint64(uint64(v.Uint())) } -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) } -func (c *uint64Converter) IsValidPB(v pref.Value) bool { +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint64) return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } type float32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat32(float32(v.Float())) + return protoreflect.ValueOfFloat32(float32(v.Float())) } -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float32(v.Float())).Convert(c.goType) } -func (c *float32Converter) IsValidPB(v pref.Value) bool { +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float32) return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } type float64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat64(float64(v.Float())) + return protoreflect.ValueOfFloat64(float64(v.Float())) } -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float64(v.Float())).Convert(c.goType) } -func (c *float64Converter) IsValidPB(v pref.Value) bool { +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float64) return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } type stringConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfString(v.Convert(stringType).String()) + return protoreflect.ValueOfString(v.Convert(stringType).String()) } -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { // pref.Value.String never panics, so we go through an interface // conversion here to check the type. 
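
Aside (not part of the diff): the comment kept above ("pref.Value.String never panics"; `pref` being the old import alias this diff retires) explains the type assertion that follows. `protoreflect.Value.String` doubles as `fmt.Stringer` and formats non-string kinds instead of panicking, so it cannot be used to test whether the value actually holds a string:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	v := protoreflect.ValueOfInt64(7)
	fmt.Println(v.String())         // "7": formatted, no panic, not a type check
	_, ok := v.Interface().(string) // the reliable way to check the kind
	fmt.Println(ok)                 // false
}
```
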
s := v.Interface().(string) @@ -331,71 +331,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { } return reflect.ValueOf(s).Convert(c.goType) } -func (c *stringConverter) IsValidPB(v pref.Value) bool { +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(string) return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } type bytesConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) } -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bytes()).Convert(c.goType) } -func (c *bytesConverter) IsValidPB(v pref.Value) bool { +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().([]byte) return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } type enumConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) } else { def = fd.Default() } return &enumConverter{goType, def} } -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) } -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Enum()).Convert(c.goType) } -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) return ok } @@ -403,11 +403,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { return 
v.IsValid() && v.Type() == c.goType } -func (c *enumConverter) New() pref.Value { +func (c *enumConverter) New() protoreflect.Value { return c.def } -func (c *enumConverter) Zero() pref.Value { +func (c *enumConverter) Zero() protoreflect.Value { return c.def } @@ -419,7 +419,7 @@ func newMessageConverter(goType reflect.Type) Converter { return &messageConverter{goType} } -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } @@ -430,13 +430,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { v = reflect.Zero(reflect.PtrTo(v.Type())) } } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) } - return pref.ValueOfMessage(legacyWrapMessage(v)) + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) } -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -460,7 +460,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { return rv } -func (c *messageConverter) IsValidPB(v pref.Value) bool { +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -478,14 +478,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *messageConverter) New() pref.Value { +func (c *messageConverter) New() protoreflect.Value { if c.isNonPointer() { return c.PBValueOf(reflect.New(c.goType).Elem()) } return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *messageConverter) Zero() pref.Value { +func (c *messageConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 6fccab520e59..f89136516f96 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -8,10 +8,10 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} @@ -26,16 +26,16 @@ type listConverter struct { c Converter } -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } pv := reflect.New(c.goType) pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) } -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { rv := v.List().(*listReflect).v if rv.IsNil() { return reflect.Zero(c.goType) @@ 
-43,7 +43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { return rv.Elem() } -func (c *listConverter) IsValidPB(v pref.Value) bool { +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) } -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) } type listPtrConverter struct { @@ -68,18 +68,18 @@ type listPtrConverter struct { c Converter } -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfList(&listReflect{v, c.c}) + return protoreflect.ValueOfList(&listReflect{v, c.c}) } -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.List().(*listReflect).v } -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listPtrConverter) New() pref.Value { +func (c *listPtrConverter) New() protoreflect.Value { return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *listPtrConverter) Zero() pref.Value { +func (c *listPtrConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { } return ls.v.Elem().Len() } -func (ls *listReflect) Get(i int) pref.Value { +func (ls *listReflect) Get(i int) protoreflect.Value { return ls.conv.PBValueOf(ls.v.Elem().Index(i)) } -func (ls *listReflect) Set(i int, v pref.Value) { +func (ls *listReflect) Set(i int, v protoreflect.Value) { ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) } -func (ls *listReflect) Append(v pref.Value) { +func (ls *listReflect) Append(v protoreflect.Value) { ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) } -func (ls *listReflect) AppendMutable() pref.Value { +func (ls *listReflect) AppendMutable() protoreflect.Value { if _, ok := ls.conv.(*messageConverter); !ok { panic("invalid AppendMutable on list with non-message type") } @@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { func (ls *listReflect) Truncate(i int) { ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) } -func (ls *listReflect) NewElement() pref.Value { +func (ls *listReflect) NewElement() protoreflect.Value { return ls.conv.New() } func (ls *listReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index de06b2593f89..f30b0a0576de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapConverter struct { @@ -16,7 +16,7 @@ type mapConverter struct { keyConv, valConv Converter } -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { if t.Kind() != reflect.Map { panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } @@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { } } -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) } -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.Map().(*mapReflect).v } -func (c *mapConverter) IsValidPB(v pref.Value) bool { +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { mapv, ok := v.Interface().(*mapReflect) if !ok { return false @@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *mapConverter) New() pref.Value { +func (c *mapConverter) New() protoreflect.Value { return c.PBValueOf(reflect.MakeMap(c.goType)) } -func (c *mapConverter) Zero() pref.Value { +func (c *mapConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -67,29 +67,29 @@ type mapReflect struct { func (ms *mapReflect) Len() int { return ms.v.Len() } -func (ms *mapReflect) Has(k pref.MapKey) bool { +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) return rv.IsValid() } -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) if !rv.IsValid() { - return pref.Value{} + return protoreflect.Value{} } return ms.valConv.PBValueOf(rv) } -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.valConv.GoValueOf(v) ms.v.SetMapIndex(rk, rv) } -func (ms *mapReflect) Clear(k pref.MapKey) { +func (ms *mapReflect) Clear(k protoreflect.MapKey) { rk := ms.keyConv.GoValueOf(k.Value()) ms.v.SetMapIndex(rk, reflect.Value{}) } -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { if _, ok := ms.valConv.(*messageConverter); !ok { panic("invalid Mutable on map with non-message value type") } @@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { } return v } -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { iter := mapRange(ms.v) for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() @@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { } } } -func (ms *mapReflect) NewValue() pref.Value { +func (ms *mapReflect) 
NewValue() protoreflect.Value { return ms.valConv.New() } func (ms *mapReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a65b3..cda0520c275c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -12,12 +12,12 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +25,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -36,14 +37,17 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { } } -func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } +func (o unmarshalOptions) DiscardUnknown() bool { + return o.flags&protoiface.UnmarshalDiscardUnknown != 0 +} func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == preg.GlobalTypes + return o.flags == 0 && o.resolver == protoregistry.GlobalTypes } var lazyUnmarshalOptions = unmarshalOptions{ - resolver: preg.GlobalTypes, + resolver: protoregistry.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -52,7 +56,7 @@ type unmarshalOutput struct { } // unmarshal is protoreflect.Methods.Unmarshal. 
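Beyond the alias cleanup, the `decode.go` hunks here carry a real behavioral change: `unmarshalOptions` gains a `depth` budget seeded from `protowire.DefaultRecursionLimit`, and (in the `unmarshalPointer` hunk just below) each nested message decrements that budget, returning `errRecursionDepth` once it is exhausted. This bounds how deeply nested a wire-format message may be, so hostile input can no longer drive unbounded recursion. A minimal sketch of the guard, using illustrative names (`opts`, `unmarshalNested`) that are not part of the vendored API:

```go
package main

import (
	"errors"
	"fmt"
)

var errRecursionDepth = errors.New("exceeded maximum recursion depth")

// opts mirrors the shape of unmarshalOptions: the budget travels by value,
// so each call sees the remaining depth of its own call chain.
type opts struct{ depth int }

// unmarshalNested stands in for unmarshalPointer: entering one more level
// of nested message costs one unit of budget before any work is done.
func unmarshalNested(level int, o opts) error {
	o.depth--
	if o.depth < 0 {
		return errRecursionDepth
	}
	if level == 0 {
		return nil
	}
	return unmarshalNested(level-1, o)
}

func main() {
	fmt.Println(unmarshalNested(5, opts{depth: 10}))   // <nil>
	fmt.Println(unmarshalNested(100, opts{depth: 10})) // exceeded maximum recursion depth
}
```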
-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() @@ -62,12 +66,13 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) - var flags piface.UnmarshalOutputFlags + var flags protoiface.UnmarshalOutputFlags if out.initialized { - flags |= piface.UnmarshalInitialized + flags |= protoiface.UnmarshalInitialized } - return piface.UnmarshalOutput{ + return protoiface.UnmarshalOutput{ Flags: flags, }, err } @@ -82,6 +87,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } @@ -202,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p var err error xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) if err != nil { - if err == preg.NotFound { + if err == protoregistry.NotFound { return out, errUnknown } return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go index 8c1eab4bfd86..5f3ef5ad732f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -7,15 +7,15 @@ package impl import ( "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type EnumInfo struct { GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor + Desc protoreflect.EnumDescriptor } -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) } -func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index e904fd993657..cb25b0bae1d7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // ExtensionInfo implements ExtensionType. @@ -45,7 +45,7 @@ type ExtensionInfo struct { // since the message may no longer implement the MessageV1 interface. // // Deprecated: Use the ExtendedType method instead. - ExtendedType piface.MessageV1 + ExtendedType protoiface.MessageV1 // ExtensionType is the zero value of the extension type. 
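Most of the churn in these vendored files is a single mechanical refactor: the short import aliases (`pref` for protoreflect, `piface` for protoiface, `preg` for protoregistry) are dropped in favor of plain imports, so every `pref.X` becomes `protoreflect.X` with no change in behavior. A schematic of the after-state, with the old `pref` spelling kept only in a comment:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// zeroValue returns the invalid zero protoreflect.Value.
// Before the refactor this body read: return pref.Value{}
func zeroValue() protoreflect.Value {
	return protoreflect.Value{}
}

func main() {
	fmt.Println(zeroValue().IsValid()) // false: the zero Value holds nothing
}
```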
// @@ -83,31 +83,31 @@ const ( extensionInfoFullInit = 2 ) -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { xi.goType = goType xi.desc = extensionTypeDescriptor{xd, xi} xi.init = extensionInfoDescInit } -func (xi *ExtensionInfo) New() pref.Value { +func (xi *ExtensionInfo) New() protoreflect.Value { return xi.lazyInit().New() } -func (xi *ExtensionInfo) Zero() pref.Value { +func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { xi.lazyInitSlow() } @@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { } type extensionTypeDescriptor struct { - pref.ExtensionDescriptor + protoreflect.ExtensionDescriptor xi *ExtensionInfo } -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { return xtd.xi } -func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { return xtd.ExtensionDescriptor } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index f7d7ffb51039..c2a803bb2f92 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -13,13 +13,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) // legacyEnumName returns the name of enums used in legacy code. // It is neither the protobuf full name nor the qualified Go name, // but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { +func legacyEnumName(ed protoreflect.EnumDescriptor) string { var protoPkg string enumName := string(ed.FullName()) if fd := ed.ParentFile(); fd != nil { @@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { // legacyWrapEnum wraps v as a protoreflect.Enum, // where v must be a int32 kind and not implement the v2 API already. 
-func legacyWrapEnum(v reflect.Value) pref.Enum { +func legacyWrapEnum(v reflect.Value) protoreflect.Enum { et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) + return et.New(protoreflect.EnumNumber(v.Int())) } var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, // where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { // Fast-path: check if a EnumType is cached for this concrete type. if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } // Slow-path: derive enum descriptor and initialize EnumType. - var et pref.EnumType + var et protoreflect.EnumType ed := LegacyLoadEnumDesc(t) et = &legacyEnumType{ desc: ed, goType: t, } if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } return et } type legacyEnumType struct { - desc pref.EnumDescriptor + desc protoreflect.EnumDescriptor goType reflect.Type m sync.Map // map[protoreflect.EnumNumber]proto.Enum } -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) + return e.(protoreflect.Enum) } e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} t.m.Store(n, e) return e } -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { return t.desc } type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType goTyp reflect.Type } -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { return e.pbTyp.Descriptor() } -func (e *legacyEnumWrapper) Type() pref.EnumType { +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { return e.pbTyp } -func (e *legacyEnumWrapper) Number() pref.EnumNumber { +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { return e.num } -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } func (e *legacyEnumWrapper) protoUnwrap() interface{} { @@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { } var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) ) var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor @@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor // which must be an int32 kind and not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: initialize EnumDescriptor from the raw descriptor. 
ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { + if _, ok := ev.(protoreflect.Enum); ok { panic(fmt.Sprintf("%v already implements proto.Enum", t)) } edV1, ok := ev.(enumV1) @@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { } b, idxs := edV1.EnumDescriptor() - var ed pref.EnumDescriptor + var ed protoreflect.EnumDescriptor if len(idxs) == 1 { ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) } else { @@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript // We are unable to use the global enum registry since it is // unfortunately keyed by the protobuf full name, which we also do not know. // Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: construct a bogus, but unique EnumDescriptor. @@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // An exhaustive query is clearly impractical, but can be best-effort. if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } return ed } @@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // It should be sufficiently unique within a program. // // This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { sanitize := func(r rune) rune { switch { case r == '/': @@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { ss[i] = "x" + s } } - return pref.FullName(strings.Join(ss, ".")) + return protoreflect.FullName(strings.Join(ss, ".")) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index e3fb0b578586..9b64ad5bba28 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -12,21 +12,21 @@ import ( "reflect" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // These functions exist to support exported APIs in generated protobufs. // While these are deprecated, they cannot be removed for compatibility reasons. // LegacyEnumName returns the name of enums used in legacy code. -func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { +func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { return legacyEnumName(ed) } // LegacyMessageTypeOf returns the protoreflect.MessageType for m, // with name used as the message name if necessary. 
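The `legacyLoadEnumType` and `LegacyLoadEnumDesc` hunks above also display the caching idiom this package uses throughout: a `sync.Map` fast path, a slow path that computes the value on a miss, and `LoadOrStore` so concurrent first callers converge on one canonical entry instead of racing. A standalone sketch of that idiom with generic names, not the vendored functions:

```go
package main

import (
	"fmt"
	"sync"
)

var cache sync.Map // map[string]int

func load(key string, compute func() int) int {
	if v, ok := cache.Load(key); ok { // fast path: already cached
		return v.(int)
	}
	v := compute() // slow path: may run in several goroutines at once...
	if prev, loaded := cache.LoadOrStore(key, v); loaded {
		return prev.(int) // ...but only one result wins and is reused
	}
	return v
}

func main() {
	fmt.Println(load("a", func() int { return 1 })) // 1
	fmt.Println(load("a", func() int { return 2 })) // still 1: cached
}
```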
-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } @@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. // The input can either be a string representing the enum value by name, // or a number representing the enum number itself. -func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { if b[0] == '"' { - var name pref.Name + var name protoreflect.Name if err := json.Unmarshal(b, &name); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb } return ev.Number(), nil } else { - var num pref.EnumNumber + var num protoreflect.EnumNumber if err := json.Unmarshal(b, &num); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. blockSize = len(in) } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) out = append(out, blockHeader[:]...) out = append(out, in[:blockSize]...) in = in[blockSize:] diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 49e723161c01..87b30d0504c1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -12,16 +12,16 @@ import ( ptag "google.golang.org/protobuf/internal/encoding/tag" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) func (xi *ExtensionInfo) initToLegacy() { xd := xi.desc - var parent piface.MessageV1 + var parent protoiface.MessageV1 messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { // Create a new parent message and unwrap it if possible. mv := mt.New().Interface() t := reflect.TypeOf(mv) @@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Check whether the message implements the legacy v1 Message interface. mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { + if mz, ok := mz.(protoiface.MessageV1); ok { parent = mz } } @@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Reconstruct the legacy enum full name. 
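One edit in the `legacy_export.go` hunk above is worth flagging so it is not mistaken for a behavior change: in `CompressGZIP`, `uint16(blockSize)^0x0000` and `uint16(blockSize)^0xffff` become `uint16(blockSize)` and `^uint16(blockSize)`. XOR with all-zero bits is the identity and XOR with all-one bits is the bitwise complement, so the stored-block header still writes LEN followed by its one's complement NLEN exactly as RFC 1951 requires; only the spelling is clearer. A quick check of the equivalence:

```go
package main

import "fmt"

func main() {
	for _, n := range []uint16{0, 1, 0x1234, 0xffff} {
		fmt.Println(n^0x0000 == n, n^0xffff == ^n) // true true for every n
	}
}
```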
var enumName string - if xd.Kind() == pref.EnumKind { + if xd.Kind() == protoreflect.EnumKind { enumName = legacyEnumName(xd.Enum()) } @@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { // field number is specified. In such a case, use a placeholder. if xi.ExtendedType == nil || xi.ExtensionType == nil { xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), } xi.desc = extensionTypeDescriptor{xd, xi} return } // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor t := reflect.TypeOf(xi.ExtensionType) isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 @@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { t = t.Elem() } switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: ed = v.Descriptor() case enumV1: ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: md = v.ProtoReflect().Descriptor() case messageV1: md = LegacyLoadMessageDesc(t) } // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors + var evs protoreflect.EnumValueDescriptors if ed != nil { evs = ed.Values() } @@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { // Construct a v2 ExtensionType. xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind xd.L2.IsPacked = fd.L1.IsPacked @@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { } type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber + name protoreflect.FullName + number protoreflect.FieldNumber } -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) 
IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go 
b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 029feeefd792..61c483fac06e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -16,14 +16,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { +func legacyWrapMessage(v reflect.Value) protoreflect.Message { t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} @@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { // legacyLoadMessageType dynamically loads a protoreflect.Type for t, // where t must be not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessageType{t} } @@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, // where t must be a *struct kind and not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { // Fast-path: check if a MessageInfo is cached for this concrete type. if mt, ok := legacyMessageTypeCache.Load(t); ok { return mt.(*MessageInfo) @@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic } if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal @@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc // which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { return legacyLoadMessageDesc(t, "") } -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) + return mi.(protoreflect.MessageDescriptor) } // Slow-path: initialize MessageDescriptor from the raw descriptor. 
mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { + if _, ok := mv.(protoreflect.ProtoMessage); ok { panic(fmt.Sprintf("%v already implements proto.Message", t)) } mdV1, ok := mv.(messageV1) @@ -164,7 +162,7 @@ var ( // // This is a best-effort derivation of the message descriptor using the protobuf // tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { aberrantMessageDescLock.Lock() defer aberrantMessageDescLock.Unlock() if aberrantMessageDescCache == nil { @@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes } return aberrantLoadMessageDescReentrant(t, name) } -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if an MessageDescriptor is cached for this concrete type. if md, ok := aberrantMessageDescCache[t]; ok { return md @@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] for i := 0; i < vs.Len(); i++ { v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), }) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) } @@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M n := len(md.L2.Oneofs.List) md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile od.L0.Parent = md od.L0.Index = n @@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M return md } -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { if name.IsValid() { return name } func() { defer func() { recover() }() // swallow possible nil panics if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) + name = protoreflect.FullName(m.XXX_MessageName()) } }() if name.IsValid() { @@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Index = n if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { + fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) @@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, } // Populate Enum and Message. 
- if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: fd.L1.Enum = v.Descriptor() default: fd.L1.Enum = LegacyLoadEnumDesc(t) } } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: + case protoreflect.ProtoMessage: fd.L1.Message = v.ProtoReflect().Descriptor() case messageV1: fd.L1.Message = LegacyLoadMessageDesc(t) @@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, n := len(md.L1.Messages.List) md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { + md2.L2.Options = func() protoreflect.ProtoMessage { opts := descopts.Message.ProtoReflect().New() opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) return opts.Interface() @@ -364,8 +362,8 @@ type placeholderEnumValues struct { protoreflect.EnumValueDescriptors } -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) } // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. @@ -383,7 +381,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var aberrantProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &protoiface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{ // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - Flags: piface.SupportMarshalDeterministic, + Flags: protoiface.SupportMarshalDeterministic, } -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() marshaler, ok := v.(legacyMarshaler) if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) } out, err := marshaler.Marshal() if in.Buf != nil { out = append(in.Buf, out...) 
} - return piface.MarshalOutput{ + return protoiface.MarshalOutput{ Buf: out, }, err } -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } -func legacyMerge(in piface.MergeInput) piface.MergeOutput { +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) if ok { merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // If legacy merger is unavailable, implement merge in terms of @@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { srcv := in.Source.(unwrapper).protoUnwrap() marshaler, ok := srcv.(legacyMarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } dstv = in.Destination.(unwrapper).protoUnwrap() unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } if !in.Source.IsValid() { // Legacy Marshal methods may not function on nil messages. // Check for a typed nil source only after we confirm that // legacy Marshal/Unmarshal methods are present, for // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } b, err := marshaler.Marshal() if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } err = unmarshaler.Unmarshal(b) if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
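The `legacyMerge` hunk above preserves an interesting fallback: when the destination type lacks a legacy `Merge` method, merging is emulated by marshaling the source and unmarshaling the bytes into the destination, relying on the protobuf rule that unmarshaling into a non-empty message has merge semantics. A toy illustration of that round-trip strategy; the `buf` type and `mergeViaRoundTrip` are invented for the example:

```go
package main

import "fmt"

type marshaler interface{ Marshal() ([]byte, error) }
type unmarshaler interface{ Unmarshal([]byte) error }

// mergeViaRoundTrip reports whether the merge completed, echoing the
// MergeComplete flag: any failure tells the caller to fall back further.
func mergeViaRoundTrip(dst unmarshaler, src marshaler) bool {
	b, err := src.Marshal()
	if err != nil {
		return false
	}
	return dst.Unmarshal(b) == nil
}

// buf is a stand-in "message" whose Unmarshal appends, the way repeated
// protobuf fields accumulate across merges.
type buf struct{ b []byte }

func (s *buf) Marshal() ([]byte, error) { return s.b, nil }
func (d *buf) Unmarshal(p []byte) error { d.b = append(d.b, p...); return nil }

func main() {
	dst, src := &buf{b: []byte("a")}, &buf{b: []byte("b")}
	fmt.Println(mergeViaRoundTrip(dst, src), string(dst.b)) // true ab
}
```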
@@ -463,19 +461,19 @@ type aberrantMessageType struct { t reflect.Type } -func (mt aberrantMessageType) New() pref.Message { +func (mt aberrantMessageType) New() protoreflect.Message { if mt.t.Kind() == reflect.Ptr { return aberrantMessage{reflect.New(mt.t.Elem())} } return aberrantMessage{reflect.Zero(mt.t)} } -func (mt aberrantMessageType) Zero() pref.Message { +func (mt aberrantMessageType) Zero() protoreflect.Message { return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) GoType() reflect.Type { return mt.t } -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(mt.t) } @@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() { } } -func (m aberrantMessage) ProtoReflect() pref.Message { +func (m aberrantMessage) ProtoReflect() protoreflect.Message { return m } -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(m.v.Type()) } -func (m aberrantMessage) Type() pref.MessageType { +func (m aberrantMessage) Type() protoreflect.MessageType { return aberrantMessageType{m.v.Type()} } -func (m aberrantMessage) New() pref.Message { +func (m aberrantMessage) New() protoreflect.Message { if m.v.Type().Kind() == reflect.Ptr { return aberrantMessage{reflect.New(m.v.Type().Elem())} } return aberrantMessage{reflect.Zero(m.v.Type())} } -func (m aberrantMessage) Interface() pref.ProtoMessage { +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { return m } -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { return } -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { return false } -func (m aberrantMessage) Clear(pref.FieldDescriptor) { +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { if fd.Default().IsValid() { return fd.Default() } panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) GetUnknown() pref.RawFields { +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { return nil } -func (m aberrantMessage) 
SetUnknown(pref.RawFields) { +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { @@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool { } return false } -func (m aberrantMessage) ProtoMethods() *piface.Methods { +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index c65bbc0446ea..7e65f64f28e3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -9,8 +9,8 @@ import ( "reflect" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) type mergeOptions struct{} @@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { } // merge is protoreflect.Methods.Merge. -func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { +func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { dp, ok := mi.getPointer(in.Destination) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } sp, ok := mi.getPointer(in.Source) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { @@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { continue } dx := (*dext)[num] - var dv pref.Value + var dv protoreflect.Value if dx.Type() == sx.Type() { dv = dx.Value() } @@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { return src } -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) } -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { @@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { return dst } -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sb := srcl.Get(i).Bytes() db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) + dstl.Append(protoreflect.ValueOfBytes(db)) } return dst } -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sm := srcl.Get(i).Message() dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) + dstl.Append(protoreflect.ValueOfMessage(dm)) } return dst } -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { opts.Merge(dst.Message().Interface(), src.Message().Interface()) return dst } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index a104e28e858f..4f5fb67a0ddb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,8 +14,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -29,7 +28,7 @@ type MessageInfo struct { GoReflectType reflect.Type // pointer to struct // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor + Desc protoreflect.MessageDescriptor // Exporter must be provided in a purego environment in order to provide // access to unexported fields. @@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} // is generated by our implementation of protoc-gen-go (for v2 and on). // If it is unable to obtain a MessageInfo, it returns nil. func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) if !ok { return nil } @@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { // getPointer returns the pointer for a message, which should be of // the type of the MessageInfo. If the message is of a different type, // it returns ok==false. 
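In the `message.go` hunks that follow, `makeStructInfo` recovers field numbers from the legacy `protobuf:"..."` struct tags: any comma-separated token consisting solely of digits is taken as the field number. A minimal sketch of that tag scan, using the same digit-trim heuristic visible in the hunk (the `msg` type is invented for the example):

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type msg struct {
	Name string `protobuf:"bytes,1,opt,name=name"`
}

// fieldNumber extracts the first all-digit token from a protobuf tag,
// mirroring the strings.Trim check used by makeStructInfo below.
func fieldNumber(tag string) (uint64, bool) {
	for _, s := range strings.Split(tag, ",") {
		if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
			n, _ := strconv.ParseUint(s, 10, 64)
			return n, true
		}
	}
	return 0, false
}

func main() {
	f, _ := reflect.TypeOf(msg{}).FieldByName("Name")
	fmt.Println(fieldNumber(f.Tag.Get("protobuf"))) // 1 true
}
```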
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { +func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { switch m := m.(type) { case *messageState: return m.pointer(), m.messageInfo() == mi @@ -134,10 +133,10 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type } func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { @@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { unknownOffset: invalidOffset, extensionOffset: invalidOffset, - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, } fieldLoop: @@ -180,12 +179,12 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f continue fieldLoop } } if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f + si.oneofsByName[protoreflect.Name(s)] = f continue fieldLoop } } @@ -208,8 +207,8 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf break } } @@ -219,7 +218,11 @@ fieldLoop: } func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) } func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) @@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { fd := mi.Desc.Fields().Get(i) switch { case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 9488b7261313..d9ea010bef9a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -10,17 +10,17 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -30,8 +30,8 @@ type reflectMessageInfo struct { // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. rangeInfos []interface{} // either *fieldInfo or *oneofInfo - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) extensionMap func(pointer) *extensionMap nilMessage atomicNilMessage @@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { // This code assumes that the struct is well-formed and panics if there are // any discrepancies. func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} md := mi.Desc fds := md.Fields() for i := 0; i < fds.Len(); i++ { @@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.fields[fd.Number()] = &fi } - mi.oneofs = map[pref.Name]*oneofInfo{} + mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) @@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { switch { case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } return *p.Apply(mi.unknownOffset).Bytes() } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } @@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } return **bp } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { **bp = b } default: - mi.getUnknown = func(pointer) pref.RawFields { + mi.getUnknown = func(pointer) protoreflect.RawFields { return nil } - mi.setUnknown = func(p pointer, _ pref.RawFields) { + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { type extensionMap map[int32]ExtensionField -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { if m != nil { for _, x := range *m { xd := x.Type().TypeDescriptor() @@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { } } } -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } @@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { } return true } -func (m *extensionMap) Clear(xt pref.ExtensionType) { +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { @@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { } return xt.Zero() } -func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { xd := xt.TypeDescriptor() isValid := true switch { @@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { @@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // in an allocation-free way without needing to have a shadow Go type generated // for every message type. This technique only works using unsafe. 
// -// // Example generated code: // // type M struct { @@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // It has access to the message info as its first field, and a pointer to the // MessageState is identical to a pointer to the concrete message value. // -// // Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. type MessageState struct { pragma.NoUnkeyedLiterals pragma.DoNotCompare @@ -368,8 +366,8 @@ type MessageState struct { type messageState MessageState var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) ) // messageDataType is a tuple of a pointer to the message data and @@ -387,16 +385,16 @@ type ( ) var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) ) // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } } -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } func (m *messageIfaceWrapper) protoUnwrap() interface{} { @@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
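The requirements listed above can be checked directly: because the state is the first field of a generated struct, the address of the message equals the address of its state. A minimal sketch, with `M` as a hypothetical hand-written stand-in for generated code (it demonstrates only the address invariant, not a full ProtoMessage implementation):

```go
package main

import (
	"fmt"
	"unsafe"

	"google.golang.org/protobuf/runtime/protoimpl"
)

// M mirrors the layout protoc-gen-go emits: the MessageState is the
// very first field of the struct.
type M struct {
	state protoimpl.MessageState
	Name  string
}

func main() {
	m := &M{}
	// The invariant from the comment above: &m and &m.state are the
	// same address, even though they are different Go types.
	fmt.Println(unsafe.Pointer(m) == unsafe.Pointer(&m.state)) // true
}
```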
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext if !mi.Desc.ExtensionRanges().Has(fd.Number()) { panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 343cf872197f..5e736c60efc7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -11,24 +11,24 @@ import ( "sync" "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { - fieldDesc pref.FieldDescriptor + fieldDesc protoreflect.FieldDescriptor // These fields are used for protobuf reflection support. has func(pointer) bool clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value } -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { // This never occurs for generated message types. // It implies that a hand-crafted type has missing Go fields // for specific protobuf message fields. 
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { clear: func(p pointer) { panic("missing Go struct field for " + string(fd.FullName())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { return fd.Default() }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { panic("missing Go struct field for " + string(fd.FullName())) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { panic("missing Go struct field for " + string(fd.FullName())) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { panic("missing Go struct field for " + string(fd.FullName())) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { if v := fd.Default(); v.IsValid() { return v } @@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { } } -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) @@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { rv.Set(reflect.New(ot)) @@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) rv.Set(conv.GoValueOf(v)) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { if !isMessage { panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) } @@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv = rv.Elem().Elem().Field(0) if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) @@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -176,7 +176,7 @@ func 
fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } rv.Set(pv) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if v.IsNil() { v.Set(reflect.MakeMap(fs.Type)) } return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Slice { panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) @@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } rv.Set(pv.Elem()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type) return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } @@ -252,7 +252,7 @@ var ( emptyBytes = reflect.ValueOf([]byte{}) ) -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 @@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { if rv.IsNil() { @@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } } }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { if !flags.ProtoLegacy { panic("no support for proto1 weak fields") } var once sync.Once - 
var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) if messageType == nil { panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) } @@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn clear: func(p pointer) { p.Apply(weakOffset).WeakFields().clear(num) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { lazyInit() if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } m, ok := p.Apply(weakOffset).WeakFields().get(num) if !ok { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { lazyInit() m := v.Message() if m.Descriptor() != messageType.Descriptor() { @@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn } p.Apply(weakOffset).WeakFields().set(num, m.Interface()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { lazyInit() fs := p.Apply(weakOffset).WeakFields() m, ok := fs.get(num) @@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn m = messageType.New().Interface() fs.set(num, m) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { lazyInit() return messageType.New() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { lazyInit() - return pref.ValueOfMessage(messageType.New()) + return protoreflect.ValueOfMessage(messageType.New()) }, } } -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) @@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } type oneofInfo struct { - 
oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } @@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } else { fs := si.oneofsByName[od.Name()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821efb3..4c491bdf4825 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85bb7..ee0e0573e395 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 08cfb6054b43..a24e6bbd7a5f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -16,9 +16,9 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) // ValidationStatus is the result of validating the wire-format encoding of a message. @@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { // of the message type. // // This function is exposed for testing. 
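A recurring pattern in these vendored hunks is the addition of a `//go:build` line above the legacy `// +build` comment: Go 1.17's gofmt emits both forms, and the two must state the same constraint. A sketch of the resulting file header for the unsafe variant of a purego/unsafe file pair:

```go
//go:build !purego && !appengine
// +build !purego,!appengine

// This file is compiled only when neither the purego nor the
// appengine build tag is set, i.e. when package unsafe is usable.
package impl
```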
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { mi, ok := mt.(*MessageInfo) if !ok { return out, ValidationUnknown } if in.Resolver == nil { - in.Resolver = preg.GlobalTypes + in.Resolver = protoregistry.GlobalTypes } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, }) if o.initialized { - out.Flags |= piface.UnmarshalInitialized + out.Flags |= protoiface.UnmarshalInitialized } return out, st } @@ -106,22 +106,22 @@ const ( validationTypeMessageSetItem ) -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String } @@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip default: vi = newValidationInfo(fd, ft) } - if fd.Cardinality() == pref.Required { + if fd.Cardinality() == protoreflect.Required { // Avoid overflow. 
The required field check is done with a 64-bit mask, with // any message containing more than 64 required fields always reported as // potentially uninitialized, so it is not important to get a precise count @@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip return vi } -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.IsList(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo case fd.IsMap(): vi.typ = validationTypeMap switch fd.MapKey().Kind() { - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.keyType = validationTypeUTF8String } } switch fd.MapValue().Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.valType = validationTypeMessage if ft.Kind() == reflect.Map { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.valType = validationTypeUTF8String } } default: switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if !fd.IsWeak() { vi.mi = getMessageInfo(ft) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -314,11 +314,11 @@ State: break } messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) switch err { case nil: vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: + case protoregistry.NotFound: vi.typ = validationTypeBytes default: return out, ValidationUnknown @@ -335,7 +335,7 @@ State: // unmarshaling to begin failing. Supporting this requires some way to // determine if the resolver is frozen. 
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { + if err != nil && err != protoregistry.NotFound { return out, ValidationUnknown } if err == nil { @@ -513,7 +513,7 @@ State: } xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) switch { - case err == preg.NotFound: + case err == protoregistry.NotFound: b = b[n:] case err != nil: return out, ValidationUnknown diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go index 009cbefd1ed2..eb79a7ba94c0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -7,7 +7,7 @@ package impl import ( "fmt" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -17,32 +17,32 @@ import ( // defined directly on it. type weakFields WeakFields -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { m, ok := w[int32(num)] return m, ok } -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { if *w == nil { *w = make(weakFields) } (*w)[int32(num)] = m } -func (w *weakFields) clear(num pref.FieldNumber) { +func (w *weakFields) clear(num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { _, ok := w[int32(num)] return ok } -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { if m, ok := w[int32(num)]; ok { return m } @@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr return mt.Zero().Interface() } -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { if m != nil { mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) if mt == nil { diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 2a24953f6a47..33745ed06254 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -5,12 +5,12 @@ package order import ( - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // FieldOrder specifies the ordering to visit message fields. // It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool var ( // AnyFieldOrder specifies no specific field ordering. 
@@ -18,9 +18,9 @@ var ( // LegacyFieldOrder sorts fields in the same ordering as emitted by // wire serialization in the github.com/golang/protobuf implementation. - LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { + inOneof := func(od protoreflect.OneofDescriptor) bool { return od != nil && !od.IsSynthetic() } @@ -41,14 +41,14 @@ var ( } // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { return x.Number() < y.Number() } // IndexNameFieldOrder sorts non-extension fields before extension fields. // Non-extensions are sorted according to their declaration index. // Extensions are sorted according to their full name. - IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { // Non-extension fields sort before extension fields. if x.IsExtension() != y.IsExtension() { return !x.IsExtension() && y.IsExtension() @@ -64,7 +64,7 @@ var ( // KeyOrder specifies the ordering to visit map entries. // It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool +type KeyOrder func(x, y protoreflect.MapKey) bool var ( // AnyKeyOrder specifies no specific key ordering. @@ -72,7 +72,7 @@ var ( // GenericKeyOrder sorts false before true, numeric keys in ascending order, // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { switch x.Interface().(type) { case bool: return !x.Bool() && y.Bool() diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index c8090e0c547f..1665a68e5b7c 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -9,12 +9,12 @@ import ( "sort" "sync" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type messageField struct { - fd pref.FieldDescriptor - v pref.Value + fd protoreflect.FieldDescriptor + v protoreflect.Value } var messageFieldPool = sync.Pool{ @@ -25,8 +25,8 @@ type ( // FieldRanger is an interface for visiting all fields in a message. // The protoreflect.Message type implements this interface. FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool ) // RangeFields iterates over the fields of fs according to the specified order. @@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { }() // Collect all fields in the message and sort them.
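The internal/order package is not importable from outside the module, but the pattern RangeFields implements (collect fields via Range, sort with a FieldOrder, then visit) is easy to reproduce with the public protoreflect API. A rough equivalent of NumberFieldOrder, using the well-known Duration message purely as an example:

```go
package main

import (
	"fmt"
	"sort"
	"time"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

// rangeByNumber visits the populated fields of m in ascending
// field-number order, mirroring order.RangeFields with NumberFieldOrder.
func rangeByNumber(m protoreflect.Message, fn func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
	type field struct {
		fd protoreflect.FieldDescriptor
		v  protoreflect.Value
	}
	var fields []field
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fields = append(fields, field{fd, v})
		return true
	})
	sort.Slice(fields, func(i, j int) bool {
		return fields[i].fd.Number() < fields[j].fd.Number()
	})
	for _, f := range fields {
		if !fn(f.fd, f.v) {
			return
		}
	}
}

func main() {
	d := durationpb.New(90 * time.Second)
	rangeByNumber(d.ProtoReflect(), func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Println(fd.Number(), fd.Name(), v) // e.g. "1 seconds 90"
		return true
	})
}
```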
- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { fields = append(fields, messageField{fd, v}) return true }) @@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { } type mapEntry struct { - k pref.MapKey - v pref.Value + k protoreflect.MapKey + v protoreflect.Value } var mapEntryPool = sync.Pool{ @@ -76,8 +76,8 @@ type ( // EntryRanger is an interface for visiting all fields in a message. // The protoreflect.Map type implements this interface. EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited. - VisitEntry = func(pref.MapKey, pref.Value) bool + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool ) // RangeEntries iterates over the entries of es according to the specified order. @@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { }() // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { entries = append(entries, mapEntry{k, v}) return true }) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c977dc..a1f6f333860e 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019145..fea589c457e9 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs @@ -9,7 +10,7 @@ package strs import ( "unsafe" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -58,7 +59,7 @@ type Builder struct { // AppendFullName is equivalent to protoreflect.FullName.Append, // but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { n := len(prefix) + len(".") + len(name) if len(prefix) == 0 { n -= len(".") @@ -67,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful sb.buf = append(sb.buf, prefix...) sb.buf = append(sb.buf, '.') sb.buf = append(sb.buf, name...) 
- return pref.FullName(sb.last(n)) + return protoreflect.FullName(sb.last(n)) } // MakeString is equivalent to string(b), but optimized for large batches diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2ec7..b480c5010f1d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -12,47 +12,46 @@ import ( // These constants determine the current version of this module. // -// // For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// +// - Tagged releases use a tag that is identical to String. +// - Tagged releases never reference a commit where the String +// contains "devel". +// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. // // Steps for tagging a new release: -// 1. Create a new CL. // -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". +// 1. Create a new CL. // -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". // -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. // -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. // -// 6. Tag a new version, where the tag is is the current String. +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. // -// 7. Write release notes for all notable changes -// between this release and the last release. +// 6. Tag a new version, where the tag is is the current String. // -// 8. Create a new CL. +// 7. Write release notes for all notable changes +// between this release and the last release. // -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// 8. Create a new CL. // -// 10. Send out the CL for review and submit it. +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 + Minor = 28 Patch = 1 PreRelease = "" ) @@ -60,6 +59,7 @@ const ( // String formats the version string for this module in semver format. 
// // Examples: +// // v1.20.1 // v1.21.0-rc.1 func String() string { diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88cfd..48d47946bb1a 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -19,7 +19,8 @@ import ( // UnmarshalOptions configures the unmarshaler. // // Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) type UnmarshalOptions struct { pragma.NoUnkeyedLiterals @@ -42,18 +43,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +71,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. 
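The new RecursionLimit field is the user-visible half of the depth plumbing in this release: plain proto.Unmarshal now applies protowire.DefaultRecursionLimit, and callers can tighten the bound for untrusted input. A usage sketch (the payload here is just a marshaled Duration; any generated message works):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	data, err := proto.Marshal(durationpb.New(time.Second))
	if err != nil {
		panic(err)
	}

	// A tighter bound than the default; pathologically nested inputs
	// now fail with a recursion-depth error instead of exhausting the
	// goroutine stack.
	var out durationpb.Duration
	opts := proto.UnmarshalOptions{RecursionLimit: 100}
	if err := opts.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1s
}
```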
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +97,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index c52d8c4ab79f..08d2a46f5352 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -6,18 +6,17 @@ // // For documentation on protocol buffers in general, see: // -// https://developers.google.com/protocol-buffers +// https://developers.google.com/protocol-buffers // // For a tutorial on using protocol buffers with Go, see: // -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://developers.google.com/protocol-buffers/docs/gotutorial // // For a guide to generated Go protocol buffer code, see: // -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://developers.google.com/protocol-buffers/docs/reference/go-generated // -// -// Binary serialization +// # Binary serialization // // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. @@ -30,8 +29,7 @@ // • Unmarshal converts a message from the wire format. // The UnmarshalOptions type provides more control over wire unmarshaling. // -// -// Basic message operations +// # Basic message operations // // • Clone makes a deep copy of a message. // @@ -45,8 +43,7 @@ // // • CheckInitialized reports whether all required fields in a message are set. // -// -// Optional scalar constructors +// # Optional scalar constructors // // The API for some generated messages represents optional scalar fields // as pointers to a value. For example, an optional string field has the @@ -61,16 +58,14 @@ // // Optional scalar fields are only supported in proto2. // -// -// Extension accessors +// # Extension accessors // // • HasExtension, GetExtension, SetExtension, and ClearExtension // access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // -// -// Related packages +// # Related packages // // • Package "google.golang.org/protobuf/encoding/protojson" converts messages to // and from JSON. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index d18239c23723..bf7f816d0e86 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -16,7 +16,8 @@ import ( // MarshalOptions configures the marshaler. // // Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) type MarshalOptions struct { pragma.NoUnkeyedLiterals @@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { // otherwise it returns a non-nil empty buffer. 
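The doc-comment reflow above does not change how the options structs are used; the inline examples still hold. A short round trip combining the two (Struct is chosen because its map field makes Deterministic observable):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m, err := structpb.NewStruct(map[string]interface{}{"b": 1, "a": 2})
	if err != nil {
		panic(err)
	}

	// Deterministic marshaling sorts map entries, so repeated runs of
	// the same message produce byte-identical output.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(m)
	if err != nil {
		panic(err)
	}

	// DiscardUnknown drops unknown fields rather than retaining them.
	var out structpb.Struct
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(len(b), out.Fields["a"].GetNumberValue()) // ..., 2
}
```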
// // This is to assist the edge-case where user-code does the following: +// // m1.OptionalBytes, _ = proto.Marshal(m2) +// // where they expect the proto2 "optional_bytes" field to be populated // if any only if m2 is a valid message. func emptyBytesForMessage(m Message) []byte { diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 4dba2b969972..67948dd1df8c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -10,7 +10,7 @@ import ( "reflect" "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // Equal reports whether two messages are equal. @@ -33,6 +33,10 @@ func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } mx := x.ProtoReflect() my := y.ProtoReflect() if mx.IsValid() != my.IsValid() { @@ -42,14 +46,14 @@ func Equal(x, y Message) bool { } // equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { +func equalMessage(mx, my protoreflect.Message) bool { if mx.Descriptor() != my.Descriptor() { return false } nx := 0 equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { nx++ vy := my.Get(fd) equal = my.Has(fd) && equalField(fd, vx, vy) @@ -59,7 +63,7 @@ func equalMessage(mx, my pref.Message) bool { return false } ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { ny++ return true }) @@ -71,7 +75,7 @@ func equalMessage(mx, my pref.Message) bool { } // equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch { case fd.IsList(): return equalList(fd, x.List(), y.List()) @@ -83,12 +87,12 @@ func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { } // equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { if x.Len() != y.Len() { return false } equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { vy := y.Get(k) equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) return equal @@ -97,7 +101,7 @@ func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { } // equalList compares two lists. -func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { if x.Len() != y.Len() { return false } @@ -110,31 +114,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { } // equalValue compares two singular values. 
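The pointer-identity check added to Equal is purely an optimization; semantics are unchanged, including the NaN handling visible in equalValue just below. For instance:

```go
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := proto.Clone(x).(*wrapperspb.DoubleValue)

	fmt.Println(proto.Equal(x, x))            // true, via the new identical-pointer fast path
	fmt.Println(proto.Equal(x, y))            // true: NaN equals NaN here, unlike Go's ==
	fmt.Println(x.GetValue() == y.GetValue()) // false
}
```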
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return x.Bool() == y.Bool() - case pref.EnumKind: + case protoreflect.EnumKind: return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy - case pref.StringKind: + case protoreflect.StringKind: return x.String() == y.String() - case pref.BytesKind: + case protoreflect.BytesKind: return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() @@ -143,7 +147,7 @@ func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { // equalUnknown compares unknown fields by direct comparison on the raw bytes // of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { +func equalUnknown(x, y protoreflect.RawFields) bool { if len(x) != len(y) { return false } @@ -151,8 +155,8 @@ func equalUnknown(x, y pref.RawFields) bool { return true } - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) for len(x) > 0 { fnum, _, n := protowire.ConsumeField(x) mx[fnum] = append(mx[fnum], x[:n]...) diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6b67..465e057b3238 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d43205c4..494d6ceef9e6 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. 
+//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index cebb36cdade6..27d7e35012d3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", // then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { if !ref.IsValid() { return nil, errors.New("invalid name reference: %q", ref) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9f37..d5d5af6ebedb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index dd85915bd4bf..55aa14922b01 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -8,8 +8,7 @@ // defined in proto source files and value interfaces which provide the // ability to examine and manipulate the contents of messages. // -// -// Protocol Buffer Descriptors +// # Protocol Buffer Descriptors // // Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) // are immutable objects that represent protobuf type information. @@ -26,8 +25,7 @@ // The "google.golang.org/protobuf/reflect/protodesc" package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // -// -// Go Type Descriptors +// # Go Type Descriptors // // A type descriptor (e.g., EnumType or MessageType) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. @@ -41,8 +39,7 @@ // The "google.golang.org/protobuf/types/dynamicpb" package can be used to // create Go type descriptors from protobuf descriptors. // -// -// Value Interfaces +// # Value Interfaces // // The Enum and Message interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve @@ -55,13 +52,11 @@ // The "github.com/golang/protobuf/proto".MessageReflect function can be used // to obtain a reflective view on older messages. // -// -// Relationships +// # Relationships // // The following diagrams demonstrate the relationships between // various types declared in this package. // -// // ┌───────────────────────────────────┐ // V │ // ┌────────────── New(n) ─────────────┐ │ @@ -83,7 +78,6 @@ // // • An Enum is a concrete enum instance. Generated enums implement Enum. 
// -// // ┌──────────────── New() ─────────────────┐ // │ │ // │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ @@ -98,12 +92,22 @@ // // • A MessageType describes a concrete Go message type. // It has a MessageDescriptor and can construct a Message instance. +// Just as how Go's reflect.Type is a reflective description of a Go type, +// a MessageType is a reflective description of a Go type for a protobuf message. // // • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// +// It has no understanding of Go types. In order to construct a MessageType +// from just a MessageDescriptor, you can consider looking up the message type +// in the global registry using protoregistry.GlobalTypes.FindMessageByName +// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// +// • A Message is a reflective view over a concrete message instance. +// Generated messages implement ProtoMessage, which can convert to a Message. +// Just as how Go's reflect.Value is a reflective view over a Go value, +// a Message is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the ProtoReflect method is similar to +// calling reflect.ValueOf, and the Message.Interface method is similar to +// calling reflect.Value.Interface. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 121ba3a07bba..0b99428855fe 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { // in a future version of this module. // // Example output: +// // .message_type[6].nested_type[15].field[3] func (p SourcePath) String() string { b := p.appendFileDescriptorProto(nil) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 8e53c44a9188..3867470d30ac 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -480,6 +480,7 @@ type ExtensionDescriptors interface { // relative to the parent that it is declared within. // // For example: +// // syntax = "proto2"; // package example; // message FooMessage { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1d57..7ced876f4e89 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724193..ca8e28c5bc8b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,32 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} @@ -367,6 +393,7 @@ func (v Value) MapKey() MapKey { // ╚═════════╧═════════════════════════════════════╝ // // A MapKey is constructed and accessed through a Value: +// // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdcac6c..702ddf22a274 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 59f024c444fc..58352a6978be 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -30,9 +30,11 @@ import ( // conflictPolicy configures the policy for handling registration conflicts. 
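The aliasing rules spelled out above for composite Values can be exercised against any repeated field; here the well-known ListValue serves as the example message. Mutable always aliases the source, while a NewField value stays detached until Set:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	lv := &structpb.ListValue{}
	m := lv.ProtoReflect()
	fd := m.Descriptor().Fields().ByName("values") // repeated google.protobuf.Value

	// Mutable aliases the message: the append is visible in lv itself.
	m.Mutable(fd).List().Append(
		protoreflect.ValueOfMessage(structpb.NewBoolValue(true).ProtoReflect()))
	fmt.Println(len(lv.Values)) // 1

	// NewField is detached until Set; per the comment above, using the
	// list again after Set would be undefined.
	v := m.NewField(fd)
	v.List().Append(
		protoreflect.ValueOfMessage(structpb.NewNumberValue(7).ProtoReflect()))
	m.Set(fd, v)
	fmt.Println(len(lv.Values)) // 1: Set replaced the earlier contents
}
```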
// // It can be over-written at compile time with a linker-initialized variable: +// // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" // // It can be over-written at program execution with an environment variable: +// // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main // // Neither of the above are covered by the compatibility promise and diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67eb73..44cf467d8845 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go index ff094e1ba44b..a105cb23e033 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -26,16 +26,19 @@ const ( // EnforceVersion is used by code generated by protoc-gen-go // to statically enforce minimum and maximum versions of this package. // A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. // // The runtime package can be upgraded by running: +// // go get google.golang.org/protobuf // // The generated code can be regenerated by running: +// // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} // // Example usage by generated code: +// // const ( // // Verify that this generated code is sufficiently up-to-date. // _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) @@ -49,6 +52,7 @@ const ( type EnforceVersion uint // This enforces the following invariant: +// // MinVersion ≤ GenVersion ≤ MaxVersion const ( _ = EnforceVersion(GenVersion - MinVersion) diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 7f94443d2699..1b2085d46905 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -394,7 +394,7 @@ func numValidPaths(m proto.Message, paths []string) int { // Identify the next message to search within. md = fd.Message() // may be nil - // Repeated fields are only allowed at the last postion. + // Repeated fields are only allowed at the last position. if fd.IsList() || fd.IsMap() { md = nil } diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 000000000000..e511ad6f7fb9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,653 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. 
+var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. 
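The stdin/stdout protocol described in the plugin.proto header comment above is small enough to sketch end to end. A hypothetical `protoc-gen-echo` built on this vendored package might look like the following; the output file names and contents are illustrative only:

```go
package main

import (
	"io"
	"log"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	// protoc writes a serialized CodeGeneratorRequest to the plugin's stdin.
	in, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &pluginpb.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one (illustrative) text file per requested input file.
	resp := &pluginpb.CodeGeneratorResponse{
		SupportedFeatures: proto.Uint64(uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)),
	}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".echo.txt"),
			Content: proto.String("saw " + name + "\n"),
		})
	}

	// The serialized CodeGeneratorResponse goes back on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}
```

Installed on `$PATH` as `protoc-gen-echo`, it would be picked up by `protoc --echo_out=out dir/file.proto`, per the `protoc-gen-$NAME` / `--${NAME}_out` naming rule quoted above.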
+type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. 
The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. 
The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. 
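To make the insertion-point mechanism above concrete: a later generator, run in the same protoc invocation, names a point that an earlier generator declared with `@@protoc_insertion_point(NAME)`. A hedged sketch — `example.pb.h`, `namespace_scope`, and `ExtraHelper` are all illustrative, echoing the C++ example in the comment above:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	resp := &pluginpb.CodeGeneratorResponse{}
	// Splice a declaration into a file produced by an earlier generator,
	// immediately above its "// @@protoc_insertion_point(namespace_scope)" line.
	resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
		Name:           proto.String("example.pb.h"),    // must name the earlier file exactly
		InsertionPoint: proto.String("namespace_scope"), // the NAME from the annotation
		Content:        proto.String("class ExtraHelper;\n"),
	})
	fmt.Println(len(resp.GetFile()), "file(s) staged for insertion")
}
```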
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 
0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: 
file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f55..0173b6982e84 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if 
d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc059e..268558a0d632 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/gotest.tools/v3/LICENSE b/vendor/gotest.tools/v3/LICENSE deleted file mode 100644 index aeaa2fac3dcb..000000000000 --- 
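The merge-key rework in the decode.go hunks above — deferring the `<<` merge and tracking `mergedFields` — preserves the YAML merge rule that keys spelled out in the host mapping win over keys brought in through the merge. A small sketch of those semantics against this vendored gopkg.in/yaml.v3 (the document contents are illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `
defaults: &defaults
  adapter: postgres
  host: localhost

development:
  <<: *defaults
  host: dev.example.com
`

func main() {
	var out map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		panic(err)
	}
	// The merged key comes from the anchor; the explicit key takes precedence.
	fmt.Println(out["development"]["adapter"]) // postgres
	fmt.Println(out["development"]["host"])    // dev.example.com
}
```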
a/vendor/gotest.tools/v3/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2018 gotest.tools authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gotest.tools/v3/assert/assert.go b/vendor/gotest.tools/v3/assert/assert.go deleted file mode 100644 index f3f805dde9c9..000000000000 --- a/vendor/gotest.tools/v3/assert/assert.go +++ /dev/null @@ -1,219 +0,0 @@ -/*Package assert provides assertions for comparing expected values to actual -values. When an assertion fails a helpful error message is printed. - -Assert and Check - -Assert() and Check() both accept a Comparison, and fail the test when the -comparison fails. The one difference is that Assert() will end the test execution -immediately (using t.FailNow()) whereas Check() will fail the test (using t.Fail()), -return the value of the comparison, then proceed with the rest of the test case. - -Example usage - -The example below shows assert used with some common types. - - - import ( - "testing" - - "gotest.tools/assert" - is "gotest.tools/assert/cmp" - ) - - func TestEverything(t *testing.T) { - // booleans - assert.Assert(t, ok) - assert.Assert(t, !missing) - - // primitives - assert.Equal(t, count, 1) - assert.Equal(t, msg, "the message") - assert.Assert(t, total != 10) // NotEqual - - // errors - assert.NilError(t, closer.Close()) - assert.Error(t, err, "the exact error message") - assert.ErrorContains(t, err, "includes this") - assert.ErrorType(t, err, os.IsNotExist) - - // complex types - assert.DeepEqual(t, result, myStruct{Name: "title"}) - assert.Assert(t, is.Len(items, 3)) - assert.Assert(t, len(sequence) != 0) // NotEmpty - assert.Assert(t, is.Contains(mapping, "key")) - - // pointers and interface - assert.Assert(t, is.Nil(ref)) - assert.Assert(t, ref != nil) // NotNil - } - -Comparisons - -Package http://pkg.go.dev/gotest.tools/v3/assert/cmp provides -many common comparisons. Additional comparisons can be written to compare -values in other ways. See the example Assert (CustomComparison). - -Automated migration from testify - -gty-migrate-from-testify is a command which translates Go source code from -testify assertions to the assertions provided by this package. - -See http://pkg.go.dev/gotest.tools/v3/assert/cmd/gty-migrate-from-testify. - - -*/ -package assert // import "gotest.tools/v3/assert" - -import ( - gocmp "github.com/google/go-cmp/cmp" - "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/internal/assert" -) - -// BoolOrComparison can be a bool, or cmp.Comparison. See Assert() for usage. -type BoolOrComparison interface{} - -// TestingT is the subset of testing.T used by the assert package. -type TestingT interface { - FailNow() - Fail() - Log(args ...interface{}) -} - -type helperT interface { - Helper() -} - -// Assert performs a comparison. If the comparison fails, the test is marked as -// failed, a failure message is logged, and execution is stopped immediately. -// -// The comparison argument may be one of three types: -// bool -// True is success. False is a failure. 
-// The failure message will contain the literal source code of the expression. -// cmp.Comparison -// Uses cmp.Result.Success() to check for success of failure. -// The comparison is responsible for producing a helpful failure message. -// http://pkg.go.dev/gotest.tools/v3/assert/cmp provides many common comparisons. -// error -// A nil value is considered success. -// A non-nil error is a failure, err.Error() is used as the failure message. -func Assert(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsFromComparisonCall, comparison, msgAndArgs...) { - t.FailNow() - } -} - -// Check performs a comparison. If the comparison fails the test is marked as -// failed, a failure message is logged, and Check returns false. Otherwise returns -// true. -// -// See Assert for details about the comparison arg and failure messages. -func Check(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsFromComparisonCall, comparison, msgAndArgs...) { - t.Fail() - return false - } - return true -} - -// NilError fails the test immediately if err is not nil. -// This is equivalent to Assert(t, err) -func NilError(t TestingT, err error, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, err, msgAndArgs...) { - t.FailNow() - } -} - -// Equal uses the == operator to assert two values are equal and fails the test -// if they are not equal. -// -// If the comparison fails Equal will use the variable names for x and y as part -// of the failure message to identify the actual and expected values. -// -// If either x or y are a multi-line string the failure message will include a -// unified diff of the two values. If the values only differ by whitespace -// the unified diff will be augmented by replacing whitespace characters with -// visible characters to identify the whitespace difference. -// -// This is equivalent to Assert(t, cmp.Equal(x, y)). -func Equal(t TestingT, x, y interface{}, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, cmp.Equal(x, y), msgAndArgs...) { - t.FailNow() - } -} - -// DeepEqual uses google/go-cmp (https://godoc.org/github.com/google/go-cmp/cmp) -// to assert two values are equal and fails the test if they are not equal. -// -// Package http://pkg.go.dev/gotest.tools/v3/assert/opt provides some additional -// commonly used Options. -// -// This is equivalent to Assert(t, cmp.DeepEqual(x, y)). -func DeepEqual(t TestingT, x, y interface{}, opts ...gocmp.Option) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, cmp.DeepEqual(x, y, opts...)) { - t.FailNow() - } -} - -// Error fails the test if err is nil, or the error message is not the expected -// message. -// Equivalent to Assert(t, cmp.Error(err, message)). -func Error(t TestingT, err error, message string, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, cmp.Error(err, message), msgAndArgs...) { - t.FailNow() - } -} - -// ErrorContains fails the test if err is nil, or the error message does not -// contain the expected substring. -// Equivalent to Assert(t, cmp.ErrorContains(err, substring)). 
-func ErrorContains(t TestingT, err error, substring string, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, cmp.ErrorContains(err, substring), msgAndArgs...) { - t.FailNow() - } -} - -// ErrorType fails the test if err is nil, or err is not the expected type. -// Equivalent to Assert(t, cmp.ErrorType(err, expected)). -// -// Expected can be one of: -// func(error) bool -// Function should return true if the error is the expected type. -// type struct{}, type &struct{} -// A struct or a pointer to a struct. -// Fails if the error is not of the same type as expected. -// type &interface{} -// A pointer to an interface type. -// Fails if err does not implement the interface. -// reflect.Type -// Fails if err does not implement the reflect.Type -func ErrorType(t TestingT, err error, expected interface{}, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if !assert.Eval(t, assert.ArgsAfterT, cmp.ErrorType(err, expected), msgAndArgs...) { - t.FailNow() - } -} diff --git a/vendor/gotest.tools/v3/assert/cmp/compare.go b/vendor/gotest.tools/v3/assert/cmp/compare.go deleted file mode 100644 index 3c0e05ab5fe9..000000000000 --- a/vendor/gotest.tools/v3/assert/cmp/compare.go +++ /dev/null @@ -1,365 +0,0 @@ -/*Package cmp provides Comparisons for Assert and Check*/ -package cmp // import "gotest.tools/v3/assert/cmp" - -import ( - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/google/go-cmp/cmp" - "gotest.tools/v3/internal/format" -) - -// Comparison is a function which compares values and returns ResultSuccess if -// the actual value matches the expected value. If the values do not match the -// Result will contain a message about why it failed. -type Comparison func() Result - -// DeepEqual compares two values using google/go-cmp -// (https://godoc.org/github.com/google/go-cmp/cmp) -// and succeeds if the values are equal. -// -// The comparison can be customized using comparison Options. -// Package http://pkg.go.dev/gotest.tools/v3/assert/opt provides some additional -// commonly used Options. -func DeepEqual(x, y interface{}, opts ...cmp.Option) Comparison { - return func() (result Result) { - defer func() { - if panicmsg, handled := handleCmpPanic(recover()); handled { - result = ResultFailure(panicmsg) - } - }() - diff := cmp.Diff(x, y, opts...) - if diff == "" { - return ResultSuccess - } - return multiLineDiffResult(diff) - } -} - -func handleCmpPanic(r interface{}) (string, bool) { - if r == nil { - return "", false - } - panicmsg, ok := r.(string) - if !ok { - panic(r) - } - switch { - case strings.HasPrefix(panicmsg, "cannot handle unexported field"): - return panicmsg, true - } - panic(r) -} - -func toResult(success bool, msg string) Result { - if success { - return ResultSuccess - } - return ResultFailure(msg) -} - -// RegexOrPattern may be either a *regexp.Regexp or a string that is a valid -// regexp pattern. -type RegexOrPattern interface{} - -// Regexp succeeds if value v matches regular expression re. 
-// -// Example: -// assert.Assert(t, cmp.Regexp("^[0-9a-f]{32}$", str)) -// r := regexp.MustCompile("^[0-9a-f]{32}$") -// assert.Assert(t, cmp.Regexp(r, str)) -func Regexp(re RegexOrPattern, v string) Comparison { - match := func(re *regexp.Regexp) Result { - return toResult( - re.MatchString(v), - fmt.Sprintf("value %q does not match regexp %q", v, re.String())) - } - - return func() Result { - switch regex := re.(type) { - case *regexp.Regexp: - return match(regex) - case string: - re, err := regexp.Compile(regex) - if err != nil { - return ResultFailure(err.Error()) - } - return match(re) - default: - return ResultFailure(fmt.Sprintf("invalid type %T for regex pattern", regex)) - } - } -} - -// Equal succeeds if x == y. See assert.Equal for full documentation. -func Equal(x, y interface{}) Comparison { - return func() Result { - switch { - case x == y: - return ResultSuccess - case isMultiLineStringCompare(x, y): - diff := format.UnifiedDiff(format.DiffConfig{A: x.(string), B: y.(string)}) - return multiLineDiffResult(diff) - } - return ResultFailureTemplate(` - {{- printf "%v" .Data.x}} ( - {{- with callArg 0 }}{{ formatNode . }} {{end -}} - {{- printf "%T" .Data.x -}} - ) != {{ printf "%v" .Data.y}} ( - {{- with callArg 1 }}{{ formatNode . }} {{end -}} - {{- printf "%T" .Data.y -}} - )`, - map[string]interface{}{"x": x, "y": y}) - } -} - -func isMultiLineStringCompare(x, y interface{}) bool { - strX, ok := x.(string) - if !ok { - return false - } - strY, ok := y.(string) - if !ok { - return false - } - return strings.Contains(strX, "\n") || strings.Contains(strY, "\n") -} - -func multiLineDiffResult(diff string) Result { - return ResultFailureTemplate(` ---- {{ with callArg 0 }}{{ formatNode . }}{{else}}←{{end}} -+++ {{ with callArg 1 }}{{ formatNode . }}{{else}}→{{end}} -{{ .Data.diff }}`, - map[string]interface{}{"diff": diff}) -} - -// Len succeeds if the sequence has the expected length. -func Len(seq interface{}, expected int) Comparison { - return func() (result Result) { - defer func() { - if e := recover(); e != nil { - result = ResultFailure(fmt.Sprintf("type %T does not have a length", seq)) - } - }() - value := reflect.ValueOf(seq) - length := value.Len() - if length == expected { - return ResultSuccess - } - msg := fmt.Sprintf("expected %s (length %d) to have length %d", seq, length, expected) - return ResultFailure(msg) - } -} - -// Contains succeeds if item is in collection. Collection may be a string, map, -// slice, or array. -// -// If collection is a string, item must also be a string, and is compared using -// strings.Contains(). -// If collection is a Map, contains will succeed if item is a key in the map. -// If collection is a slice or array, item is compared to each item in the -// sequence using reflect.DeepEqual(). 
-func Contains(collection interface{}, item interface{}) Comparison { - return func() Result { - colValue := reflect.ValueOf(collection) - if !colValue.IsValid() { - return ResultFailure(fmt.Sprintf("nil does not contain items")) - } - msg := fmt.Sprintf("%v does not contain %v", collection, item) - - itemValue := reflect.ValueOf(item) - switch colValue.Type().Kind() { - case reflect.String: - if itemValue.Type().Kind() != reflect.String { - return ResultFailure("string may only contain strings") - } - return toResult( - strings.Contains(colValue.String(), itemValue.String()), - fmt.Sprintf("string %q does not contain %q", collection, item)) - - case reflect.Map: - if itemValue.Type() != colValue.Type().Key() { - return ResultFailure(fmt.Sprintf( - "%v can not contain a %v key", colValue.Type(), itemValue.Type())) - } - return toResult(colValue.MapIndex(itemValue).IsValid(), msg) - - case reflect.Slice, reflect.Array: - for i := 0; i < colValue.Len(); i++ { - if reflect.DeepEqual(colValue.Index(i).Interface(), item) { - return ResultSuccess - } - } - return ResultFailure(msg) - default: - return ResultFailure(fmt.Sprintf("type %T does not contain items", collection)) - } - } -} - -// Panics succeeds if f() panics. -func Panics(f func()) Comparison { - return func() (result Result) { - defer func() { - if err := recover(); err != nil { - result = ResultSuccess - } - }() - f() - return ResultFailure("did not panic") - } -} - -// Error succeeds if err is a non-nil error, and the error message equals the -// expected message. -func Error(err error, message string) Comparison { - return func() Result { - switch { - case err == nil: - return ResultFailure("expected an error, got nil") - case err.Error() != message: - return ResultFailure(fmt.Sprintf( - "expected error %q, got %s", message, formatErrorMessage(err))) - } - return ResultSuccess - } -} - -// ErrorContains succeeds if err is a non-nil error, and the error message contains -// the expected substring. -func ErrorContains(err error, substring string) Comparison { - return func() Result { - switch { - case err == nil: - return ResultFailure("expected an error, got nil") - case !strings.Contains(err.Error(), substring): - return ResultFailure(fmt.Sprintf( - "expected error to contain %q, got %s", substring, formatErrorMessage(err))) - } - return ResultSuccess - } -} - -type causer interface { - Cause() error -} - -func formatErrorMessage(err error) string { - if _, ok := err.(causer); ok { - return fmt.Sprintf("%q\n%+v", err, err) - } - // This error was not wrapped with github.com/pkg/errors - return fmt.Sprintf("%q", err) -} - -// Nil succeeds if obj is a nil interface, pointer, or function. -// -// Use NilError() for comparing errors. Use Len(obj, 0) for comparing slices, -// maps, and channels. -func Nil(obj interface{}) Comparison { - msgFunc := func(value reflect.Value) string { - return fmt.Sprintf("%v (type %s) is not nil", reflect.Indirect(value), value.Type()) - } - return isNil(obj, msgFunc) -} - -func isNil(obj interface{}, msgFunc func(reflect.Value) string) Comparison { - return func() Result { - if obj == nil { - return ResultSuccess - } - value := reflect.ValueOf(obj) - kind := value.Type().Kind() - if kind >= reflect.Chan && kind <= reflect.Slice { - if value.IsNil() { - return ResultSuccess - } - return ResultFailure(msgFunc(value)) - } - - return ResultFailure(fmt.Sprintf("%v (type %s) can not be nil", value, value.Type())) - } -} - -// ErrorType succeeds if err is not nil and is of the expected type. 
-// -// Expected can be one of: -// func(error) bool -// Function should return true if the error is the expected type. -// type struct{}, type &struct{} -// A struct or a pointer to a struct. -// Fails if the error is not of the same type as expected. -// type &interface{} -// A pointer to an interface type. -// Fails if err does not implement the interface. -// reflect.Type -// Fails if err does not implement the reflect.Type -func ErrorType(err error, expected interface{}) Comparison { - return func() Result { - switch expectedType := expected.(type) { - case func(error) bool: - return cmpErrorTypeFunc(err, expectedType) - case reflect.Type: - if expectedType.Kind() == reflect.Interface { - return cmpErrorTypeImplementsType(err, expectedType) - } - return cmpErrorTypeEqualType(err, expectedType) - case nil: - return ResultFailure(fmt.Sprintf("invalid type for expected: nil")) - } - - expectedType := reflect.TypeOf(expected) - switch { - case expectedType.Kind() == reflect.Struct, isPtrToStruct(expectedType): - return cmpErrorTypeEqualType(err, expectedType) - case isPtrToInterface(expectedType): - return cmpErrorTypeImplementsType(err, expectedType.Elem()) - } - return ResultFailure(fmt.Sprintf("invalid type for expected: %T", expected)) - } -} - -func cmpErrorTypeFunc(err error, f func(error) bool) Result { - if f(err) { - return ResultSuccess - } - actual := "nil" - if err != nil { - actual = fmt.Sprintf("%s (%T)", err, err) - } - return ResultFailureTemplate(`error is {{ .Data.actual }} - {{- with callArg 1 }}, not {{ formatNode . }}{{end -}}`, - map[string]interface{}{"actual": actual}) -} - -func cmpErrorTypeEqualType(err error, expectedType reflect.Type) Result { - if err == nil { - return ResultFailure(fmt.Sprintf("error is nil, not %s", expectedType)) - } - errValue := reflect.ValueOf(err) - if errValue.Type() == expectedType { - return ResultSuccess - } - return ResultFailure(fmt.Sprintf("error is %s (%T), not %s", err, err, expectedType)) -} - -func cmpErrorTypeImplementsType(err error, expectedType reflect.Type) Result { - if err == nil { - return ResultFailure(fmt.Sprintf("error is nil, not %s", expectedType)) - } - errValue := reflect.ValueOf(err) - if errValue.Type().Implements(expectedType) { - return ResultSuccess - } - return ResultFailure(fmt.Sprintf("error is %s (%T), not %s", err, err, expectedType)) -} - -func isPtrToInterface(typ reflect.Type) bool { - return typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Interface -} - -func isPtrToStruct(typ reflect.Type) bool { - return typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct -} diff --git a/vendor/gotest.tools/v3/assert/cmp/result.go b/vendor/gotest.tools/v3/assert/cmp/result.go deleted file mode 100644 index 3b48d9bf0c0c..000000000000 --- a/vendor/gotest.tools/v3/assert/cmp/result.go +++ /dev/null @@ -1,99 +0,0 @@ -package cmp - -import ( - "bytes" - "fmt" - "go/ast" - "text/template" - - "gotest.tools/v3/internal/source" -) - -// A Result of a Comparison. -type Result interface { - Success() bool -} - -// StringResult is an implementation of Result that reports the error message -// string verbatim and does not provide any templating or formatting of the -// message. -type StringResult struct { - success bool - message string -} - -// Success returns true if the comparison was successful. -func (r StringResult) Success() bool { - return r.success -} - -// FailureMessage returns the message used to provide additional information -// about the failure. 
-func (r StringResult) FailureMessage() string { - return r.message -} - -// ResultSuccess is a constant which is returned by a ComparisonWithResult to -// indicate success. -var ResultSuccess = StringResult{success: true} - -// ResultFailure returns a failed Result with a failure message. -func ResultFailure(message string) StringResult { - return StringResult{message: message} -} - -// ResultFromError returns ResultSuccess if err is nil. Otherwise ResultFailure -// is returned with the error message as the failure message. -func ResultFromError(err error) Result { - if err == nil { - return ResultSuccess - } - return ResultFailure(err.Error()) -} - -type templatedResult struct { - template string - data map[string]interface{} -} - -func (r templatedResult) Success() bool { - return false -} - -func (r templatedResult) FailureMessage(args []ast.Expr) string { - msg, err := renderMessage(r, args) - if err != nil { - return fmt.Sprintf("failed to render failure message: %s", err) - } - return msg -} - -// ResultFailureTemplate returns a Result with a template string and data which -// can be used to format a failure message. The template may access data from .Data, -// the comparison args with the callArg function, and the formatNode function may -// be used to format the call args. -func ResultFailureTemplate(template string, data map[string]interface{}) Result { - return templatedResult{template: template, data: data} -} - -func renderMessage(result templatedResult, args []ast.Expr) (string, error) { - tmpl := template.New("failure").Funcs(template.FuncMap{ - "formatNode": source.FormatNode, - "callArg": func(index int) ast.Expr { - if index >= len(args) { - return nil - } - return args[index] - }, - }) - var err error - tmpl, err = tmpl.Parse(result.template) - if err != nil { - return "", err - } - buf := new(bytes.Buffer) - err = tmpl.Execute(buf, map[string]interface{}{ - "Data": result.data, - }) - return buf.String(), err -} diff --git a/vendor/gotest.tools/v3/internal/assert/assert.go b/vendor/gotest.tools/v3/internal/assert/assert.go deleted file mode 100644 index 8dc01f4b5e08..000000000000 --- a/vendor/gotest.tools/v3/internal/assert/assert.go +++ /dev/null @@ -1,143 +0,0 @@ -package assert - -import ( - "fmt" - "go/ast" - "go/token" - "reflect" - - "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/internal/format" - "gotest.tools/v3/internal/source" -) - -// LogT is the subset of testing.T used by the assert package. -type LogT interface { - Log(args ...interface{}) -} - -type helperT interface { - Helper() -} - -const failureMessage = "assertion failed: " - -// Eval the comparison and print a failure messages if the comparison has failed. -// nolint: gocyclo -func Eval( - t LogT, - argSelector argSelector, - comparison interface{}, - msgAndArgs ...interface{}, -) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - var success bool - switch check := comparison.(type) { - case bool: - if check { - return true - } - logFailureFromBool(t, msgAndArgs...) - - // Undocumented legacy comparison without Result type - case func() (success bool, message string): - success = runCompareFunc(t, check, msgAndArgs...) - - case nil: - return true - - case error: - msg := failureMsgFromError(check) - t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) - - case cmp.Comparison: - success = RunComparison(t, argSelector, check, msgAndArgs...) - - case func() cmp.Result: - success = RunComparison(t, argSelector, check, msgAndArgs...) 
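The Result helpers above (ResultSuccess, ResultFailure, ResultFromError) are the building blocks for user-defined comparisons; a minimal sketch, where isEven is a hypothetical custom Comparison:

```go
package example_test

import (
	"fmt"
	"testing"

	"gotest.tools/v3/assert"
	"gotest.tools/v3/assert/cmp"
)

// isEven is a hypothetical custom Comparison built on the Result helpers.
func isEven(n int) cmp.Comparison {
	return func() cmp.Result {
		if n%2 == 0 {
			return cmp.ResultSuccess
		}
		return cmp.ResultFailure(fmt.Sprintf("%d is not even", n))
	}
}

func TestIsEven(t *testing.T) {
	assert.Assert(t, isEven(4))
}
```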
- - default: - t.Log(fmt.Sprintf("invalid Comparison: %v (%T)", check, check)) - } - return success -} - -func runCompareFunc( - t LogT, - f func() (success bool, message string), - msgAndArgs ...interface{}, -) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if success, message := f(); !success { - t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...)) - return false - } - return true -} - -func logFailureFromBool(t LogT, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - const stackIndex = 3 // Assert()/Check(), assert(), logFailureFromBool() - args, err := source.CallExprArgs(stackIndex) - if err != nil { - t.Log(err.Error()) - return - } - - const comparisonArgIndex = 1 // Assert(t, comparison) - if len(args) <= comparisonArgIndex { - t.Log(failureMessage + "but assert failed to find the expression to print") - return - } - - msg, err := boolFailureMessage(args[comparisonArgIndex]) - if err != nil { - t.Log(err.Error()) - msg = "expression is false" - } - - t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) -} - -func failureMsgFromError(err error) string { - // Handle errors with non-nil types - v := reflect.ValueOf(err) - if v.Kind() == reflect.Ptr && v.IsNil() { - return fmt.Sprintf("error is not nil: error has type %T", err) - } - return "error is not nil: " + err.Error() -} - -func boolFailureMessage(expr ast.Expr) (string, error) { - if binaryExpr, ok := expr.(*ast.BinaryExpr); ok && binaryExpr.Op == token.NEQ { - x, err := source.FormatNode(binaryExpr.X) - if err != nil { - return "", err - } - y, err := source.FormatNode(binaryExpr.Y) - if err != nil { - return "", err - } - return x + " is " + y, nil - } - - if unaryExpr, ok := expr.(*ast.UnaryExpr); ok && unaryExpr.Op == token.NOT { - x, err := source.FormatNode(unaryExpr.X) - if err != nil { - return "", err - } - return x + " is true", nil - } - - formatted, err := source.FormatNode(expr) - if err != nil { - return "", err - } - return "expression is false: " + formatted, nil -} diff --git a/vendor/gotest.tools/v3/internal/assert/result.go b/vendor/gotest.tools/v3/internal/assert/result.go deleted file mode 100644 index 20cd54129ed3..000000000000 --- a/vendor/gotest.tools/v3/internal/assert/result.go +++ /dev/null @@ -1,125 +0,0 @@ -package assert - -import ( - "fmt" - "go/ast" - - "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/internal/format" - "gotest.tools/v3/internal/source" -) - -// RunComparison and return Comparison.Success. If the comparison fails a messages -// will be printed using t.Log. 
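The source-expression machinery above is what lets a bare boolean assertion print the failing expression: per boolFailureMessage, `x != y` is reported as "x is y" and `!cond` as "cond is true". A sketch of the observable behavior, assuming the messages follow that code path:

```go
package example_test

import (
	"testing"

	"gotest.tools/v3/assert"
)

func TestBareBool(t *testing.T) {
	x := 0
	assert.Check(t, x != 0) // logs: assertion failed: x is 0
	ok := true
	assert.Check(t, !ok) // logs: assertion failed: ok is true
}
```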
-func RunComparison( - t LogT, - argSelector argSelector, - f cmp.Comparison, - msgAndArgs ...interface{}, -) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - result := f() - if result.Success() { - return true - } - - var message string - switch typed := result.(type) { - case resultWithComparisonArgs: - const stackIndex = 3 // Assert/Check, assert, RunComparison - args, err := source.CallExprArgs(stackIndex) - if err != nil { - t.Log(err.Error()) - } - message = typed.FailureMessage(filterPrintableExpr(argSelector(args))) - case resultBasic: - message = typed.FailureMessage() - default: - message = fmt.Sprintf("comparison returned invalid Result type: %T", result) - } - - t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...)) - return false -} - -type resultWithComparisonArgs interface { - FailureMessage(args []ast.Expr) string -} - -type resultBasic interface { - FailureMessage() string -} - -// filterPrintableExpr filters the ast.Expr slice to only include Expr that are -// easy to read when printed and contain relevant information to an assertion. -// -// Ident and SelectorExpr are included because they print nicely and the variable -// names may provide additional context to their values. -// BasicLit and CompositeLit are excluded because their source is equivalent to -// their value, which is already available. -// Other types are ignored for now, but could be added if they are relevant. -func filterPrintableExpr(args []ast.Expr) []ast.Expr { - result := make([]ast.Expr, len(args)) - for i, arg := range args { - if isShortPrintableExpr(arg) { - result[i] = arg - continue - } - - if starExpr, ok := arg.(*ast.StarExpr); ok { - result[i] = starExpr.X - continue - } - } - return result -} - -func isShortPrintableExpr(expr ast.Expr) bool { - switch expr.(type) { - case *ast.Ident, *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr: - return true - case *ast.BinaryExpr, *ast.UnaryExpr: - return true - default: - // CallExpr, ParenExpr, TypeAssertExpr, KeyValueExpr, StarExpr - return false - } -} - -type argSelector func([]ast.Expr) []ast.Expr - -// ArgsAfterT selects args starting at position 1. Used when the caller has a -// testing.T as the first argument, and the args to select should follow it. -func ArgsAfterT(args []ast.Expr) []ast.Expr { - if len(args) < 1 { - return nil - } - return args[1:] -} - -// ArgsFromComparisonCall selects args from the CallExpression at position 1. -// Used when the caller has a testing.T as the first argument, and the args to -// select are passed to the cmp.Comparison at position 1. -func ArgsFromComparisonCall(args []ast.Expr) []ast.Expr { - if len(args) <= 1 { - return nil - } - if callExpr, ok := args[1].(*ast.CallExpr); ok { - return callExpr.Args - } - return nil -} - -// ArgsAtZeroIndex selects args from the CallExpression at position 1. -// Used when the caller accepts a single cmp.Comparison argument. -func ArgsAtZeroIndex(args []ast.Expr) []ast.Expr { - if len(args) == 0 { - return nil - } - if callExpr, ok := args[0].(*ast.CallExpr); ok { - return callExpr.Args - } - return nil -} diff --git a/vendor/gotest.tools/v3/internal/difflib/LICENSE b/vendor/gotest.tools/v3/internal/difflib/LICENSE deleted file mode 100644 index c67dad612a3d..000000000000 --- a/vendor/gotest.tools/v3/internal/difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gotest.tools/v3/internal/difflib/difflib.go b/vendor/gotest.tools/v3/internal/difflib/difflib.go deleted file mode 100644 index 9bf506b6be0e..000000000000 --- a/vendor/gotest.tools/v3/internal/difflib/difflib.go +++ /dev/null @@ -1,423 +0,0 @@ -/*Package difflib is a partial port of Python difflib module. - -Original source: https://github.com/pmezard/go-difflib - -This file is trimmed to only the parts used by this repository. -*/ -package difflib // import "gotest.tools/v3/internal/difflib" - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// Match stores line numbers of size of match -type Match struct { - A int - B int - Size int -} - -// OpCode identifies the type of diff -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
    " lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -// NewMatcher returns a new SequenceMatcher -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -// SetSeqs sets two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// SetSeq1 sets the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// SetSeq2 sets the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. Then that block is extended as -// far as possible by matching (only) junk elements on both sides. 
So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// GetMatchingBlocks returns a list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
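Since difflib here is an internal package (gotest.tools/v3/internal/difflib), the following sketch only compiles inside this module; it illustrates the matching-block triples described above:

```go
m := difflib.NewMatcher(
	[]string{"one\n", "two\n", "three\n"},
	[]string{"one\n", "2\n", "three\n"},
)
for _, b := range m.GetMatchingBlocks() {
	fmt.Printf("a[%d] b[%d] size=%d\n", b.A, b.B, b.Size)
}
// Output:
// a[0] b[0] size=1
// a[2] b[2] size=1
// a[3] b[3] size=0   (the terminating dummy triple)
```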
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// GetOpCodes returns a list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// GetGroupedOpCodes isolates change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
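Continuing the sketch with the same matcher, GetOpCodes turns those blocks into the 'e'/'r'/'d'/'i' edit operations documented above:

```go
for _, op := range m.GetOpCodes() {
	fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
}
// Output:
// e a[0:1] b[0:1]   "one" kept
// r a[1:2] b[1:2]   "two" replaced by "2"
// e a[2:3] b[2:3]   "three" kept
```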
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} diff --git a/vendor/gotest.tools/v3/internal/format/diff.go b/vendor/gotest.tools/v3/internal/format/diff.go deleted file mode 100644 index 9897d4b9d9e5..000000000000 --- a/vendor/gotest.tools/v3/internal/format/diff.go +++ /dev/null @@ -1,161 +0,0 @@ -package format - -import ( - "bytes" - "fmt" - "strings" - "unicode" - - "gotest.tools/v3/internal/difflib" -) - -const ( - contextLines = 2 -) - -// DiffConfig for a unified diff -type DiffConfig struct { - A string - B string - From string - To string -} - -// UnifiedDiff is a modified version of difflib.WriteUnifiedDiff with better -// support for showing the whitespace differences. -func UnifiedDiff(conf DiffConfig) string { - a := strings.SplitAfter(conf.A, "\n") - b := strings.SplitAfter(conf.B, "\n") - groups := difflib.NewMatcher(a, b).GetGroupedOpCodes(contextLines) - if len(groups) == 0 { - return "" - } - - buf := new(bytes.Buffer) - writeFormat := func(format string, args ...interface{}) { - buf.WriteString(fmt.Sprintf(format, args...)) - } - writeLine := func(prefix string, s string) { - buf.WriteString(prefix + s) - } - if hasWhitespaceDiffLines(groups, a, b) { - writeLine = visibleWhitespaceLine(writeLine) - } - formatHeader(writeFormat, conf) - for _, group := range groups { - formatRangeLine(writeFormat, group) - for _, opCode := range group { - in, out := a[opCode.I1:opCode.I2], b[opCode.J1:opCode.J2] - switch opCode.Tag { - case 'e': - formatLines(writeLine, " ", in) - case 'r': - formatLines(writeLine, "-", in) - formatLines(writeLine, "+", out) - case 'd': - formatLines(writeLine, "-", in) - case 'i': - formatLines(writeLine, "+", out) - } - } - } - return buf.String() -} - -// hasWhitespaceDiffLines returns true if any diff groups is only different -// because of whitespace characters. 
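UnifiedDiff above is what renders assertion failure diffs; an in-module sketch of its output (gotest.tools/v3/internal/format is likewise not importable from outside the module):

```go
out := format.UnifiedDiff(format.DiffConfig{
	A:    "one\ntwo\nthree",
	B:    "one\n2\nthree",
	From: "expected",
	To:   "actual",
})
fmt.Print(out)
// --- expected
// +++ actual
// @@ -1,3 +1,3 @@
//  one
// -two
// +2
//  three
```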
-func hasWhitespaceDiffLines(groups [][]difflib.OpCode, a, b []string) bool { - for _, group := range groups { - in, out := new(bytes.Buffer), new(bytes.Buffer) - for _, opCode := range group { - if opCode.Tag == 'e' { - continue - } - for _, line := range a[opCode.I1:opCode.I2] { - in.WriteString(line) - } - for _, line := range b[opCode.J1:opCode.J2] { - out.WriteString(line) - } - } - if removeWhitespace(in.String()) == removeWhitespace(out.String()) { - return true - } - } - return false -} - -func removeWhitespace(s string) string { - var result []rune - for _, r := range s { - if !unicode.IsSpace(r) { - result = append(result, r) - } - } - return string(result) -} - -func visibleWhitespaceLine(ws func(string, string)) func(string, string) { - mapToVisibleSpace := func(r rune) rune { - switch r { - case '\n': - case ' ': - return '·' - case '\t': - return '▷' - case '\v': - return '▽' - case '\r': - return '↵' - case '\f': - return '↓' - default: - if unicode.IsSpace(r) { - return '�' - } - } - return r - } - return func(prefix, s string) { - ws(prefix, strings.Map(mapToVisibleSpace, s)) - } -} - -func formatHeader(wf func(string, ...interface{}), conf DiffConfig) { - if conf.From != "" || conf.To != "" { - wf("--- %s\n", conf.From) - wf("+++ %s\n", conf.To) - } -} - -func formatRangeLine(wf func(string, ...interface{}), group []difflib.OpCode) { - first, last := group[0], group[len(group)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - wf("@@ -%s +%s @@\n", range1, range2) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning-- // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -func formatLines(writeLine func(string, string), prefix string, lines []string) { - for _, line := range lines { - writeLine(prefix, line) - } - // Add a newline if the last line is missing one so that the diff displays - // properly. - if !strings.HasSuffix(lines[len(lines)-1], "\n") { - writeLine("", "\n") - } -} diff --git a/vendor/gotest.tools/v3/internal/format/format.go b/vendor/gotest.tools/v3/internal/format/format.go deleted file mode 100644 index 5097e4bd6eb5..000000000000 --- a/vendor/gotest.tools/v3/internal/format/format.go +++ /dev/null @@ -1,27 +0,0 @@ -package format // import "gotest.tools/v3/internal/format" - -import "fmt" - -// Message accepts a msgAndArgs varargs and formats it using fmt.Sprintf -func Message(msgAndArgs ...interface{}) string { - switch len(msgAndArgs) { - case 0: - return "" - case 1: - return fmt.Sprintf("%v", msgAndArgs[0]) - default: - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } -} - -// WithCustomMessage accepts one or two messages and formats them appropriately -func WithCustomMessage(source string, msgAndArgs ...interface{}) string { - custom := Message(msgAndArgs...) 
- switch { - case custom == "": - return source - case source == "": - return custom - } - return fmt.Sprintf("%s: %s", source, custom) -} diff --git a/vendor/gotest.tools/v3/internal/source/defers.go b/vendor/gotest.tools/v3/internal/source/defers.go deleted file mode 100644 index 66cfafbb648a..000000000000 --- a/vendor/gotest.tools/v3/internal/source/defers.go +++ /dev/null @@ -1,53 +0,0 @@ -package source - -import ( - "go/ast" - "go/token" - - "github.com/pkg/errors" -) - -func scanToDeferLine(fileset *token.FileSet, node ast.Node, lineNum int) ast.Node { - var matchedNode ast.Node - ast.Inspect(node, func(node ast.Node) bool { - switch { - case node == nil || matchedNode != nil: - return false - case fileset.Position(node.End()).Line == lineNum: - if funcLit, ok := node.(*ast.FuncLit); ok { - matchedNode = funcLit - return false - } - } - return true - }) - debug("defer line node: %s", debugFormatNode{matchedNode}) - return matchedNode -} - -func guessDefer(node ast.Node) (ast.Node, error) { - defers := collectDefers(node) - switch len(defers) { - case 0: - return nil, errors.New("failed to expression in defer") - case 1: - return defers[0].Call, nil - default: - return nil, errors.Errorf( - "ambiguous call expression: multiple (%d) defers in call block", - len(defers)) - } -} - -func collectDefers(node ast.Node) []*ast.DeferStmt { - var defers []*ast.DeferStmt - ast.Inspect(node, func(node ast.Node) bool { - if d, ok := node.(*ast.DeferStmt); ok { - defers = append(defers, d) - debug("defer: %s", debugFormatNode{d}) - return false - } - return true - }) - return defers -} diff --git a/vendor/gotest.tools/v3/internal/source/source.go b/vendor/gotest.tools/v3/internal/source/source.go deleted file mode 100644 index c2eef0337375..000000000000 --- a/vendor/gotest.tools/v3/internal/source/source.go +++ /dev/null @@ -1,181 +0,0 @@ -package source // import "gotest.tools/v3/internal/source" - -import ( - "bytes" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "os" - "runtime" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const baseStackIndex = 1 - -// FormattedCallExprArg returns the argument from an ast.CallExpr at the -// index in the call stack. The argument is formatted using FormatNode. -func FormattedCallExprArg(stackIndex int, argPos int) (string, error) { - args, err := CallExprArgs(stackIndex + 1) - if err != nil { - return "", err - } - if argPos >= len(args) { - return "", errors.New("failed to find expression") - } - return FormatNode(args[argPos]) -} - -// CallExprArgs returns the ast.Expr slice for the args of an ast.CallExpr at -// the index in the call stack. 
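Stepping back to the format helpers deleted above, Message and WithCustomMessage compose the final failure text; their rules reduce to the following (internal package; expected values shown as comments):

```go
format.Message()                // ""
format.Message("boom")          // "boom"
format.Message("%d retries", 3) // "3 retries"

format.WithCustomMessage("x is 0")            // "x is 0"
format.WithCustomMessage("x is 0", "context") // "x is 0: context"
```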
-func CallExprArgs(stackIndex int) ([]ast.Expr, error) { - _, filename, lineNum, ok := runtime.Caller(baseStackIndex + stackIndex) - if !ok { - return nil, errors.New("failed to get call stack") - } - debug("call stack position: %s:%d", filename, lineNum) - - node, err := getNodeAtLine(filename, lineNum) - if err != nil { - return nil, err - } - debug("found node: %s", debugFormatNode{node}) - - return getCallExprArgs(node) -} - -func getNodeAtLine(filename string, lineNum int) (ast.Node, error) { - fileset := token.NewFileSet() - astFile, err := parser.ParseFile(fileset, filename, nil, parser.AllErrors) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse source file: %s", filename) - } - - if node := scanToLine(fileset, astFile, lineNum); node != nil { - return node, nil - } - if node := scanToDeferLine(fileset, astFile, lineNum); node != nil { - node, err := guessDefer(node) - if err != nil || node != nil { - return node, err - } - } - return nil, errors.Errorf( - "failed to find an expression on line %d in %s", lineNum, filename) -} - -func scanToLine(fileset *token.FileSet, node ast.Node, lineNum int) ast.Node { - var matchedNode ast.Node - ast.Inspect(node, func(node ast.Node) bool { - switch { - case node == nil || matchedNode != nil: - return false - case nodePosition(fileset, node).Line == lineNum: - matchedNode = node - return false - } - return true - }) - return matchedNode -} - -// In golang 1.9 the line number changed from being the line where the statement -// ended to the line where the statement began. -func nodePosition(fileset *token.FileSet, node ast.Node) token.Position { - if goVersionBefore19 { - return fileset.Position(node.End()) - } - return fileset.Position(node.Pos()) -} - -// GoVersionLessThan returns true if runtime.Version() is semantically less than -// version major.minor. Returns false if a release version can not be parsed from -// runtime.Version(). 
-func GoVersionLessThan(major, minor int64) bool { - version := runtime.Version() - // not a release version - if !strings.HasPrefix(version, "go") { - return false - } - version = strings.TrimPrefix(version, "go") - parts := strings.Split(version, ".") - if len(parts) < 2 { - return false - } - rMajor, err := strconv.ParseInt(parts[0], 10, 32) - if err != nil { - return false - } - if rMajor != major { - return rMajor < major - } - rMinor, err := strconv.ParseInt(parts[1], 10, 32) - if err != nil { - return false - } - return rMinor < minor -} - -var goVersionBefore19 = GoVersionLessThan(1, 9) - -func getCallExprArgs(node ast.Node) ([]ast.Expr, error) { - visitor := &callExprVisitor{} - ast.Walk(visitor, node) - if visitor.expr == nil { - return nil, errors.New("failed to find call expression") - } - debug("callExpr: %s", debugFormatNode{visitor.expr}) - return visitor.expr.Args, nil -} - -type callExprVisitor struct { - expr *ast.CallExpr -} - -func (v *callExprVisitor) Visit(node ast.Node) ast.Visitor { - if v.expr != nil || node == nil { - return nil - } - debug("visit: %s", debugFormatNode{node}) - - switch typed := node.(type) { - case *ast.CallExpr: - v.expr = typed - return nil - case *ast.DeferStmt: - ast.Walk(v, typed.Call.Fun) - return nil - } - return v -} - -// FormatNode using go/format.Node and return the result as a string -func FormatNode(node ast.Node) (string, error) { - buf := new(bytes.Buffer) - err := format.Node(buf, token.NewFileSet(), node) - return buf.String(), err -} - -var debugEnabled = os.Getenv("GOTESTTOOLS_DEBUG") != "" - -func debug(format string, args ...interface{}) { - if debugEnabled { - fmt.Fprintf(os.Stderr, "DEBUG: "+format+"\n", args...) - } -} - -type debugFormatNode struct { - ast.Node -} - -func (n debugFormatNode) String() string { - out, err := FormatNode(n.Node) - if err != nil { - return fmt.Sprintf("failed to format %s: %s", n.Node, err) - } - return fmt.Sprintf("(%T) %s", n.Node, out) -} diff --git a/vendor/gotest.tools/v3/poll/check.go b/vendor/gotest.tools/v3/poll/check.go deleted file mode 100644 index 060b0998906b..000000000000 --- a/vendor/gotest.tools/v3/poll/check.go +++ /dev/null @@ -1,39 +0,0 @@ -package poll - -import ( - "net" - "os" -) - -// Check is a function which will be used as check for the WaitOn method. -type Check func(t LogT) Result - -// FileExists looks on filesystem and check that path exists. -func FileExists(path string) Check { - return func(t LogT) Result { - _, err := os.Stat(path) - if os.IsNotExist(err) { - t.Logf("waiting on file %s to exist", path) - return Continue("file %s does not exist", path) - } - if err != nil { - return Error(err) - } - - return Success() - } -} - -// Connection try to open a connection to the address on the -// named network. See net.Dial for a description of the network and -// address parameters. -func Connection(network, address string) Check { - return func(t LogT) Result { - _, err := net.Dial(network, address) - if err != nil { - t.Logf("waiting on socket %s://%s to be available...", network, address) - return Continue("socket %s://%s not available", network, address) - } - return Success() - } -} diff --git a/vendor/gotest.tools/v3/poll/poll.go b/vendor/gotest.tools/v3/poll/poll.go deleted file mode 100644 index 29c5b40e187f..000000000000 --- a/vendor/gotest.tools/v3/poll/poll.go +++ /dev/null @@ -1,171 +0,0 @@ -/*Package poll provides tools for testing asynchronous code. 
- */ -package poll // import "gotest.tools/v3/poll" - -import ( - "fmt" - "strings" - "time" - - "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/internal/assert" -) - -// TestingT is the subset of testing.T used by WaitOn -type TestingT interface { - LogT - Fatalf(format string, args ...interface{}) -} - -// LogT is a logging interface that is passed to the WaitOn check function -type LogT interface { - Log(args ...interface{}) - Logf(format string, args ...interface{}) -} - -type helperT interface { - Helper() -} - -// Settings are used to configure the behaviour of WaitOn -type Settings struct { - // Timeout is the maximum time to wait for the condition. Defaults to 10s. - Timeout time.Duration - // Delay is the time to sleep between checking the condition. Defaults to - // 100ms. - Delay time.Duration -} - -func defaultConfig() *Settings { - return &Settings{Timeout: 10 * time.Second, Delay: 100 * time.Millisecond} -} - -// SettingOp is a function which accepts and modifies Settings -type SettingOp func(config *Settings) - -// WithDelay sets the delay to wait between polls -func WithDelay(delay time.Duration) SettingOp { - return func(config *Settings) { - config.Delay = delay - } -} - -// WithTimeout sets the timeout -func WithTimeout(timeout time.Duration) SettingOp { - return func(config *Settings) { - config.Timeout = timeout - } -} - -// Result of a check performed by WaitOn -type Result interface { - // Error indicates that the check failed and polling should stop, and the - // the has failed - Error() error - // Done indicates that polling should stop, and the test should proceed - Done() bool - // Message provides the most recent state when polling has not completed - Message() string -} - -type result struct { - done bool - message string - err error -} - -func (r result) Done() bool { - return r.done -} - -func (r result) Message() string { - return r.message -} - -func (r result) Error() error { - return r.err -} - -// Continue returns a Result that indicates to WaitOn that it should continue -// polling. The message text will be used as the failure message if the timeout -// is reached. -func Continue(message string, args ...interface{}) Result { - return result{message: fmt.Sprintf(message, args...)} -} - -// Success returns a Result where Done() returns true, which indicates to WaitOn -// that it should stop polling and exit without an error. -func Success() Result { - return result{done: true} -} - -// Error returns a Result that indicates to WaitOn that it should fail the test -// and stop polling. -func Error(err error) Result { - return result{err: err} -} - -// WaitOn a condition or until a timeout. Poll by calling check and exit when -// check returns a done Result. To fail a test and exit polling with an error -// return a error result. 
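The poll API removed here combines naturally with the Connection check from check.go above; a sketch (the address and test name are hypothetical):

```go
package example_test

import (
	"testing"
	"time"

	"gotest.tools/v3/poll"
)

func TestDaemonSocket(t *testing.T) {
	// Connection returns Continue until the dial succeeds; WaitOn fails the
	// test via t.Fatalf if the timeout elapses first.
	poll.WaitOn(t, poll.Connection("tcp", "127.0.0.1:2375"),
		poll.WithTimeout(5*time.Second), poll.WithDelay(100*time.Millisecond))
}
```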
-func WaitOn(t TestingT, check Check, pollOps ...SettingOp) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - config := defaultConfig() - for _, pollOp := range pollOps { - pollOp(config) - } - - var lastMessage string - after := time.After(config.Timeout) - chResult := make(chan Result) - for { - go func() { - chResult <- check(t) - }() - select { - case <-after: - if lastMessage == "" { - lastMessage = "first check never completed" - } - t.Fatalf("timeout hit after %s: %s", config.Timeout, lastMessage) - case result := <-chResult: - switch { - case result.Error() != nil: - t.Fatalf("polling check failed: %s", result.Error()) - case result.Done(): - return - } - time.Sleep(config.Delay) - lastMessage = result.Message() - } - } -} - -// Compare values using the cmp.Comparison. If the comparison fails return a -// result which indicates to WaitOn that it should continue waiting. -// If the comparison is successful then WaitOn stops polling. -func Compare(compare cmp.Comparison) Result { - buf := new(logBuffer) - if assert.RunComparison(buf, assert.ArgsAtZeroIndex, compare) { - return Success() - } - return Continue(buf.String()) -} - -type logBuffer struct { - log [][]interface{} -} - -func (c *logBuffer) Log(args ...interface{}) { - c.log = append(c.log, args) -} - -func (c *logBuffer) String() string { - b := new(strings.Builder) - for _, item := range c.log { - b.WriteString(fmt.Sprint(item...) + " ") - } - return b.String() -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2df8f1e21e93..414a396bf6e3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,64 @@ -# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 -## explicit; go 1.16 -github.com/Azure/go-ansiterm -github.com/Azure/go-ansiterm/winterm -# github.com/Microsoft/go-winio v0.5.1 -## explicit; go 1.12 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/azcore/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming +github.com/Azure/azure-sdk-for-go/sdk/azcore/to +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azidentity +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/internal/diag +github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo +github.com/Azure/azure-sdk-for-go/sdk/internal/log +github.com/Azure/azure-sdk-for-go/sdk/internal/temporal +github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal +# github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0 +## 
explicit; go 1.17 +github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache +github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential +github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version +github.com/AzureAD/microsoft-authentication-library-for-go/apps/public +# github.com/Microsoft/go-winio v0.5.2 +## explicit; go 1.13 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.9.2 +# github.com/Microsoft/hcsshim v0.9.6 ## explicit; go 1.13 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -22,11 +71,13 @@ github.com/Microsoft/hcsshim/internal/hcs/schema2 github.com/Microsoft/hcsshim/internal/hcserror github.com/Microsoft/hcsshim/internal/hns github.com/Microsoft/hcsshim/internal/interop +github.com/Microsoft/hcsshim/internal/jobobject github.com/Microsoft/hcsshim/internal/log github.com/Microsoft/hcsshim/internal/logfields github.com/Microsoft/hcsshim/internal/longpath github.com/Microsoft/hcsshim/internal/mergemaps github.com/Microsoft/hcsshim/internal/oc +github.com/Microsoft/hcsshim/internal/queue github.com/Microsoft/hcsshim/internal/regstate github.com/Microsoft/hcsshim/internal/runhcs github.com/Microsoft/hcsshim/internal/safefile @@ -42,6 +93,113 @@ github.com/agext/levenshtein # github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 ## explicit github.com/armon/circbuf +# github.com/aws/aws-sdk-go-v2 v1.16.3 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2 +github.com/aws/aws-sdk-go-v2/aws +github.com/aws/aws-sdk-go-v2/aws/arn +github.com/aws/aws-sdk-go-v2/aws/defaults +github.com/aws/aws-sdk-go-v2/aws/middleware +github.com/aws/aws-sdk-go-v2/aws/protocol/query +github.com/aws/aws-sdk-go-v2/aws/protocol/restjson +github.com/aws/aws-sdk-go-v2/aws/protocol/xml +github.com/aws/aws-sdk-go-v2/aws/ratelimit +github.com/aws/aws-sdk-go-v2/aws/retry +github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 +github.com/aws/aws-sdk-go-v2/aws/signer/v4 
+github.com/aws/aws-sdk-go-v2/aws/transport/http +github.com/aws/aws-sdk-go-v2/internal/awsutil +github.com/aws/aws-sdk-go-v2/internal/rand +github.com/aws/aws-sdk-go-v2/internal/sdk +github.com/aws/aws-sdk-go-v2/internal/sdkio +github.com/aws/aws-sdk-go-v2/internal/strings +github.com/aws/aws-sdk-go-v2/internal/sync/singleflight +github.com/aws/aws-sdk-go-v2/internal/timeconv +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi +# github.com/aws/aws-sdk-go-v2/config v1.15.5 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/config +# github.com/aws/aws-sdk-go-v2/credentials v1.12.0 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/credentials +github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds +github.com/aws/aws-sdk-go-v2/credentials/endpointcreds +github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client +github.com/aws/aws-sdk-go-v2/credentials/processcreds +github.com/aws/aws-sdk-go-v2/credentials/ssocreds +github.com/aws/aws-sdk-go-v2/credentials/stscreds +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/feature/ec2/imds +github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/feature/s3/manager +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/configsources +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/ini +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/internal/v4a +github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto +github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/internal/checksum +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/internal/s3shared +github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn +github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config +# github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/s3 +github.com/aws/aws-sdk-go-v2/service/s3/internal/arn +github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations +github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/s3/types +# github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/sso +github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/sso/types +# github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 +## explicit; go 1.15 +github.com/aws/aws-sdk-go-v2/service/sts 
+github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/sts/types +# github.com/aws/smithy-go v1.11.2 +## explicit; go 1.15 +github.com/aws/smithy-go +github.com/aws/smithy-go/document +github.com/aws/smithy-go/encoding +github.com/aws/smithy-go/encoding/httpbinding +github.com/aws/smithy-go/encoding/xml +github.com/aws/smithy-go/io +github.com/aws/smithy-go/logging +github.com/aws/smithy-go/middleware +github.com/aws/smithy-go/ptr +github.com/aws/smithy-go/rand +github.com/aws/smithy-go/sync +github.com/aws/smithy-go/time +github.com/aws/smithy-go/transport/http +github.com/aws/smithy-go/transport/http/internal/io +github.com/aws/smithy-go/waiter # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -51,14 +209,14 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.1.2 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/containerd/cgroups v1.0.3 -## explicit; go 1.16 +# github.com/containerd/cgroups v1.0.4 +## explicit; go 1.17 github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.6.1 -## explicit; go 1.16 +# github.com/containerd/containerd v1.6.18 +## explicit; go 1.17 github.com/containerd/containerd github.com/containerd/containerd/api/services/containers/v1 github.com/containerd/containerd/api/services/content/v1 @@ -80,6 +238,7 @@ github.com/containerd/containerd/containers github.com/containerd/containerd/content github.com/containerd/containerd/content/local github.com/containerd/containerd/content/proxy +github.com/containerd/containerd/contrib/seccomp/kernelversion github.com/containerd/containerd/defaults github.com/containerd/containerd/diff github.com/containerd/containerd/diff/apply @@ -104,6 +263,7 @@ github.com/containerd/containerd/namespaces github.com/containerd/containerd/oci github.com/containerd/containerd/pkg/cap github.com/containerd/containerd/pkg/dialer +github.com/containerd/containerd/pkg/kmutex github.com/containerd/containerd/pkg/seccomp github.com/containerd/containerd/pkg/seed github.com/containerd/containerd/pkg/userns @@ -130,8 +290,8 @@ github.com/containerd/containerd/snapshots/proxy github.com/containerd/containerd/snapshots/storage github.com/containerd/containerd/sys github.com/containerd/containerd/version -# github.com/containerd/continuity v0.2.2 -## explicit; go 1.13 +# github.com/containerd/continuity v0.3.0 +## explicit; go 1.17 github.com/containerd/continuity github.com/containerd/continuity/devices github.com/containerd/continuity/driver @@ -146,13 +306,18 @@ github.com/containerd/fifo # github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 ## explicit; go 1.16 github.com/containerd/fuse-overlayfs-snapshotter -# github.com/containerd/go-cni v1.1.3 -## explicit; go 1.13 +# github.com/containerd/go-cni v1.1.6 +## explicit; go 1.17 github.com/containerd/go-cni # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter v0.11.2 +# github.com/containerd/nydus-snapshotter v0.3.1 +## explicit; go 1.17 +github.com/containerd/nydus-snapshotter/pkg/converter +github.com/containerd/nydus-snapshotter/pkg/converter/tool +github.com/containerd/nydus-snapshotter/pkg/errdefs +# github.com/containerd/stargz-snapshotter v0.13.0 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/cache github.com/containerd/stargz-snapshotter/fs @@ -166,15 +331,15 @@ 
github.com/containerd/stargz-snapshotter/fs/source github.com/containerd/stargz-snapshotter/metadata github.com/containerd/stargz-snapshotter/metadata/memory github.com/containerd/stargz-snapshotter/snapshot -github.com/containerd/stargz-snapshotter/snapshot/overlayutils github.com/containerd/stargz-snapshotter/task github.com/containerd/stargz-snapshotter/util/cacheutil github.com/containerd/stargz-snapshotter/util/namedmutex github.com/containerd/stargz-snapshotter/util/testutil -# github.com/containerd/stargz-snapshotter/estargz v0.11.2 +# github.com/containerd/stargz-snapshotter/estargz v0.13.0 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil +github.com/containerd/stargz-snapshotter/estargz/externaltoc github.com/containerd/stargz-snapshotter/estargz/zstdchunked # github.com/containerd/ttrpc v1.1.0 ## explicit; go 1.13 @@ -182,7 +347,7 @@ github.com/containerd/ttrpc # github.com/containerd/typeurl v1.0.2 ## explicit; go 1.13 github.com/containerd/typeurl -# github.com/containernetworking/cni v1.0.1 +# github.com/containernetworking/cni v1.1.1 ## explicit; go 1.14 github.com/containernetworking/cni/libcni github.com/containernetworking/cni/pkg/invoke @@ -194,12 +359,12 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/coreos/go-systemd/v22 v22.3.2 +# github.com/coreos/go-systemd/v22 v22.4.0 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/activation github.com/coreos/go-systemd/v22/daemon -# github.com/cpuguy83/go-md2man/v2 v2.0.0 -## explicit; go 1.12 +# github.com/cpuguy83/go-md2man/v2 v2.0.2 +## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.1 ## explicit @@ -207,19 +372,18 @@ github.com/davecgh/go-spew/spew # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/docker/cli v20.10.12+incompatible +# github.com/docker/cli v23.0.0-rc.1+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile github.com/docker/cli/cli/config/credentials github.com/docker/cli/cli/config/types github.com/docker/cli/cli/connhelper/commandconn -# github.com/docker/distribution v2.8.0+incompatible +# github.com/docker/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution/digestset github.com/docker/distribution/reference -github.com/docker/distribution/registry/api/errcode -# github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible +# github.com/docker/docker v23.0.0-rc.1+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -239,29 +403,19 @@ github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client github.com/docker/docker/errdefs -github.com/docker/docker/libnetwork/ipamutils github.com/docker/docker/libnetwork/resolvconf -github.com/docker/docker/libnetwork/types -github.com/docker/docker/opts github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/chrootarchive -github.com/docker/docker/pkg/fileutils github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/ioutils -github.com/docker/docker/pkg/jsonmessage github.com/docker/docker/pkg/longpath github.com/docker/docker/pkg/pools 
github.com/docker/docker/pkg/reexec -github.com/docker/docker/pkg/stringid github.com/docker/docker/pkg/system github.com/docker/docker/profiles/seccomp -github.com/docker/docker/testutil/daemon -github.com/docker/docker/testutil/environment -github.com/docker/docker/testutil/fixtures/load -github.com/docker/docker/testutil/request -# github.com/docker/docker-credential-helpers v0.6.4 -## explicit; go 1.13 +# github.com/docker/docker-credential-helpers v0.7.0 +## explicit; go 1.18 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials # github.com/docker/go-connections v0.4.0 @@ -275,20 +429,20 @@ github.com/docker/go-events # github.com/docker/go-metrics v0.0.1 ## explicit; go 1.11 github.com/docker/go-metrics -# github.com/docker/go-units v0.4.0 +# github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units # github.com/felixge/httpsnoop v1.0.2 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/go-logr/logr v1.2.2 +# github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/gofrs/flock v0.7.3 +# github.com/gofrs/flock v0.8.1 ## explicit github.com/gofrs/flock # github.com/gogo/googleapis v1.4.1 @@ -297,12 +451,37 @@ github.com/gogo/googleapis/google/rpc # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/gogoproto +github.com/gogo/protobuf/plugin/compare +github.com/gogo/protobuf/plugin/defaultcheck +github.com/gogo/protobuf/plugin/description +github.com/gogo/protobuf/plugin/embedcheck +github.com/gogo/protobuf/plugin/enumstringer +github.com/gogo/protobuf/plugin/equal +github.com/gogo/protobuf/plugin/face +github.com/gogo/protobuf/plugin/gostring +github.com/gogo/protobuf/plugin/marshalto +github.com/gogo/protobuf/plugin/oneofcheck +github.com/gogo/protobuf/plugin/populate +github.com/gogo/protobuf/plugin/size +github.com/gogo/protobuf/plugin/stringer +github.com/gogo/protobuf/plugin/testgen +github.com/gogo/protobuf/plugin/union +github.com/gogo/protobuf/plugin/unmarshal github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo github.com/gogo/protobuf/protoc-gen-gogo/descriptor +github.com/gogo/protobuf/protoc-gen-gogo/generator +github.com/gogo/protobuf/protoc-gen-gogo/generator/internal/remap +github.com/gogo/protobuf/protoc-gen-gogo/grpc +github.com/gogo/protobuf/protoc-gen-gogo/plugin +github.com/gogo/protobuf/protoc-gen-gogofaster +github.com/gogo/protobuf/protoc-gen-gogoslick github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/types -# github.com/golang-jwt/jwt/v4 v4.1.0 -## explicit; go 1.15 +github.com/gogo/protobuf/vanity +github.com/gogo/protobuf/vanity/command +# github.com/golang-jwt/jwt/v4 v4.4.2 +## explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit @@ -310,16 +489,18 @@ github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 github.com/golang/protobuf/descriptor +github.com/golang/protobuf/internal/gengogrpc github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto +github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/go-cmp v0.5.7 
-## explicit; go 1.11 +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -358,24 +539,35 @@ github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-retryablehttp v0.7.0 +# github.com/hashicorp/go-retryablehttp v0.7.1 ## explicit; go 1.13 github.com/hashicorp/go-retryablehttp -# github.com/hashicorp/golang-lru v0.5.3 +# github.com/hashicorp/golang-lru v0.5.4 ## explicit; go 1.12 github.com/hashicorp/golang-lru/simplelru -# github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee -## explicit; go 1.12 -github.com/ishidawataru/sctp -# github.com/klauspost/compress v1.15.0 -## explicit; go 1.15 +# github.com/in-toto/in-toto-golang v0.5.0 +## explicit; go 1.17 +github.com/in-toto/in-toto-golang/in_toto +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1 +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2 +# github.com/jmespath/go-jmespath v0.4.0 +## explicit; go 1.14 +github.com/jmespath/go-jmespath +# github.com/klauspost/compress v1.15.12 +## explicit; go 1.17 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 +# github.com/kylelemons/godebug v1.1.0 +## explicit; go 1.11 +github.com/kylelemons/godebug/diff +github.com/kylelemons/godebug/pretty +# github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mitchellh/hashstructure/v2 v2.0.2 @@ -384,45 +576,53 @@ github.com/mitchellh/hashstructure/v2 # github.com/moby/locker v1.0.1 ## explicit; go 1.13 github.com/moby/locker -# github.com/moby/sys/mount v0.3.0 +# github.com/moby/patternmatcher v0.5.0 +## explicit; go 1.19 +github.com/moby/patternmatcher +# github.com/moby/sys/mount v0.3.3 ## explicit; go 1.16 github.com/moby/sys/mount -# github.com/moby/sys/mountinfo v0.6.0 +# github.com/moby/sys/mountinfo v0.6.2 ## explicit; go 1.16 github.com/moby/sys/mountinfo -# github.com/moby/sys/signal v0.6.0 +# github.com/moby/sys/sequential v0.5.0 +## explicit; go 1.17 +github.com/moby/sys/sequential +# github.com/moby/sys/signal v0.7.0 ## explicit; go 1.16 github.com/moby/sys/signal -# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 -## explicit; go 1.13 -github.com/moby/term -github.com/moby/term/windows # github.com/morikuni/aec v1.0.0 ## explicit github.com/morikuni/aec # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 -## explicit +# github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 +## explicit; go 1.16 github.com/opencontainers/image-spec/identity github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.1.0 +# github.com/opencontainers/runc v1.1.3 ## explicit; go 1.16 github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 
## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/selinux v1.10.0 +# github.com/opencontainers/selinux v1.10.2 ## explicit; go 1.13 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/pelletier/go-toml v1.9.4 +# github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 +## explicit; go 1.17 +github.com/package-url/packageurl-go +# github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml +# github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 +## explicit; go 1.14 +github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -432,46 +632,56 @@ github.com/pkg/profile # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.12.1 -## explicit; go 1.13 +# github.com/prometheus/client_golang v1.14.0 +## explicit; go 1.17 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.2.0 +# github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.32.1 -## explicit; go 1.13 +# github.com/prometheus/common v0.37.0 +## explicit; go 1.16 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.7.3 -## explicit; go 1.13 +# github.com/prometheus/procfs v0.8.0 +## explicit; go 1.17 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/russross/blackfriday/v2 v2.0.1 +# github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/secure-systems-lab/go-securesystemslib v0.4.0 +## explicit; go 1.17 +github.com/secure-systems-lab/go-securesystemslib/cjson +github.com/secure-systems-lab/go-securesystemslib/dsse # github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 ## explicit github.com/serialx/hashring -# github.com/shurcooL/sanitized_anchor_name v1.0.0 -## explicit -github.com/shurcooL/sanitized_anchor_name -# github.com/sirupsen/logrus v1.8.1 +# github.com/shibumi/go-pathspec v1.3.0 +## explicit; go 1.17 +github.com/shibumi/go-pathspec +# github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/stretchr/testify v1.7.0 +# github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f +## explicit; go 1.13 +github.com/spdx/tools-golang/json +github.com/spdx/tools-golang/spdx/common +github.com/spdx/tools-golang/spdx/v2_2 +github.com/spdx/tools-golang/spdx/v2_3 +# github.com/stretchr/testify v1.8.0 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 -## explicit; go 1.13 +# github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa +## explicit; go 1.18 github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/types -# github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff +# github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 ## explicit; go 1.16 github.com/tonistiigi/go-actions-cache # 
github.com/tonistiigi/go-archvariant v1.0.0 @@ -559,6 +769,7 @@ go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/trace v1.4.1 ## explicit; go 1.16 go.opentelemetry.io/otel/trace @@ -568,23 +779,23 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/crypto v0.0.0-20211202192323-5770296d904e +# golang.org/x/crypto v0.2.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 -golang.org/x/crypto/ed25519/internal/edwards25519 +golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/internal/subtle golang.org/x/crypto/nacl/sign +golang.org/x/crypto/pkcs12 +golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/net v0.0.0-20211216030914-fe4d6282115f +# golang.org/x/net v0.4.0 ## explicit; go 1.17 -golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -594,12 +805,12 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 +# golang.org/x/sys v0.3.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -607,23 +818,23 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.5.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac +# golang.org/x/time v0.1.0 ## explicit golang.org/x/time/rate -# google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa -## explicit; go 1.11 +# google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 +## explicit; go 1.15 google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.44.0 -## explicit; go 1.14 +# google.golang.org/grpc v1.50.1 +## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -632,6 +843,7 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -644,6 +856,7 @@ google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog 
google.golang.org/grpc/internal/buffer @@ -655,6 +868,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -672,8 +886,10 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 +# google.golang.org/protobuf v1.28.1 +## explicit; go 1.11 +google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo +google.golang.org/protobuf/compiler/protogen google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -708,16 +924,7 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +google.golang.org/protobuf/types/pluginpb +# gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# gotest.tools/v3 v3.0.3 -## explicit; go 1.11 -gotest.tools/v3/assert -gotest.tools/v3/assert/cmp -gotest.tools/v3/internal/assert -gotest.tools/v3/internal/difflib -gotest.tools/v3/internal/format -gotest.tools/v3/internal/source -gotest.tools/v3/poll -# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible diff --git a/version/version.go b/version/version.go index 137c4a13fb20..49640f0f8689 100644 --- a/version/version.go +++ b/version/version.go @@ -45,18 +45,18 @@ var ( ) func UserAgent() string { - version := defaultVersion + uaVersion := defaultVersion reOnce.Do(func() { reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`) reDev = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`) }) - if matches := reRelease.FindAllStringSubmatch(version, 1); len(matches) > 0 { - version = matches[0][1] - } else if matches := reDev.FindAllStringSubmatch(version, 1); len(matches) > 0 { - version = matches[0][1] + "-dev" + if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + } else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + "-dev" } - return "buildkit/" + version + return "buildkit/" + uaVersion } diff --git a/worker/base/worker.go b/worker/base/worker.go index fa8b7692d9fd..2c3e4defd17f 100644 --- a/worker/base/worker.go +++ b/worker/base/worker.go @@ -3,7 +3,6 @@ package base import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -16,6 +15,7 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/idtools" + "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" @@ -42,6 +42,7 @@ import ( "github.com/moby/buildkit/source/local" "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress/controller" digest "github.com/opencontainers/go-digest" @@ -58,33 +59,36 @@ const labelCreatedAt = "buildkit/createdat" // WorkerOpt is specific to a worker. 
// See also CommonOpt. type WorkerOpt struct { - ID string - Labels map[string]string - Platforms []ocispecs.Platform - GCPolicy []client.PruneInfo - Executor executor.Executor - Snapshotter snapshot.Snapshotter - ContentStore content.Store - Applier diff.Applier - Differ diff.Comparer - ImageStore images.Store // optional - RegistryHosts docker.RegistryHosts - IdentityMapping *idtools.IdentityMapping - LeaseManager leases.Manager - GarbageCollect func(context.Context) (gc.Stats, error) - ParallelismSem *semaphore.Weighted - MetadataStore *metadata.Store - MountPoolRoot string + ID string + Labels map[string]string + Platforms []ocispecs.Platform + GCPolicy []client.PruneInfo + BuildkitVersion client.BuildkitVersion + NetworkProviders map[pb.NetMode]network.Provider + Executor executor.Executor + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer + ImageStore images.Store // optional + RegistryHosts docker.RegistryHosts + IdentityMapping *idtools.IdentityMapping + LeaseManager leases.Manager + GarbageCollect func(context.Context) (gc.Stats, error) + ParallelismSem *semaphore.Weighted + MetadataStore *metadata.Store + MountPoolRoot string } // Worker is a local worker instance with dedicated snapshotter, cache, and so on. // TODO: s/Worker/OpWorker/g ? type Worker struct { WorkerOpt - CacheMgr cache.Manager - SourceManager *source.Manager - imageWriter *imageexporter.ImageWriter - ImageSource *containerimage.Source + CacheMgr cache.Manager + SourceManager *source.Manager + imageWriter *imageexporter.ImageWriter + ImageSource *containerimage.Source + OCILayoutSource *containerimage.Source } // NewWorker instantiates a local worker @@ -121,6 +125,7 @@ func NewWorker(ctx context.Context, opt WorkerOpt) (*Worker, error) { ImageStore: opt.ImageStore, CacheAccessor: cm, RegistryHosts: opt.RegistryHosts, + ResolverType: containerimage.ResolverTypeRegistry, LeaseManager: opt.LeaseManager, }) if err != nil { @@ -158,6 +163,21 @@ func NewWorker(ctx context.Context, opt WorkerOpt) (*Worker, error) { } sm.Register(ss) + os, err := containerimage.NewSource(containerimage.SourceOpt{ + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + ImageStore: opt.ImageStore, + CacheAccessor: cm, + ResolverType: containerimage.ResolverTypeOCILayout, + LeaseManager: opt.LeaseManager, + }) + if err != nil { + return nil, err + } + + sm.Register(os) + iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{ Snapshotter: opt.Snapshotter, ContentStore: opt.ContentStore, @@ -177,18 +197,33 @@ func NewWorker(ctx context.Context, opt WorkerOpt) (*Worker, error) { } return &Worker{ - WorkerOpt: opt, - CacheMgr: cm, - SourceManager: sm, - imageWriter: iw, - ImageSource: is, + WorkerOpt: opt, + CacheMgr: cm, + SourceManager: sm, + imageWriter: iw, + ImageSource: is, + OCILayoutSource: os, }, nil } +func (w *Worker) Close() error { + var rerr error + for _, provider := range w.NetworkProviders { + if err := provider.Close(); err != nil { + rerr = multierror.Append(rerr, err) + } + } + return rerr +} + func (w *Worker) ContentStore() content.Store { return w.WorkerOpt.ContentStore } +func (w *Worker) LeaseManager() leases.Manager { + return w.WorkerOpt.LeaseManager +} + func (w *Worker) ID() string { return w.WorkerOpt.ID } @@ -219,6 +254,10 @@ func (w *Worker) GCPolicy() []client.PruneInfo { return w.WorkerOpt.GCPolicy } +func (w *Worker) BuildkitVersion() client.BuildkitVersion { + return w.WorkerOpt.BuildkitVersion +} + func 
(w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
 	var opts []cache.RefOption
 	if hidden {
@@ -230,20 +269,11 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
 		return nil, nil
 	}
 
-	var pg progress.Controller
-	optGetter := solver.CacheOptGetterOf(ctx)
-	if optGetter != nil {
-		if kv := optGetter(false, cache.ProgressKey{}); kv != nil {
-			if v, ok := kv[cache.ProgressKey{}].(progress.Controller); ok {
-				pg = v
-			}
-		}
-	}
-
+	pg := solver.ProgressControllerFromContext(ctx)
 	ref, err := w.CacheMgr.Get(ctx, id, pg, opts...)
 	var needsRemoteProviders cache.NeedsRemoteProviderError
 	if errors.As(err, &needsRemoteProviders) {
-		if optGetter != nil {
+		if optGetter := solver.CacheOptGetterOf(ctx); optGetter != nil {
 			var keys []interface{}
 			for _, dgst := range needsRemoteProviders {
 				keys = append(keys, cache.DescHandlerKey(dgst))
@@ -325,6 +355,14 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
 }
 
 func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
+	// is this a registry source or an OCI layout source?
+	switch opt.ResolverType {
+	case llb.ResolverTypeOCILayout:
+		return w.OCILayoutSource.ResolveImageConfig(ctx, ref, opt, sm, g)
+	// ideally we would add an explicit case llb.ResolverTypeRegistry and a default here,
+	// but then Go complains about a missing return statement,
+	// so we fall through to the registry path below
+	}
 	return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
 }
 
@@ -344,7 +382,7 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
 			SessionManager: sm,
 			ImageWriter:    w.imageWriter,
 			RegistryHosts:  w.RegistryHosts,
-			LeaseManager:   w.LeaseManager,
+			LeaseManager:   w.LeaseManager(),
 		})
 	case client.ExporterLocal:
 		return localexporter.New(localexporter.Opt{
@@ -359,14 +397,14 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
 			SessionManager: sm,
 			ImageWriter:    w.imageWriter,
 			Variant:        ociexporter.VariantOCI,
-			LeaseManager:   w.LeaseManager,
+			LeaseManager:   w.LeaseManager(),
 		})
 	case client.ExporterDocker:
 		return ociexporter.New(ociexporter.Opt{
 			SessionManager: sm,
 			ImageWriter:    w.imageWriter,
 			Variant:        ociexporter.VariantDocker,
-			LeaseManager:   w.LeaseManager,
+			LeaseManager:   w.LeaseManager(),
 		})
 	default:
 		return nil, errors.Errorf("exporter %q could not be found", name)
@@ -392,15 +430,7 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cac
 		}
 	}
 
-	var pg progress.Controller
-	optGetter := solver.CacheOptGetterOf(ctx)
-	if optGetter != nil {
-		if kv := optGetter(false, cache.ProgressKey{}); kv != nil {
-			if v, ok := kv[cache.ProgressKey{}].(progress.Controller); ok {
-				pg = v
-			}
-		}
-	}
+	pg := solver.ProgressControllerFromContext(ctx)
 	if pg == nil {
 		pg = &controller.Controller{
 			WriterFactory: progress.FromContext(ctx),
@@ -447,6 +477,14 @@ func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cac
 		cache.WithCreationTime(tm),
 		descHandlers,
 	}
+	if ul, ok := remote.Provider.(interface {
+		UnlazySession(ocispecs.Descriptor) session.Group
+	}); ok {
+		s := ul.UnlazySession(desc)
+		if s != nil {
+			opts = append(opts, cache.Unlazy(s))
+		}
+	}
 	if dh, ok := descHandlers[desc.Digest]; ok {
 		if ref, ok := dh.Annotations["containerd.io/distribution.source.ref"]; ok {
 			opts = append(opts, cache.WithImageRef(ref)) // can set by registry cache importer
@@ -468,11 +506,11 @@ func (w 
*Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cac // If not exist, it creates a random one, func ID(root string) (string, error) { f := filepath.Join(root, "workerid") - b, err := ioutil.ReadFile(f) + b, err := os.ReadFile(f) if err != nil { if errors.Is(err, os.ErrNotExist) { id := identity.NewID() - err := ioutil.WriteFile(f, []byte(id), 0400) + err := os.WriteFile(f, []byte(id), 0400) return id, err } return "", err diff --git a/worker/base/worker_test.go b/worker/base/worker_test.go index a80372baaaf0..290fa2f6ddc5 100644 --- a/worker/base/worker_test.go +++ b/worker/base/worker_test.go @@ -1,7 +1,6 @@ package base import ( - "io/ioutil" "os" "testing" @@ -10,8 +9,7 @@ import ( func TestID(t *testing.T) { t.Parallel() - tmpdir, err := ioutil.TempDir("", "worker-base-test-id") - require.NoError(t, err) + tmpdir := t.TempDir() id0, err := ID(tmpdir) require.NoError(t, err) @@ -29,6 +27,4 @@ func TestID(t *testing.T) { require.NoError(t, err) require.NotEqual(t, id0, id2) - - require.NoError(t, os.RemoveAll(tmpdir)) } diff --git a/worker/containerd/containerd.go b/worker/containerd/containerd.go index c671c99e3c99..a829d457575b 100644 --- a/worker/containerd/containerd.go +++ b/worker/containerd/containerd.go @@ -4,6 +4,7 @@ import ( "context" "os" "path/filepath" + "strconv" "strings" "github.com/containerd/containerd" @@ -18,24 +19,24 @@ import ( "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network/netproviders" "github.com/moby/buildkit/util/winlayers" - "github.com/moby/buildkit/worker" "github.com/moby/buildkit/worker/base" + wlabel "github.com/moby/buildkit/worker/label" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) // NewWorkerOpt creates a WorkerOpt. -func NewWorkerOpt(root string, address, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, parallelismSem *semaphore.Weighted, traceSocket string, opts ...containerd.ClientOpt) (base.WorkerOpt, error) { +func NewWorkerOpt(root string, address, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, selinux bool, parallelismSem *semaphore.Weighted, traceSocket string, opts ...containerd.ClientOpt) (base.WorkerOpt, error) { opts = append(opts, containerd.WithDefaultNamespace(ns)) client, err := containerd.New(address, opts...) if err != nil { return base.WorkerOpt{}, errors.Wrapf(err, "failed to connect client to %q . 
make sure containerd is running", address) } - return newContainerd(root, client, snapshotterName, ns, rootless, labels, dns, nopt, apparmorProfile, parallelismSem, traceSocket) + return newContainerd(root, client, snapshotterName, ns, rootless, labels, dns, nopt, apparmorProfile, selinux, parallelismSem, traceSocket) } -func newContainerd(root string, client *containerd.Client, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, parallelismSem *semaphore.Weighted, traceSocket string) (base.WorkerOpt, error) { +func newContainerd(root string, client *containerd.Client, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, selinux bool, parallelismSem *semaphore.Weighted, traceSocket string) (base.WorkerOpt, error) { if strings.Contains(snapshotterName, "/") { return base.WorkerOpt{}, errors.Errorf("bad snapshotter name: %q", snapshotterName) } @@ -67,16 +68,17 @@ func newContainerd(root string, client *containerd.Client, snapshotterName, ns s hostname = "unknown" } xlabels := map[string]string{ - worker.LabelExecutor: "containerd", - worker.LabelSnapshotter: snapshotterName, - worker.LabelHostname: hostname, - worker.LabelNetwork: npResolvedMode, + wlabel.Executor: "containerd", + wlabel.Snapshotter: snapshotterName, + wlabel.Hostname: hostname, + wlabel.Network: npResolvedMode, + wlabel.SELinuxEnabled: strconv.FormatBool(selinux), } if apparmorProfile != "" { - xlabels[worker.LabelApparmorProfile] = apparmorProfile + xlabels[wlabel.ApparmorProfile] = apparmorProfile } - xlabels[worker.LabelContainerdNamespace] = ns - xlabels[worker.LabelContainerdUUID] = serverInfo.UUID + xlabels[wlabel.ContainerdNamespace] = ns + xlabels[wlabel.ContainerdUUID] = serverInfo.UUID for k, v := range labels { xlabels[k] = v } @@ -131,20 +133,21 @@ func newContainerd(root string, client *containerd.Client, snapshotterName, ns s } opt := base.WorkerOpt{ - ID: id, - Labels: xlabels, - MetadataStore: md, - Executor: containerdexecutor.New(client, root, "", np, dns, apparmorProfile, traceSocket, rootless), - Snapshotter: snap, - ContentStore: cs, - Applier: winlayers.NewFileSystemApplierWithWindows(cs, df), - Differ: winlayers.NewWalkingDiffWithWindows(cs, df), - ImageStore: client.ImageService(), - Platforms: platforms, - LeaseManager: lm, - GarbageCollect: gc, - ParallelismSem: parallelismSem, - MountPoolRoot: filepath.Join(root, "cachemounts"), + ID: id, + Labels: xlabels, + MetadataStore: md, + NetworkProviders: np, + Executor: containerdexecutor.New(client, root, "", np, dns, apparmorProfile, selinux, traceSocket, rootless), + Snapshotter: snap, + ContentStore: cs, + Applier: winlayers.NewFileSystemApplierWithWindows(cs, df), + Differ: winlayers.NewWalkingDiffWithWindows(cs, df), + ImageStore: client.ImageService(), + Platforms: platforms, + LeaseManager: lm, + GarbageCollect: gc, + ParallelismSem: parallelismSem, + MountPoolRoot: filepath.Join(root, "cachemounts"), } return opt, nil } diff --git a/worker/containerd/containerd_test.go b/worker/containerd/containerd_test.go index c3c5286b027d..2084da078120 100644 --- a/worker/containerd/containerd_test.go +++ b/worker/containerd/containerd_test.go @@ -5,7 +5,6 @@ package containerd import ( "context" - "io/ioutil" "os" "testing" @@ -28,14 +27,12 @@ func TestContainerdWorkerIntegration(t *testing.T) { )) } -func newWorkerOpt(t *testing.T, addr string) (base.WorkerOpt, func()) { - tmpdir, err := 
ioutil.TempDir("", "workertest") - require.NoError(t, err) - cleanup := func() { os.RemoveAll(tmpdir) } +func newWorkerOpt(t *testing.T, addr string) base.WorkerOpt { + tmpdir := t.TempDir() rootless := false - workerOpt, err := NewWorkerOpt(tmpdir, addr, "overlayfs", "buildkit-test", rootless, nil, nil, netproviders.Opt{Mode: "host"}, "", nil, "") + workerOpt, err := NewWorkerOpt(tmpdir, addr, "overlayfs", "buildkit-test", rootless, nil, nil, netproviders.Opt{Mode: "host"}, "", false, nil, "") require.NoError(t, err) - return workerOpt, cleanup + return workerOpt } func checkRequirement(t *testing.T) { @@ -48,8 +45,7 @@ func testContainerdWorkerExec(t *testing.T, sb integration.Sandbox) { if sb.Rootless() { t.Skip("requires root") } - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, sb.ContainerdAddress()) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, sb.ContainerdAddress()) w, err := base.NewWorker(context.TODO(), workerOpt) require.NoError(t, err) @@ -60,8 +56,7 @@ func testContainerdWorkerExecFailures(t *testing.T, sb integration.Sandbox) { if sb.Rootless() { t.Skip("requires root") } - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, sb.ContainerdAddress()) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, sb.ContainerdAddress()) w, err := base.NewWorker(context.TODO(), workerOpt) require.NoError(t, err) diff --git a/worker/label/label.go b/worker/label/label.go new file mode 100644 index 000000000000..3c08d395cbcc --- /dev/null +++ b/worker/label/label.go @@ -0,0 +1,16 @@ +package label + +// Pre-defined label keys +const ( + prefix = "org.mobyproject.buildkit.worker." + + Executor = prefix + "executor" // "oci" or "containerd" + Snapshotter = prefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) + Hostname = prefix + "hostname" + Network = prefix + "network" // "cni" or "host" + ApparmorProfile = prefix + "apparmor.profile" + SELinuxEnabled = prefix + "selinux.enabled" // "true" or "false" + OCIProcessMode = prefix + "oci.process-mode" // OCI worker: process mode ("sandbox", "no-sandbox") + ContainerdUUID = prefix + "containerd.uuid" // containerd worker: containerd UUID + ContainerdNamespace = prefix + "containerd.namespace" // containerd worker: containerd namespace +) diff --git a/worker/result.go b/worker/result.go index 5691c630f6ff..26054cf8c206 100644 --- a/worker/result.go +++ b/worker/result.go @@ -26,6 +26,13 @@ func (wr *WorkerRef) ID() string { return wr.Worker.ID() + "::" + refID } +func (wr *WorkerRef) Release(ctx context.Context) error { + if wr.ImmutableRef == nil { + return nil + } + return wr.ImmutableRef.Release(ctx) +} + // GetRemotes method abstracts ImmutableRef's GetRemotes to allow a Worker to override. // This is needed for moby integration. // Use this method instead of calling ImmutableRef.GetRemotes() directly. 
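Note on the `Release` helper added to worker/result.go above: it makes releasing a `WorkerRef` safe even when no `ImmutableRef` is attached, so callers no longer need their own nil guard. A minimal sketch of the caller-side pattern this enables; `releaseResult` and the `sys interface{}` shape are assumptions for illustration, not part of this patch:

```go
package example

import (
	"context"

	"github.com/moby/buildkit/worker"
)

// releaseResult frees the cache ref behind a solver result when it
// wraps a *worker.WorkerRef. With the new Release helper the nil
// ImmutableRef case is handled inside the method, so no guard is
// needed here. (Hypothetical helper, shown only to illustrate usage.)
func releaseResult(ctx context.Context, sys interface{}) error {
	wr, ok := sys.(*worker.WorkerRef)
	if !ok {
		return nil
	}
	return wr.Release(ctx)
}
```
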
diff --git a/worker/runc/runc.go b/worker/runc/runc.go index bebee7868d70..d619ad9f0a70 100644 --- a/worker/runc/runc.go +++ b/worker/runc/runc.go @@ -4,6 +4,7 @@ import ( "context" "os" "path/filepath" + "strconv" "github.com/containerd/containerd/content/local" "github.com/containerd/containerd/diff/apply" @@ -20,8 +21,8 @@ import ( "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network/netproviders" "github.com/moby/buildkit/util/winlayers" - "github.com/moby/buildkit/worker" "github.com/moby/buildkit/worker/base" + wlabel "github.com/moby/buildkit/worker/label" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" bolt "go.etcd.io/bbolt" "golang.org/x/sync/semaphore" @@ -34,7 +35,7 @@ type SnapshotterFactory struct { } // NewWorkerOpt creates a WorkerOpt. -func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, processMode oci.ProcessMode, labels map[string]string, idmap *idtools.IdentityMapping, nopt netproviders.Opt, dns *oci.DNSConfig, binary, apparmorProfile string, parallelismSem *semaphore.Weighted, traceSocket, defaultCgroupParent string) (base.WorkerOpt, error) { +func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, processMode oci.ProcessMode, labels map[string]string, idmap *idtools.IdentityMapping, nopt netproviders.Opt, dns *oci.DNSConfig, binary, apparmorProfile string, selinux bool, parallelismSem *semaphore.Weighted, traceSocket, defaultCgroupParent string) (base.WorkerOpt, error) { var opt base.WorkerOpt name := "runc-" + snFactory.Name root = filepath.Join(root, name) @@ -65,6 +66,7 @@ func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, proc IdentityMapping: idmap, DNS: dns, ApparmorProfile: apparmorProfile, + SELinux: selinux, TracingSocket: traceSocket, DefaultCgroupParent: defaultCgroupParent, }, np) @@ -104,14 +106,15 @@ func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, proc hostname = "unknown" } xlabels := map[string]string{ - worker.LabelExecutor: "oci", - worker.LabelSnapshotter: snFactory.Name, - worker.LabelHostname: hostname, - worker.LabelNetwork: npResolvedMode, - worker.LabelOCIProcessMode: processMode.String(), + wlabel.Executor: "oci", + wlabel.Snapshotter: snFactory.Name, + wlabel.Hostname: hostname, + wlabel.Network: npResolvedMode, + wlabel.OCIProcessMode: processMode.String(), + wlabel.SELinuxEnabled: strconv.FormatBool(selinux), } if apparmorProfile != "" { - xlabels[worker.LabelApparmorProfile] = apparmorProfile + xlabels[wlabel.ApparmorProfile] = apparmorProfile } for k, v := range labels { @@ -137,21 +140,22 @@ func NewWorkerOpt(root string, snFactory SnapshotterFactory, rootless bool, proc } opt = base.WorkerOpt{ - ID: id, - Labels: xlabels, - MetadataStore: md, - Executor: exe, - Snapshotter: snap, - ContentStore: c, - Applier: winlayers.NewFileSystemApplierWithWindows(c, apply.NewFileSystemApplier(c)), - Differ: winlayers.NewWalkingDiffWithWindows(c, walking.NewWalkingDiff(c)), - ImageStore: nil, // explicitly - Platforms: []ocispecs.Platform{platforms.Normalize(platforms.DefaultSpec())}, - IdentityMapping: idmap, - LeaseManager: lm, - GarbageCollect: mdb.GarbageCollect, - ParallelismSem: parallelismSem, - MountPoolRoot: filepath.Join(root, "cachemounts"), + ID: id, + Labels: xlabels, + MetadataStore: md, + NetworkProviders: np, + Executor: exe, + Snapshotter: snap, + ContentStore: c, + Applier: winlayers.NewFileSystemApplierWithWindows(c, apply.NewFileSystemApplier(c)), + Differ: winlayers.NewWalkingDiffWithWindows(c, 
walking.NewWalkingDiff(c)), + ImageStore: nil, // explicitly + Platforms: []ocispecs.Platform{platforms.Normalize(platforms.DefaultSpec())}, + IdentityMapping: idmap, + LeaseManager: lm, + GarbageCollect: mdb.GarbageCollect, + ParallelismSem: parallelismSem, + MountPoolRoot: filepath.Join(root, "cachemounts"), } return opt, nil } diff --git a/worker/runc/runc_test.go b/worker/runc/runc_test.go index bcaaf812df6d..59373a53a68b 100644 --- a/worker/runc/runc_test.go +++ b/worker/runc/runc_test.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -29,10 +28,8 @@ import ( "github.com/stretchr/testify/require" ) -func newWorkerOpt(t *testing.T, processMode oci.ProcessMode) (base.WorkerOpt, func()) { - tmpdir, err := ioutil.TempDir("", "workertest") - require.NoError(t, err) - cleanup := func() { os.RemoveAll(tmpdir) } +func newWorkerOpt(t *testing.T, processMode oci.ProcessMode) base.WorkerOpt { + tmpdir := t.TempDir() snFactory := SnapshotterFactory{ Name: "overlayfs", @@ -41,10 +38,10 @@ func newWorkerOpt(t *testing.T, processMode oci.ProcessMode) (base.WorkerOpt, fu }, } rootless := false - workerOpt, err := NewWorkerOpt(tmpdir, snFactory, rootless, processMode, nil, nil, netproviders.Opt{Mode: "host"}, nil, "", "", nil, "", "") + workerOpt, err := NewWorkerOpt(tmpdir, snFactory, rootless, processMode, nil, nil, netproviders.Opt{Mode: "host"}, nil, "", "", false, nil, "", "") require.NoError(t, err) - return workerOpt, cleanup + return workerOpt } func checkRequirement(t *testing.T) { @@ -63,8 +60,7 @@ func TestRuncWorker(t *testing.T) { t.Parallel() checkRequirement(t) - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, oci.ProcessSandbox) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, oci.ProcessSandbox) w, err := base.NewWorker(context.TODO(), workerOpt) require.NoError(t, err) @@ -143,7 +139,7 @@ func TestRuncWorker(t *testing.T) { require.NoError(t, err) //Verifies fix for issue https://github.com/moby/buildkit/issues/429 - dt, err := ioutil.ReadFile(filepath.Join(target, "run", "bar")) + dt, err := os.ReadFile(filepath.Join(target, "run", "bar")) require.NoError(t, err) require.Equal(t, string(dt), "foo\n") @@ -185,8 +181,7 @@ func TestRuncWorkerNoProcessSandbox(t *testing.T) { t.Parallel() checkRequirement(t) - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, oci.NoProcessSandbox) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, oci.NoProcessSandbox) w, err := base.NewWorker(context.TODO(), workerOpt) require.NoError(t, err) @@ -199,7 +194,7 @@ func TestRuncWorkerNoProcessSandbox(t *testing.T) { // ensure the procfs is shared selfPID := os.Getpid() - selfCmdline, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", selfPID)) + selfCmdline, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", selfPID)) require.NoError(t, err) meta := executor.Meta{ Args: []string{"/bin/cat", fmt.Sprintf("/proc/%d/cmdline", selfPID)}, @@ -216,8 +211,7 @@ func TestRuncWorkerExec(t *testing.T) { t.Parallel() checkRequirement(t) - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, oci.ProcessSandbox) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, oci.ProcessSandbox) w, err := base.NewWorker(context.TODO(), workerOpt) require.NoError(t, err) @@ -228,8 +222,7 @@ func TestRuncWorkerExecFailures(t *testing.T) { t.Parallel() checkRequirement(t) - workerOpt, cleanupWorkerOpt := newWorkerOpt(t, oci.ProcessSandbox) - defer cleanupWorkerOpt() + workerOpt := newWorkerOpt(t, oci.ProcessSandbox) w, err := base.NewWorker(context.TODO(), workerOpt) 
require.NoError(t, err) diff --git a/worker/tests/common.go b/worker/tests/common.go index 67e362283c61..46c99561d989 100644 --- a/worker/tests/common.go +++ b/worker/tests/common.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "io" - "io/ioutil" "testing" "time" @@ -126,7 +125,7 @@ func TestWorkerExec(t *testing.T, w *base.Worker) { Meta: executor.Meta{ Args: []string{"sh", "-c", "cat > /tmp/msg"}, }, - Stdin: ioutil.NopCloser(stdin), + Stdin: io.NopCloser(stdin), Stdout: &nopCloser{stdout}, Stderr: &nopCloser{stderr}, }) diff --git a/worker/worker.go b/worker/worker.go index 743513bb0a98..2f426e9ead40 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -2,8 +2,10 @@ package worker import ( "context" + "io" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/leases" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" @@ -17,10 +19,12 @@ import ( ) type Worker interface { + io.Closer // ID needs to be unique in the cluster ID() string Labels() map[string]string Platforms(noCache bool) []ocispecs.Platform + BuildkitVersion() client.BuildkitVersion GCPolicy() []client.PruneInfo LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) @@ -35,22 +39,10 @@ type Worker interface { ContentStore() content.Store Executor() executor.Executor CacheManager() cache.Manager + LeaseManager() leases.Manager } type Infos interface { GetDefault() (Worker, error) WorkerInfos() []client.WorkerInfo } - -// Pre-defined label keys -const ( - labelPrefix = "org.mobyproject.buildkit.worker." - LabelExecutor = labelPrefix + "executor" // "oci" or "containerd" - LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) - LabelHostname = labelPrefix + "hostname" - LabelNetwork = labelPrefix + "network" // "cni" or "host" - LabelApparmorProfile = labelPrefix + "apparmor.profile" - LabelOCIProcessMode = labelPrefix + "oci.process-mode" // OCI worker: process mode ("sandbox", "no-sandbox") - LabelContainerdUUID = labelPrefix + "containerd.uuid" // containerd worker: containerd UUID - LabelContainerdNamespace = labelPrefix + "containerd.namespace" // containerd worker: containerd namespace -) diff --git a/worker/workercontroller.go b/worker/workercontroller.go index 3899d5999cb6..e175b4002b4a 100644 --- a/worker/workercontroller.go +++ b/worker/workercontroller.go @@ -2,6 +2,7 @@ package worker import ( "github.com/containerd/containerd/filters" + "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/client" "github.com/pkg/errors" ) @@ -13,6 +14,16 @@ type Controller struct { workers []Worker } +func (c *Controller) Close() error { + var rerr error + for _, w := range c.workers { + if err := w.Close(); err != nil { + rerr = multierror.Append(rerr, err) + } + } + return rerr +} + // Add adds a local worker. // The first worker becomes the default. // @@ -62,9 +73,10 @@ func (c *Controller) WorkerInfos() []client.WorkerInfo { out := make([]client.WorkerInfo, 0, len(c.workers)) for _, w := range c.workers { out = append(out, client.WorkerInfo{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: w.Platforms(true), + ID: w.ID(), + Labels: w.Labels(), + Platforms: w.Platforms(false), + BuildkitVersion: w.BuildkitVersion(), }) } return out
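
For context, both the new `(*Worker).Close` and `(*Controller).Close` above aggregate shutdown errors with `hashicorp/go-multierror` instead of returning on the first failure, so every network provider and worker gets a chance to clean up. A self-contained sketch of the same pattern; `closeAll` is a hypothetical name and `io.Closer` stands in for the worker/provider types:

```go
package example

import (
	"io"

	"github.com/hashicorp/go-multierror"
)

// closeAll mirrors Worker.Close / Controller.Close: every closer is
// attempted, and failures are collected rather than short-circuiting.
func closeAll(closers ...io.Closer) error {
	var rerr error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			rerr = multierror.Append(rerr, err)
		}
	}
	return rerr // nil only if every Close succeeded
}
```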